/doc/man/Makefile
/include/Makefile
/src/common/Makefile
+/src/lib/lttng-ust-common/Makefile
/src/lib/lttng-ust-ctl/Makefile
/src/lib/lttng-ust-cyg-profile/Makefile
/src/lib/lttng-ust-dl/Makefile
doc/man/Makefile
include/Makefile
src/common/Makefile
+ src/lib/lttng-ust-common/Makefile
src/lib/lttng-ust-ctl/Makefile
src/lib/lttng-ust-cyg-profile/Makefile
src/lib/lttng-ust-dl/Makefile
# SPDX-License-Identifier: LGPL-2.1-only
SUBDIRS = \
+ lttng-ust-common \
lttng-ust \
lttng-ust-ctl \
lttng-ust-fd \
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+lib_LTLIBRARIES = liblttng-ust-common.la
+
+liblttng_ust_common_la_SOURCES = \
+ fd-tracker.c \
+ ust-common.c \
+ lttng-ust-urcu.c \
+ lttng-ust-urcu-pointer.c
+
+liblttng_ust_common_la_LIBADD = \
+ $(top_builddir)/src/common/libcommon.la
+
+liblttng_ust_common_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
+
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 Aravind HT <aravind.ht@gmail.com>
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/select.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <urcu/compiler.h>
+#include <urcu/tls-compat.h>
+#include <urcu/system.h>
+
+#include "common/ust-fd.h"
+#include "common/macros.h"
+#include <lttng/ust-error.h>
+#include "common/logging.h"
+
+/* Operations on the fd set. */
+#define IS_FD_VALID(fd) ((fd) >= 0 && (fd) < lttng_ust_max_fd)
+#define GET_FD_SET_FOR_FD(fd, fd_sets) (&((fd_sets)[(fd) / FD_SETSIZE]))
+#define CALC_INDEX_TO_SET(fd) ((fd) % FD_SETSIZE)
+#define IS_FD_STD(fd) (IS_FD_VALID(fd) && (fd) <= STDERR_FILENO)
+
+/* Check fd validity before calling these. */
+#define ADD_FD_TO_SET(fd, fd_sets) \
+ FD_SET(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
+#define IS_FD_SET(fd, fd_sets) \
+ FD_ISSET(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
+#define DEL_FD_FROM_SET(fd, fd_sets) \
+ FD_CLR(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
+
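+/*
+ * Worked example (illustrative only): with FD_SETSIZE == 1024, fd 1500
+ * lands in the second fd_set at bit index 476, i.e.
+ * GET_FD_SET_FOR_FD(1500, sets) == &sets[1] and
+ * CALC_INDEX_TO_SET(1500) == 476.
+ */
+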
+/*
+ * Protects the lttng_fd_set. Nests within the ust_lock, and therefore
+ * within the libc dl lock. Hence, the TLS must be fixed up before
+ * nesting into this lock.
+ *
+ * The ust_safe_guard_fd_mutex nests within the ust_mutex. This mutex
+ * is also held across fork.
+ */
+static pthread_mutex_t ust_safe_guard_fd_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Cancel state when grabbing the ust_safe_guard_fd_mutex. Saved when
+ * locking, restored on unlock. Protected by ust_safe_guard_fd_mutex.
+ */
+static int ust_safe_guard_saved_cancelstate;
+
+/*
+ * Track whether we are within lttng-ust or the application, for the
+ * close() system call override by the LD_PRELOAD library. This also
+ * tracks whether we are invoking close() from a signal handler nested
+ * on an application thread.
+ */
+static DEFINE_URCU_TLS(int, ust_fd_mutex_nest);
+
+/* fd_set array used to book-keep the fds in use by lttng-ust. */
+static fd_set *lttng_fd_set;
+static int lttng_ust_max_fd;
+static int num_fd_sets;
+static int init_done;
+
+/*
+ * Force a read (implying a TLS fixup for dlopen) of the TLS variables.
+ */
+void lttng_ust_fixup_fd_tracker_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(ust_fd_mutex_nest)));
+}
+
+/*
+ * Allocate the fd set array based on the hard limit set for this
+ * process. This will be called during the constructor execution
+ * and will also be called in the child after fork via lttng_ust_init.
+ */
+void lttng_ust_init_fd_tracker(void)
+{
+ struct rlimit rlim;
+ int i;
+
+ if (CMM_LOAD_SHARED(init_done))
+ return;
+
+ memset(&rlim, 0, sizeof(rlim));
+ /* Get the current possible max number of fd for this process. */
+ if (getrlimit(RLIMIT_NOFILE, &rlim) < 0)
+ abort();
+ /*
+	 * The fd set array size is determined from the hard limit. Even
+	 * if the process wishes to increase its limit using setrlimit(),
+	 * it can only raise the soft limit, which can never exceed the
+	 * hard limit.
+ */
+ lttng_ust_max_fd = rlim.rlim_max;
+ num_fd_sets = lttng_ust_max_fd / FD_SETSIZE;
+ if (lttng_ust_max_fd % FD_SETSIZE)
+ ++num_fd_sets;
+ if (lttng_fd_set != NULL) {
+ free(lttng_fd_set);
+ lttng_fd_set = NULL;
+ }
+ lttng_fd_set = malloc(num_fd_sets * (sizeof(fd_set)));
+ if (!lttng_fd_set)
+ abort();
+ for (i = 0; i < num_fd_sets; i++)
+		FD_ZERO(&lttng_fd_set[i]);
+ CMM_STORE_SHARED(init_done, 1);
+}
+
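+/*
+ * Sizing example (illustrative only): with a hard limit of 4096 fds and
+ * FD_SETSIZE == 1024, num_fd_sets == 4; a hard limit of 5000 would round
+ * up to 5 sets.
+ */
+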
+void lttng_ust_lock_fd_tracker(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!URCU_TLS(ust_fd_mutex_nest)++) {
+ /*
+		 * Ensure the compiler doesn't move the store after the close()
+		 * call, in case close() is marked as a leaf function.
+ */
+ cmm_barrier();
+ pthread_mutex_lock(&ust_safe_guard_fd_mutex);
+ ust_safe_guard_saved_cancelstate = oldstate;
+ }
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+}
+
+void lttng_ust_unlock_fd_tracker(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, newstate, oldstate;
+ bool restore_cancel = false;
+
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ /*
+	 * Ensure the compiler doesn't move the store before the close()
+	 * call, in case close() is marked as a leaf function.
+ */
+ cmm_barrier();
+ if (!--URCU_TLS(ust_fd_mutex_nest)) {
+ newstate = ust_safe_guard_saved_cancelstate;
+ restore_cancel = true;
+ pthread_mutex_unlock(&ust_safe_guard_fd_mutex);
+ }
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (restore_cancel) {
+ ret = pthread_setcancelstate(newstate, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ }
+}
+
+static int dup_std_fd(int fd)
+{
+ int ret, i;
+ int fd_to_close[STDERR_FILENO + 1];
+ int fd_to_close_count = 0;
+ int dup_cmd = F_DUPFD; /* Default command */
+ int fd_valid = -1;
+
+ if (!(IS_FD_STD(fd))) {
+ /* Should not be here */
+ ret = -1;
+ goto error;
+ }
+
+ /* Check for FD_CLOEXEC flag */
+ ret = fcntl(fd, F_GETFD);
+ if (ret < 0) {
+ PERROR("fcntl on f_getfd");
+ ret = -1;
+ goto error;
+ }
+
+ if (ret & FD_CLOEXEC) {
+ dup_cmd = F_DUPFD_CLOEXEC;
+ }
+
+ /* Perform dup */
+ for (i = 0; i < STDERR_FILENO + 1; i++) {
+ ret = fcntl(fd, dup_cmd, 0);
+ if (ret < 0) {
+ PERROR("fcntl dup fd");
+ goto error;
+ }
+
+ if (!(IS_FD_STD(ret))) {
+ /* fd is outside of STD range, use it. */
+ fd_valid = ret;
+ /* Close fd received as argument. */
+ fd_to_close[i] = fd;
+ fd_to_close_count++;
+ break;
+ }
+
+ fd_to_close[i] = ret;
+ fd_to_close_count++;
+ }
+
+ /* Close intermediary fds */
+ for (i = 0; i < fd_to_close_count; i++) {
+ ret = close(fd_to_close[i]);
+ if (ret) {
+ PERROR("close on temporary fd: %d.", fd_to_close[i]);
+ /*
+			 * Not aborting here would force complicated
+			 * error handling onto the caller. If a failure
+			 * occurs here, the system is already in a bad
+			 * state.
+ abort();
+ }
+ }
+
+ ret = fd_valid;
+error:
+ return ret;
+}
+
+/*
+ * Needs to be called with ust_safe_guard_fd_mutex held when opening the fd.
+ * Has strict checking of fd validity.
+ *
+ * If fd <= 2, dup the fd until fd > 2. This enables us to bypass
+ * problems that can be encountered if UST uses stdin, stdout, stderr
+ * fds for internal use (daemon etc.). This can happen if the
+ * application closes any of those file descriptors. Intermediary fds
+ * are closed as needed.
+ *
+ * Return -1 on error.
+ *
+ */
+int lttng_ust_add_fd_to_tracker(int fd)
+{
+ int ret;
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+ assert(URCU_TLS(ust_fd_mutex_nest));
+
+ if (IS_FD_STD(fd)) {
+ ret = dup_std_fd(fd);
+ if (ret < 0) {
+ goto error;
+ }
+ fd = ret;
+ }
+
+	/* Trying to add an fd which we cannot accommodate. */
+	assert(IS_FD_VALID(fd));
+	/* Setting an fd that's already set. */
+ assert(!IS_FD_SET(fd, lttng_fd_set));
+
+ ADD_FD_TO_SET(fd, lttng_fd_set);
+ return fd;
+error:
+ return ret;
+}
+
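+/*
+ * Usage sketch (hypothetical caller, not part of this file): the tracker
+ * lock must be held around the open/add pair.
+ *
+ *   int fd;
+ *
+ *   lttng_ust_lock_fd_tracker();
+ *   fd = open("/dev/null", O_RDONLY);
+ *   if (fd >= 0)
+ *           fd = lttng_ust_add_fd_to_tracker(fd);
+ *   lttng_ust_unlock_fd_tracker();
+ */
+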
+/*
+ * Needs to be called with ust_safe_guard_fd_mutex held when opening the fd.
+ * Has strict checking for fd validity.
+ */
+void lttng_ust_delete_fd_from_tracker(int fd)
+{
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+
+ assert(URCU_TLS(ust_fd_mutex_nest));
+ /* Not a valid fd. */
+ assert(IS_FD_VALID(fd));
+ /* Deleting an fd which was not set. */
+ assert(IS_FD_SET(fd, lttng_fd_set));
+
+ DEL_FD_FROM_SET(fd, lttng_fd_set);
+}
+
+/*
+ * Interface allowing applications to close arbitrary file descriptors.
+ * If the fd is owned by lttng-ust, we return -1 with errno set to
+ * EBADF instead of closing it.
+ */
+int lttng_ust_safe_close_fd(int fd, int (*close_cb)(int fd))
+{
+ int ret = 0;
+
+ lttng_ust_fixup_fd_tracker_tls();
+
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+
+ /*
+ * If called from lttng-ust, we directly call close without
+ * validating whether the FD is part of the tracked set.
+ */
+ if (URCU_TLS(ust_fd_mutex_nest))
+ return close_cb(fd);
+
+ lttng_ust_lock_fd_tracker();
+ if (IS_FD_VALID(fd) && IS_FD_SET(fd, lttng_fd_set)) {
+ ret = -1;
+ errno = EBADF;
+ } else {
+ ret = close_cb(fd);
+ }
+ lttng_ust_unlock_fd_tracker();
+
+ return ret;
+}
+
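+/*
+ * Sketch of how an LD_PRELOAD close() wrapper can use this interface
+ * (modeled on liblttng-ust-fd; resolving "next_close" through dlsym() is
+ * an assumption of this example):
+ *
+ *   int close(int fd)
+ *   {
+ *           return lttng_ust_safe_close_fd(fd, next_close);
+ *   }
+ */
+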
+/*
+ * Interface allowing applications to close arbitrary streams.
+ * If the stream is owned by lttng-ust, we return -1 with errno set to
+ * EBADF instead of closing it.
+ */
+int lttng_ust_safe_fclose_stream(FILE *stream, int (*fclose_cb)(FILE *stream))
+{
+ int ret = 0, fd;
+
+ lttng_ust_fixup_fd_tracker_tls();
+
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+
+ /*
+ * If called from lttng-ust, we directly call fclose without
+ * validating whether the FD is part of the tracked set.
+ */
+ if (URCU_TLS(ust_fd_mutex_nest))
+ return fclose_cb(stream);
+
+ fd = fileno(stream);
+
+ lttng_ust_lock_fd_tracker();
+ if (IS_FD_VALID(fd) && IS_FD_SET(fd, lttng_fd_set)) {
+ ret = -1;
+ errno = EBADF;
+ } else {
+ ret = fclose_cb(stream);
+ }
+ lttng_ust_unlock_fd_tracker();
+
+ return ret;
+}
+
+#ifdef __OpenBSD__
+static void set_close_success(int *p)
+{
+ *p = 1;
+}
+static int test_close_success(const int *p)
+{
+ return *p;
+}
+#else
+static void set_close_success(int *p __attribute__((unused)))
+{
+}
+static int test_close_success(const int *p __attribute__((unused)))
+{
+ return 1;
+}
+#endif
+
+/*
+ * Implement helper for closefrom() override.
+ */
+int lttng_ust_safe_closefrom_fd(int lowfd, int (*close_cb)(int fd))
+{
+ int ret = 0, close_success = 0, i;
+
+ lttng_ust_fixup_fd_tracker_tls();
+
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+
+ if (lowfd < 0) {
+ /*
+		 * NetBSD returns EBADF if the fd is invalid.
+ */
+ errno = EBADF;
+ ret = -1;
+ goto end;
+ }
+ /*
+ * If called from lttng-ust, we directly call close without
+ * validating whether the FD is part of the tracked set.
+ */
+ if (URCU_TLS(ust_fd_mutex_nest)) {
+ for (i = lowfd; i < lttng_ust_max_fd; i++) {
+ if (close_cb(i) < 0) {
+ switch (errno) {
+ case EBADF:
+ continue;
+ case EINTR:
+ default:
+ ret = -1;
+ goto end;
+ }
+ }
+ set_close_success(&close_success);
+ }
+ } else {
+ lttng_ust_lock_fd_tracker();
+ for (i = lowfd; i < lttng_ust_max_fd; i++) {
+ if (IS_FD_VALID(i) && IS_FD_SET(i, lttng_fd_set))
+ continue;
+ if (close_cb(i) < 0) {
+ switch (errno) {
+ case EBADF:
+ continue;
+ case EINTR:
+ default:
+ ret = -1;
+ lttng_ust_unlock_fd_tracker();
+ goto end;
+ }
+ }
+ set_close_success(&close_success);
+ }
+ lttng_ust_unlock_fd_tracker();
+ }
+ if (!test_close_success(&close_success)) {
+ /*
+		 * OpenBSD returns EBADF if the fd is greater than all open
+		 * file descriptors.
+ */
+ ret = -1;
+ errno = EBADF;
+ }
+end:
+ return ret;
+}
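+
+/*
+ * Sketch of a closefrom() override built on the helper above (modeled on
+ * liblttng-ust-fd; "next_close" is an assumption of this example):
+ *
+ *   void closefrom(int lowfd)
+ *   {
+ *           (void) lttng_ust_safe_closefrom_fd(lowfd, next_close);
+ *   }
+ */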
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * Library wrappers to be used by non-LGPL-compatible source code.
+ */
+
+#include <urcu/uatomic.h>
+
+#include <lttng/urcu/static/pointer.h>
+/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#include <lttng/urcu/pointer.h>
+
+void *lttng_ust_rcu_dereference_sym(void *p)
+{
+ return _lttng_ust_rcu_dereference(p);
+}
+
+void *lttng_ust_rcu_set_pointer_sym(void **p, void *v)
+{
+ cmm_wmb();
+ uatomic_set(p, v);
+ return v;
+}
+
+void *lttng_ust_rcu_xchg_pointer_sym(void **p, void *v)
+{
+ cmm_wmb();
+ return uatomic_xchg(p, v);
+}
+
+void *lttng_ust_rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
+{
+ cmm_wmb();
+ return uatomic_cmpxchg(p, old, _new);
+}
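+
+/*
+ * Publication example (illustrative, hypothetical caller): fully
+ * initialize a node before making it reachable, so that readers
+ * dereferencing the pointer never observe partially-initialized
+ * contents.
+ *
+ *   struct node *n = malloc(sizeof(*n));
+ *
+ *   n->value = 42;
+ *   lttng_ust_rcu_set_pointer_sym((void **) &global_node, n);
+ */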
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <pthread.h>
+#include <signal.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <poll.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <sys/mman.h>
+
+#include <urcu/arch.h>
+#include <urcu/wfcqueue.h>
+#include <lttng/urcu/static/urcu-ust.h>
+#include <lttng/urcu/pointer.h>
+#include <urcu/tls-compat.h>
+
+/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#undef _LGPL_SOURCE
+#include <lttng/urcu/urcu-ust.h>
+#define _LGPL_SOURCE
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#ifdef __linux__
+static
+void *mremap_wrapper(void *old_address, size_t old_size,
+ size_t new_size, int flags)
+{
+ return mremap(old_address, old_size, new_size, flags);
+}
+#else
+
+#define MREMAP_MAYMOVE 1
+#define MREMAP_FIXED 2
+
+/*
+ * mremap() wrapper for non-Linux systems that do not allow MAYMOVE.
+ * This is not generic.
+ */
+static
+void *mremap_wrapper(void *old_address, size_t old_size,
+ size_t new_size, int flags)
+{
+ assert(!(flags & MREMAP_MAYMOVE));
+
+ return MAP_FAILED;
+}
+#endif
+
+/* Sleep delay in ms */
+#define RCU_SLEEP_DELAY_MS 10
+#define INIT_NR_THREADS 8
+#define ARENA_INIT_ALLOC \
+ sizeof(struct registry_chunk) \
+ + INIT_NR_THREADS * sizeof(struct lttng_ust_urcu_reader)
+
+/*
+ * Active attempts to check for reader Q.S. before calling sleep().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
+
+static
+int lttng_ust_urcu_refcount;
+
+/* If the headers do not support the membarrier system call, fall back to smp_mb(). */
+#ifdef __NR_membarrier
+# define membarrier(...) syscall(__NR_membarrier, __VA_ARGS__)
+#else
+# define membarrier(...) -ENOSYS
+#endif
+
+enum membarrier_cmd {
+ MEMBARRIER_CMD_QUERY = 0,
+ MEMBARRIER_CMD_SHARED = (1 << 0),
+ /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
+ /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
+};
+
+static
+void _lttng_ust_urcu_init(void)
+ __attribute__((constructor));
+static
+void lttng_ust_urcu_exit(void)
+ __attribute__((destructor));
+
+#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
+int lttng_ust_urcu_has_sys_membarrier;
+#endif
+
+/*
+ * rcu_gp_lock ensures mutual exclusion between threads calling
+ * synchronize_rcu().
+ */
+static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
+/*
+ * rcu_registry_lock ensures mutual exclusion between threads
+ * registering and unregistering themselves to/from the registry, and
+ * with threads reading that registry from synchronize_rcu(). However,
+ * this lock is not held through the entire grace-period wait: it is
+ * sporadically released between iterations on the registry.
+ * rcu_registry_lock may nest inside rcu_gp_lock.
+ */
+static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
+static int initialized;
+
+static pthread_key_t lttng_ust_urcu_key;
+
+struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };
+
+/*
+ * Pointer to registry elements. Written to only by each individual reader. Read
+ * by both the reader and the writers.
+ */
+DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);
+
+static CDS_LIST_HEAD(registry);
+
+struct registry_chunk {
+ size_t data_len; /* data length */
+ size_t used; /* amount of data used */
+ struct cds_list_head node; /* chunk_list node */
+ char data[];
+};
+
+struct registry_arena {
+ struct cds_list_head chunk_list;
+};
+
+static struct registry_arena registry_arena = {
+ .chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
+};
+
+/* Saved fork signal mask, protected by rcu_gp_lock */
+static sigset_t saved_fork_signal_mask;
+
+static void mutex_lock(pthread_mutex_t *mutex)
+{
+ int ret;
+
+#ifndef DISTRUST_SIGNALS_EXTREME
+ ret = pthread_mutex_lock(mutex);
+ if (ret)
+ abort();
+#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
+ while ((ret = pthread_mutex_trylock(mutex)) != 0) {
+ if (ret != EBUSY && ret != EINTR)
+ abort();
+		poll(NULL, 0, 10);
+ }
+#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
+}
+
+static void mutex_unlock(pthread_mutex_t *mutex)
+{
+ int ret;
+
+ ret = pthread_mutex_unlock(mutex);
+ if (ret)
+ abort();
+}
+
+static void smp_mb_master(void)
+{
+ if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
+ if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
+ abort();
+ } else {
+ cmm_smp_mb();
+ }
+}
+
+/*
+ * Always called with rcu_registry_lock held. Releases this lock between
+ * iterations and grabs it again. Holds the lock when it returns.
+ */
+static void wait_for_readers(struct cds_list_head *input_readers,
+ struct cds_list_head *cur_snap_readers,
+ struct cds_list_head *qsreaders)
+{
+ unsigned int wait_loops = 0;
+ struct lttng_ust_urcu_reader *index, *tmp;
+
+ /*
+ * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
+ * indicate quiescence (not nested), or observe the current
+ * rcu_gp.ctr value.
+ */
+ for (;;) {
+ if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
+ wait_loops++;
+
+ cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
+ switch (lttng_ust_urcu_reader_state(&index->ctr)) {
+ case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
+ if (cur_snap_readers) {
+ cds_list_move(&index->node,
+ cur_snap_readers);
+ break;
+ }
+ /* Fall-through */
+ case LTTNG_UST_URCU_READER_INACTIVE:
+ cds_list_move(&index->node, qsreaders);
+ break;
+ case LTTNG_UST_URCU_READER_ACTIVE_OLD:
+ /*
+ * Old snapshot. Leaving node in
+ * input_readers will make us busy-loop
+ * until the snapshot becomes current or
+ * the reader becomes inactive.
+ */
+ break;
+ }
+ }
+
+ if (cds_list_empty(input_readers)) {
+ break;
+ } else {
+ /* Temporarily unlock the registry lock. */
+ mutex_unlock(&rcu_registry_lock);
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
+ (void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
+ else
+ caa_cpu_relax();
+ /* Re-lock the registry lock before the next loop. */
+ mutex_lock(&rcu_registry_lock);
+ }
+ }
+}
+
+void lttng_ust_urcu_synchronize_rcu(void)
+{
+ CDS_LIST_HEAD(cur_snap_readers);
+ CDS_LIST_HEAD(qsreaders);
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ assert(!ret);
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ assert(!ret);
+
+ mutex_lock(&rcu_gp_lock);
+
+ mutex_lock(&rcu_registry_lock);
+
+	if (cds_list_empty(&registry))
+ goto out;
+
+	/*
+	 * All threads should read qparity before accessing the data
+	 * structure pointed to by the new ptr.
+	 * Write the new ptr before changing the qparity.
+	 */
+ smp_mb_master();
+
+ /*
+ * Wait for readers to observe original parity or be quiescent.
+	 * wait_for_readers() can release and re-acquire rcu_registry_lock
+	 * internally.
+ */
+	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
+
+ /*
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ cmm_smp_mb();
+
+ /* Switch parity: 0 -> 1, 1 -> 0 */
+ CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr, lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);
+
+ /*
+ * Must commit qparity update to memory before waiting for other parity
+ * quiescent state. Failure to do so could result in the writer waiting
+ * forever while new readers are always accessing data (no progress).
+ * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
+ */
+
+ /*
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ cmm_smp_mb();
+
+ /*
+ * Wait for readers to observe new parity or be quiescent.
+	 * wait_for_readers() can release and re-acquire rcu_registry_lock
+	 * internally.
+ */
+ wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
+
+ /*
+ * Put quiescent reader list back into registry.
+ */
+	cds_list_splice(&qsreaders, &registry);
+
+ /*
+	 * Finish waiting for reader threads before letting the old ptr
+	 * be freed.
+ */
+ smp_mb_master();
+out:
+ mutex_unlock(&rcu_registry_lock);
+ mutex_unlock(&rcu_gp_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ assert(!ret);
+}
+
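+/*
+ * Write-side usage sketch (hypothetical caller): atomically replace the
+ * published pointer, wait for a grace period, then reclaim the old node,
+ * which no reader can still reference:
+ *
+ *   struct node *old;
+ *
+ *   old = lttng_ust_rcu_xchg_pointer_sym((void **) &global_node, new_node);
+ *   lttng_ust_urcu_synchronize_rcu();
+ *   free(old);
+ */
+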
+/*
+ * Library wrappers to be used by non-LGPL-compatible source code.
+ */
+
+void lttng_ust_urcu_read_lock(void)
+{
+ _lttng_ust_urcu_read_lock();
+}
+
+void lttng_ust_urcu_read_unlock(void)
+{
+ _lttng_ust_urcu_read_unlock();
+}
+
+int lttng_ust_urcu_read_ongoing(void)
+{
+ return _lttng_ust_urcu_read_ongoing();
+}
+
+/*
+ * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
+ * Otherwise, try expanding the last chunk. If this fails, allocate a new
+ * chunk twice as big as the last chunk.
+ * Memory used by chunks _never_ moves. A chunk could theoretically be
+ * freed when all "used" slots are released, but we don't do it at this
+ * point.
+ */
+static
+void expand_arena(struct registry_arena *arena)
+{
+ struct registry_chunk *new_chunk, *last_chunk;
+ size_t old_chunk_len, new_chunk_len;
+
+ /* No chunk. */
+ if (cds_list_empty(&arena->chunk_list)) {
+ assert(ARENA_INIT_ALLOC >=
+ sizeof(struct registry_chunk)
+ + sizeof(struct lttng_ust_urcu_reader));
+ new_chunk_len = ARENA_INIT_ALLOC;
+ new_chunk = (struct registry_chunk *) mmap(NULL,
+ new_chunk_len,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE,
+ -1, 0);
+ if (new_chunk == MAP_FAILED)
+ abort();
+ memset(new_chunk, 0, new_chunk_len);
+ new_chunk->data_len =
+ new_chunk_len - sizeof(struct registry_chunk);
+ cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
+ return; /* We're done. */
+ }
+
+ /* Try expanding last chunk. */
+ last_chunk = cds_list_entry(arena->chunk_list.prev,
+ struct registry_chunk, node);
+ old_chunk_len =
+ last_chunk->data_len + sizeof(struct registry_chunk);
+ new_chunk_len = old_chunk_len << 1;
+
+ /* Don't allow memory mapping to move, just expand. */
+ new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
+ new_chunk_len, 0);
+ if (new_chunk != MAP_FAILED) {
+ /* Should not have moved. */
+ assert(new_chunk == last_chunk);
+ memset((char *) last_chunk + old_chunk_len, 0,
+ new_chunk_len - old_chunk_len);
+ last_chunk->data_len =
+ new_chunk_len - sizeof(struct registry_chunk);
+ return; /* We're done. */
+ }
+
+ /* Remap did not succeed, we need to add a new chunk. */
+ new_chunk = (struct registry_chunk *) mmap(NULL,
+ new_chunk_len,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE,
+ -1, 0);
+ if (new_chunk == MAP_FAILED)
+ abort();
+ memset(new_chunk, 0, new_chunk_len);
+ new_chunk->data_len =
+ new_chunk_len - sizeof(struct registry_chunk);
+ cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
+}
+
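+/*
+ * Growth example (illustrative): with INIT_NR_THREADS == 8, the first
+ * chunk holds 8 reader slots; when it fills up and mremap() cannot
+ * extend it in place, each new mmap()ed chunk is twice the size of the
+ * previous one.
+ */
+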
+static
+struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
+{
+ struct registry_chunk *chunk;
+ struct lttng_ust_urcu_reader *rcu_reader_reg;
+	int expand_done = 0;	/* Only allow one expand per alloc. */
+ size_t len = sizeof(struct lttng_ust_urcu_reader);
+
+retry:
+ cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
+ if (chunk->data_len - chunk->used < len)
+ continue;
+ /* Find spot */
+ for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
+ rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
+ rcu_reader_reg++) {
+ if (!rcu_reader_reg->alloc) {
+ rcu_reader_reg->alloc = 1;
+ chunk->used += len;
+ return rcu_reader_reg;
+ }
+ }
+ }
+
+ if (!expand_done) {
+ expand_arena(arena);
+ expand_done = 1;
+ goto retry;
+ }
+
+ return NULL;
+}
+
+/* Called with signals off and mutex locked */
+static
+void add_thread(void)
+{
+ struct lttng_ust_urcu_reader *rcu_reader_reg;
+ int ret;
+
+	rcu_reader_reg = arena_alloc(&registry_arena);
+ if (!rcu_reader_reg)
+ abort();
+ ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
+ if (ret)
+ abort();
+
+ /* Add to registry */
+ rcu_reader_reg->tid = pthread_self();
+ assert(rcu_reader_reg->ctr == 0);
+	cds_list_add(&rcu_reader_reg->node, &registry);
+ /*
+ * Reader threads are pointing to the reader registry. This is
+ * why its memory should never be relocated.
+ */
+ URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
+}
+
+/* Called with mutex locked */
+static
+void cleanup_thread(struct registry_chunk *chunk,
+ struct lttng_ust_urcu_reader *rcu_reader_reg)
+{
+ rcu_reader_reg->ctr = 0;
+ cds_list_del(&rcu_reader_reg->node);
+ rcu_reader_reg->tid = 0;
+ rcu_reader_reg->alloc = 0;
+ chunk->used -= sizeof(struct lttng_ust_urcu_reader);
+}
+
+static
+struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
+{
+ struct registry_chunk *chunk;
+
+	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
+ if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[0])
+ continue;
+ if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len])
+ continue;
+ return chunk;
+ }
+ return NULL;
+}
+
+/* Called with signals off and mutex locked */
+static
+void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
+{
+ cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
+ URCU_TLS(lttng_ust_urcu_reader) = NULL;
+}
+
+/* Disable signals, take mutex, add to registry */
+void lttng_ust_urcu_register(void)
+{
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ if (ret)
+ abort();
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ if (ret)
+ abort();
+
+ /*
+	 * Check whether a signal handler concurrently registered our
+	 * thread since the check in rcu_read_lock().
+ */
+ if (URCU_TLS(lttng_ust_urcu_reader))
+ goto end;
+
+ /*
+ * Take care of early registration before lttng_ust_urcu constructor.
+ */
+ _lttng_ust_urcu_init();
+
+ mutex_lock(&rcu_registry_lock);
+ add_thread();
+ mutex_unlock(&rcu_registry_lock);
+end:
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ if (ret)
+ abort();
+}
+
+void lttng_ust_urcu_register_thread(void)
+{
+ if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
+ lttng_ust_urcu_register(); /* If not yet registered. */
+}
+
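+/*
+ * Reader-side sketch (hypothetical caller): register once per thread,
+ * then bracket accesses to RCU-protected data with the read lock.
+ *
+ *   lttng_ust_urcu_register_thread();
+ *   lttng_ust_urcu_read_lock();
+ *   n = lttng_ust_rcu_dereference_sym(global_node);
+ *   ... use n ...
+ *   lttng_ust_urcu_read_unlock();
+ */
+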
+/* Disable signals, take mutex, remove from registry */
+static
+void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
+{
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ if (ret)
+ abort();
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ if (ret)
+ abort();
+
+ mutex_lock(&rcu_registry_lock);
+ remove_thread(rcu_reader_reg);
+ mutex_unlock(&rcu_registry_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ if (ret)
+ abort();
+ lttng_ust_urcu_exit();
+}
+
+/*
+ * Remove thread from the registry when it exits, and flag it as
+ * destroyed so garbage collection can take care of it.
+ */
+static
+void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
+{
+ lttng_ust_urcu_unregister(rcu_key);
+}
+
+#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
+static
+void lttng_ust_urcu_sys_membarrier_status(bool available)
+{
+ if (!available)
+ abort();
+}
+#else
+static
+void lttng_ust_urcu_sys_membarrier_status(bool available)
+{
+ if (!available)
+ return;
+ lttng_ust_urcu_has_sys_membarrier = 1;
+}
+#endif
+
+static
+void lttng_ust_urcu_sys_membarrier_init(void)
+{
+ bool available = false;
+ int mask;
+
+ mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
+ if (mask >= 0) {
+ if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
+ if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
+ abort();
+ available = true;
+ }
+ }
+ lttng_ust_urcu_sys_membarrier_status(available);
+}
+
+static
+void _lttng_ust_urcu_init(void)
+{
+ mutex_lock(&init_lock);
+ if (!lttng_ust_urcu_refcount++) {
+ int ret;
+
+		ret = pthread_key_create(&lttng_ust_urcu_key,
+ lttng_ust_urcu_thread_exit_notifier);
+ if (ret)
+ abort();
+ lttng_ust_urcu_sys_membarrier_init();
+ initialized = 1;
+ }
+ mutex_unlock(&init_lock);
+}
+
+static
+void lttng_ust_urcu_exit(void)
+{
+ mutex_lock(&init_lock);
+ if (!--lttng_ust_urcu_refcount) {
+ struct registry_chunk *chunk, *tmp;
+ int ret;
+
+ cds_list_for_each_entry_safe(chunk, tmp,
+				&registry_arena.chunk_list, node) {
+ munmap((void *) chunk, chunk->data_len
+ + sizeof(struct registry_chunk));
+ }
+		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
+ ret = pthread_key_delete(lttng_ust_urcu_key);
+ if (ret)
+ abort();
+ }
+ mutex_unlock(&init_lock);
+}
+
+/*
+ * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
+ * sure that fork() doesn't race with a concurrent thread executing with
+ * any of those locks held. This ensures that the registry and data
+ * protected by rcu_gp_lock are in a coherent state in the child.
+ */
+void lttng_ust_urcu_before_fork(void)
+{
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ assert(!ret);
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ assert(!ret);
+ mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
+ saved_fork_signal_mask = oldmask;
+}
+
+void lttng_ust_urcu_after_fork_parent(void)
+{
+ sigset_t oldmask;
+ int ret;
+
+ oldmask = saved_fork_signal_mask;
+ mutex_unlock(&rcu_registry_lock);
+ mutex_unlock(&rcu_gp_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ assert(!ret);
+}
+
+/*
+ * Prune all entries from the registry except our own thread. Matches the
+ * Linux fork() behavior. Called with rcu_gp_lock and rcu_registry_lock held.
+ */
+static
+void lttng_ust_urcu_prune_registry(void)
+{
+ struct registry_chunk *chunk;
+ struct lttng_ust_urcu_reader *rcu_reader_reg;
+
+	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
+ for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
+ rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
+ rcu_reader_reg++) {
+ if (!rcu_reader_reg->alloc)
+ continue;
+ if (rcu_reader_reg->tid == pthread_self())
+ continue;
+ cleanup_thread(chunk, rcu_reader_reg);
+ }
+ }
+}
+
+void lttng_ust_urcu_after_fork_child(void)
+{
+ sigset_t oldmask;
+ int ret;
+
+ lttng_ust_urcu_prune_registry();
+ oldmask = saved_fork_signal_mask;
+ mutex_unlock(&rcu_registry_lock);
+ mutex_unlock(&rcu_gp_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ assert(!ret);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2021 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#include "common/logging.h"
+#include "common/ust-fd.h"
+
+static
+void lttng_ust_common_init(void)
+ __attribute__((constructor));
+static
+void lttng_ust_common_init(void)
+{
+ /* Initialize logging for liblttng-ust-common */
+ lttng_ust_logging_init();
+
+	/*
+	 * Initialize the fd-tracker. Other libraries using it should also
+	 * call this in their own constructors, in case those run before
+	 * this one.
+	 */
+ lttng_ust_init_fd_tracker();
+}
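+
+/*
+ * Sketch (hypothetical example): a library linking against
+ * liblttng-ust-common would mirror this in its own constructor, so the
+ * fd-tracker is ready regardless of constructor ordering:
+ *
+ *   static void my_lib_init(void)
+ *           __attribute__((constructor));
+ *   static void my_lib_init(void)
+ *   {
+ *           lttng_ust_init_fd_tracker();
+ *   }
+ */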
-version-info $(LTTNG_UST_CTL_LIBRARY_VERSION)
liblttng_ust_ctl_la_LIBADD = \
- $(top_builddir)/src/lib/lttng-ust/liblttng-ust-common.la \
+ $(top_builddir)/src/lib/lttng-ust-common/liblttng-ust-common.la \
$(top_builddir)/src/lib/lttng-ust/liblttng-ust-support.la \
$(top_builddir)/src/common/libustcomm.la \
$(top_builddir)/src/common/libcommon.la \
noinst_LTLIBRARIES = liblttng-ust-runtime.la liblttng-ust-support.la
-lib_LTLIBRARIES = liblttng-ust-common.la liblttng-ust-tracepoint.la liblttng-ust.la
-
-# ust-common
-liblttng_ust_common_la_SOURCES = \
- fd-tracker.c \
- ust-common.c \
- lttng-ust-urcu.c \
- lttng-ust-urcu-pointer.c
-
-liblttng_ust_common_la_LIBADD = \
- $(top_builddir)/src/common/libcommon.la
-
-liblttng_ust_common_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
+lib_LTLIBRARIES = liblttng-ust-tracepoint.la liblttng-ust.la
liblttng_ust_tracepoint_la_SOURCES = \
tracepoint.c \
lttng-tracer-core.h
liblttng_ust_tracepoint_la_LIBADD = \
- liblttng-ust-common.la \
+ $(top_builddir)/src/lib/lttng-ust-common/liblttng-ust-common.la \
$(top_builddir)/src/common/libcommon.la \
$(DL_LIBS)
liblttng_ust_la_LIBADD = \
-lrt \
- liblttng-ust-common.la \
+ $(top_builddir)/src/lib/lttng-ust-common/liblttng-ust-common.la \
$(top_builddir)/src/common/libustcomm.la \
$(top_builddir)/src/common/libcommon.la \
liblttng-ust-tracepoint.la \
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 Aravind HT <aravind.ht@gmail.com>
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <limits.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <assert.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/select.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <fcntl.h>
-#include <pthread.h>
-#include <signal.h>
-#include <stdbool.h>
-#include <urcu/compiler.h>
-#include <urcu/tls-compat.h>
-#include <urcu/system.h>
-
-#include "common/ust-fd.h"
-#include "common/macros.h"
-#include <lttng/ust-error.h>
-#include "common/logging.h"
-
-/* Operations on the fd set. */
-#define IS_FD_VALID(fd) ((fd) >= 0 && (fd) < lttng_ust_max_fd)
-#define GET_FD_SET_FOR_FD(fd, fd_sets) (&((fd_sets)[(fd) / FD_SETSIZE]))
-#define CALC_INDEX_TO_SET(fd) ((fd) % FD_SETSIZE)
-#define IS_FD_STD(fd) (IS_FD_VALID(fd) && (fd) <= STDERR_FILENO)
-
-/* Check fd validity before calling these. */
-#define ADD_FD_TO_SET(fd, fd_sets) \
- FD_SET(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
-#define IS_FD_SET(fd, fd_sets) \
- FD_ISSET(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
-#define DEL_FD_FROM_SET(fd, fd_sets) \
- FD_CLR(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
-
-/*
- * Protect the lttng_fd_set. Nests within the ust_lock, and therefore
- * within the libc dl lock. Therefore, we need to fixup the TLS before
- * nesting into this lock.
- *
- * The ust_safe_guard_fd_mutex nests within the ust_mutex. This mutex
- * is also held across fork.
- */
-static pthread_mutex_t ust_safe_guard_fd_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * Cancel state when grabbing the ust_safe_guard_fd_mutex. Saved when
- * locking, restored on unlock. Protected by ust_safe_guard_fd_mutex.
- */
-static int ust_safe_guard_saved_cancelstate;
-
-/*
- * Track whether we are within lttng-ust or application, for close
- * system call override by LD_PRELOAD library. This also tracks whether
- * we are invoking close() from a signal handler nested on an
- * application thread.
- */
-static DEFINE_URCU_TLS(int, ust_fd_mutex_nest);
-
-/* fd_set used to book keep fd being used by lttng-ust. */
-static fd_set *lttng_fd_set;
-static int lttng_ust_max_fd;
-static int num_fd_sets;
-static int init_done;
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_ust_fixup_fd_tracker_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(ust_fd_mutex_nest)));
-}
-
-/*
- * Allocate the fd set array based on the hard limit set for this
- * process. This will be called during the constructor execution
- * and will also be called in the child after fork via lttng_ust_init.
- */
-void lttng_ust_init_fd_tracker(void)
-{
- struct rlimit rlim;
- int i;
-
- if (CMM_LOAD_SHARED(init_done))
- return;
-
- memset(&rlim, 0, sizeof(rlim));
- /* Get the current possible max number of fd for this process. */
- if (getrlimit(RLIMIT_NOFILE, &rlim) < 0)
- abort();
- /*
- * FD set array size determined using the hard limit. Even if
- * the process wishes to increase its limit using setrlimit, it
- * can only do so with the softlimit which will be less than the
- * hard limit.
- */
- lttng_ust_max_fd = rlim.rlim_max;
- num_fd_sets = lttng_ust_max_fd / FD_SETSIZE;
- if (lttng_ust_max_fd % FD_SETSIZE)
- ++num_fd_sets;
- if (lttng_fd_set != NULL) {
- free(lttng_fd_set);
- lttng_fd_set = NULL;
- }
- lttng_fd_set = malloc(num_fd_sets * (sizeof(fd_set)));
- if (!lttng_fd_set)
- abort();
- for (i = 0; i < num_fd_sets; i++)
-		FD_ZERO(&lttng_fd_set[i]);
- CMM_STORE_SHARED(init_done, 1);
-}
-
-void lttng_ust_lock_fd_tracker(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
-
- ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (!URCU_TLS(ust_fd_mutex_nest)++) {
- /*
- * Ensure the compiler don't move the store after the close()
- * call in case close() would be marked as leaf.
- */
- cmm_barrier();
- pthread_mutex_lock(&ust_safe_guard_fd_mutex);
- ust_safe_guard_saved_cancelstate = oldstate;
- }
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
-}
-
-void lttng_ust_unlock_fd_tracker(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, newstate, oldstate;
- bool restore_cancel = false;
-
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- /*
- * Ensure the compiler don't move the store before the close()
- * call, in case close() would be marked as leaf.
- */
- cmm_barrier();
- if (!--URCU_TLS(ust_fd_mutex_nest)) {
- newstate = ust_safe_guard_saved_cancelstate;
- restore_cancel = true;
- pthread_mutex_unlock(&ust_safe_guard_fd_mutex);
- }
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (restore_cancel) {
- ret = pthread_setcancelstate(newstate, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- }
-}
-
-static int dup_std_fd(int fd)
-{
- int ret, i;
- int fd_to_close[STDERR_FILENO + 1];
- int fd_to_close_count = 0;
- int dup_cmd = F_DUPFD; /* Default command */
- int fd_valid = -1;
-
- if (!(IS_FD_STD(fd))) {
- /* Should not be here */
- ret = -1;
- goto error;
- }
-
- /* Check for FD_CLOEXEC flag */
- ret = fcntl(fd, F_GETFD);
- if (ret < 0) {
- PERROR("fcntl on f_getfd");
- ret = -1;
- goto error;
- }
-
- if (ret & FD_CLOEXEC) {
- dup_cmd = F_DUPFD_CLOEXEC;
- }
-
- /* Perform dup */
- for (i = 0; i < STDERR_FILENO + 1; i++) {
- ret = fcntl(fd, dup_cmd, 0);
- if (ret < 0) {
- PERROR("fcntl dup fd");
- goto error;
- }
-
- if (!(IS_FD_STD(ret))) {
- /* fd is outside of STD range, use it. */
- fd_valid = ret;
- /* Close fd received as argument. */
- fd_to_close[i] = fd;
- fd_to_close_count++;
- break;
- }
-
- fd_to_close[i] = ret;
- fd_to_close_count++;
- }
-
- /* Close intermediary fds */
- for (i = 0; i < fd_to_close_count; i++) {
- ret = close(fd_to_close[i]);
- if (ret) {
- PERROR("close on temporary fd: %d.", fd_to_close[i]);
- /*
- * Not using an abort here would yield a complicated
- * error handling for the caller. If a failure occurs
- * here, the system is already in a bad state.
- */
- abort();
- }
- }
-
- ret = fd_valid;
-error:
- return ret;
-}
-
-/*
- * Needs to be called with ust_safe_guard_fd_mutex held when opening the fd.
- * Has strict checking of fd validity.
- *
- * If fd <= 2, dup the fd until fd > 2. This enables us to bypass
- * problems that can be encountered if UST uses stdin, stdout, stderr
- * fds for internal use (daemon etc.). This can happen if the
- * application closes either of those file descriptors. Intermediary fds
- * are closed as needed.
- *
- * Return -1 on error.
- *
- */
-int lttng_ust_add_fd_to_tracker(int fd)
-{
- int ret;
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
- assert(URCU_TLS(ust_fd_mutex_nest));
-
- if (IS_FD_STD(fd)) {
- ret = dup_std_fd(fd);
- if (ret < 0) {
- goto error;
- }
- fd = ret;
- }
-
- /* Trying to add an fd which we can not accommodate. */
- assert(IS_FD_VALID(fd));
- /* Setting an fd thats already set. */
- assert(!IS_FD_SET(fd, lttng_fd_set));
-
- ADD_FD_TO_SET(fd, lttng_fd_set);
- return fd;
-error:
- return ret;
-}
-
-/*
- * Needs to be called with ust_safe_guard_fd_mutex held when opening the fd.
- * Has strict checking for fd validity.
- */
-void lttng_ust_delete_fd_from_tracker(int fd)
-{
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
-
- assert(URCU_TLS(ust_fd_mutex_nest));
- /* Not a valid fd. */
- assert(IS_FD_VALID(fd));
- /* Deleting an fd which was not set. */
- assert(IS_FD_SET(fd, lttng_fd_set));
-
- DEL_FD_FROM_SET(fd, lttng_fd_set);
-}
-
-/*
- * Interface allowing applications to close arbitrary file descriptors.
- * We check if it is owned by lttng-ust, and return -1, errno=EBADF
- * instead of closing it if it is the case.
- */
-int lttng_ust_safe_close_fd(int fd, int (*close_cb)(int fd))
-{
- int ret = 0;
-
- lttng_ust_fixup_fd_tracker_tls();
-
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
-
- /*
- * If called from lttng-ust, we directly call close without
- * validating whether the FD is part of the tracked set.
- */
- if (URCU_TLS(ust_fd_mutex_nest))
- return close_cb(fd);
-
- lttng_ust_lock_fd_tracker();
- if (IS_FD_VALID(fd) && IS_FD_SET(fd, lttng_fd_set)) {
- ret = -1;
- errno = EBADF;
- } else {
- ret = close_cb(fd);
- }
- lttng_ust_unlock_fd_tracker();
-
- return ret;
-}
-
-/*
- * Interface allowing applications to close arbitrary streams.
- * We check if it is owned by lttng-ust, and return -1, errno=EBADF
- * instead of closing it if it is the case.
- */
-int lttng_ust_safe_fclose_stream(FILE *stream, int (*fclose_cb)(FILE *stream))
-{
- int ret = 0, fd;
-
- lttng_ust_fixup_fd_tracker_tls();
-
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
-
- /*
- * If called from lttng-ust, we directly call fclose without
- * validating whether the FD is part of the tracked set.
- */
- if (URCU_TLS(ust_fd_mutex_nest))
- return fclose_cb(stream);
-
- fd = fileno(stream);
-
- lttng_ust_lock_fd_tracker();
- if (IS_FD_VALID(fd) && IS_FD_SET(fd, lttng_fd_set)) {
- ret = -1;
- errno = EBADF;
- } else {
- ret = fclose_cb(stream);
- }
- lttng_ust_unlock_fd_tracker();
-
- return ret;
-}
-
-#ifdef __OpenBSD__
-static void set_close_success(int *p)
-{
- *p = 1;
-}
-static int test_close_success(const int *p)
-{
- return *p;
-}
-#else
-static void set_close_success(int *p __attribute__((unused)))
-{
-}
-static int test_close_success(const int *p __attribute__((unused)))
-{
- return 1;
-}
-#endif
-
-/*
- * Implement helper for closefrom() override.
- */
-int lttng_ust_safe_closefrom_fd(int lowfd, int (*close_cb)(int fd))
-{
- int ret = 0, close_success = 0, i;
-
- lttng_ust_fixup_fd_tracker_tls();
-
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
-
- if (lowfd < 0) {
- /*
- * NetBSD return EBADF if fd is invalid.
- */
- errno = EBADF;
- ret = -1;
- goto end;
- }
- /*
- * If called from lttng-ust, we directly call close without
- * validating whether the FD is part of the tracked set.
- */
- if (URCU_TLS(ust_fd_mutex_nest)) {
- for (i = lowfd; i < lttng_ust_max_fd; i++) {
- if (close_cb(i) < 0) {
- switch (errno) {
- case EBADF:
- continue;
- case EINTR:
- default:
- ret = -1;
- goto end;
- }
- }
- set_close_success(&close_success);
- }
- } else {
- lttng_ust_lock_fd_tracker();
- for (i = lowfd; i < lttng_ust_max_fd; i++) {
- if (IS_FD_VALID(i) && IS_FD_SET(i, lttng_fd_set))
- continue;
- if (close_cb(i) < 0) {
- switch (errno) {
- case EBADF:
- continue;
- case EINTR:
- default:
- ret = -1;
- lttng_ust_unlock_fd_tracker();
- goto end;
- }
- }
- set_close_success(&close_success);
- }
- lttng_ust_unlock_fd_tracker();
- }
- if (!test_close_success(&close_success)) {
- /*
- * OpenBSD return EBADF if fd is greater than all open
- * file descriptors.
- */
- ret = -1;
- errno = EBADF;
- }
-end:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * library wrappers to be used by non-LGPL compatible source code.
- */
-
-#include <urcu/uatomic.h>
-
-#include <lttng/urcu/static/pointer.h>
-/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
-#include <lttng/urcu/pointer.h>
-
-void *lttng_ust_rcu_dereference_sym(void *p)
-{
- return _lttng_ust_rcu_dereference(p);
-}
-
-void *lttng_ust_rcu_set_pointer_sym(void **p, void *v)
-{
- cmm_wmb();
- uatomic_set(p, v);
- return v;
-}
-
-void *lttng_ust_rcu_xchg_pointer_sym(void **p, void *v)
-{
- cmm_wmb();
- return uatomic_xchg(p, v);
-}
-
-void *lttng_ust_rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
-{
- cmm_wmb();
- return uatomic_cmpxchg(p, old, _new);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <pthread.h>
-#include <signal.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <poll.h>
-#include <unistd.h>
-#include <stdbool.h>
-#include <sys/mman.h>
-
-#include <urcu/arch.h>
-#include <urcu/wfcqueue.h>
-#include <lttng/urcu/static/urcu-ust.h>
-#include <lttng/urcu/pointer.h>
-#include <urcu/tls-compat.h>
-
-/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
-#undef _LGPL_SOURCE
-#include <lttng/urcu/urcu-ust.h>
-#define _LGPL_SOURCE
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-#ifdef __linux__
-static
-void *mremap_wrapper(void *old_address, size_t old_size,
- size_t new_size, int flags)
-{
- return mremap(old_address, old_size, new_size, flags);
-}
-#else
-
-#define MREMAP_MAYMOVE 1
-#define MREMAP_FIXED 2
-
-/*
- * mremap wrapper for non-Linux systems not allowing MAYMOVE.
- * This is not generic.
-*/
-static
-void *mremap_wrapper(void *old_address, size_t old_size,
- size_t new_size, int flags)
-{
- assert(!(flags & MREMAP_MAYMOVE));
-
- return MAP_FAILED;
-}
-#endif
-
-/* Sleep delay in ms */
-#define RCU_SLEEP_DELAY_MS 10
-#define INIT_NR_THREADS 8
-#define ARENA_INIT_ALLOC \
- sizeof(struct registry_chunk) \
- + INIT_NR_THREADS * sizeof(struct lttng_ust_urcu_reader)
-
-/*
- * Active attempts to check for reader Q.S. before calling sleep().
- */
-#define RCU_QS_ACTIVE_ATTEMPTS 100
-
-static
-int lttng_ust_urcu_refcount;
-
-/* If the headers do not support membarrier system call, fall back smp_mb. */
-#ifdef __NR_membarrier
-# define membarrier(...) syscall(__NR_membarrier, __VA_ARGS__)
-#else
-# define membarrier(...) -ENOSYS
-#endif
-
-enum membarrier_cmd {
- MEMBARRIER_CMD_QUERY = 0,
- MEMBARRIER_CMD_SHARED = (1 << 0),
- /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
- /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
- MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
- MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
-};
-
-static
-void _lttng_ust_urcu_init(void)
- __attribute__((constructor));
-static
-void lttng_ust_urcu_exit(void)
- __attribute__((destructor));
-
-#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
-int lttng_ust_urcu_has_sys_membarrier;
-#endif
-
-/*
- * rcu_gp_lock ensures mutual exclusion between threads calling
- * synchronize_rcu().
- */
-static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
-/*
- * rcu_registry_lock ensures mutual exclusion between threads
- * registering and unregistering themselves to/from the registry, and
- * with threads reading that registry from synchronize_rcu(). However,
- * this lock is not held all the way through the completion of awaiting
- * for the grace period. It is sporadically released between iterations
- * on the registry.
- * rcu_registry_lock may nest inside rcu_gp_lock.
- */
-static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
-
-static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
-static int initialized;
-
-static pthread_key_t lttng_ust_urcu_key;
-
-struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };
-
-/*
- * Pointer to registry elements. Written to only by each individual reader. Read
- * by both the reader and the writers.
- */
-DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);
-
-static CDS_LIST_HEAD(registry);
-
-struct registry_chunk {
- size_t data_len; /* data length */
- size_t used; /* amount of data used */
- struct cds_list_head node; /* chunk_list node */
- char data[];
-};
-
-struct registry_arena {
- struct cds_list_head chunk_list;
-};
-
-static struct registry_arena registry_arena = {
- .chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
-};
-
-/* Saved fork signal mask, protected by rcu_gp_lock */
-static sigset_t saved_fork_signal_mask;
-
-static void mutex_lock(pthread_mutex_t *mutex)
-{
- int ret;
-
-#ifndef DISTRUST_SIGNALS_EXTREME
- ret = pthread_mutex_lock(mutex);
- if (ret)
- abort();
-#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
- while ((ret = pthread_mutex_trylock(mutex)) != 0) {
- if (ret != EBUSY && ret != EINTR)
- abort();
- poll(NULL,0,10);
- }
-#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
-}
-
-static void mutex_unlock(pthread_mutex_t *mutex)
-{
- int ret;
-
- ret = pthread_mutex_unlock(mutex);
- if (ret)
- abort();
-}
-
-static void smp_mb_master(void)
-{
- if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
- if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
- abort();
- } else {
- cmm_smp_mb();
- }
-}
-
-/*
- * Always called with rcu_registry lock held. Releases this lock between
- * iterations and grabs it again. Holds the lock when it returns.
- */
-static void wait_for_readers(struct cds_list_head *input_readers,
- struct cds_list_head *cur_snap_readers,
- struct cds_list_head *qsreaders)
-{
- unsigned int wait_loops = 0;
- struct lttng_ust_urcu_reader *index, *tmp;
-
- /*
- * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
- * indicate quiescence (not nested), or observe the current
- * rcu_gp.ctr value.
- */
- for (;;) {
- if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
- wait_loops++;
-
- cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
- switch (lttng_ust_urcu_reader_state(&index->ctr)) {
- case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
- if (cur_snap_readers) {
- cds_list_move(&index->node,
- cur_snap_readers);
- break;
- }
- /* Fall-through */
- case LTTNG_UST_URCU_READER_INACTIVE:
- cds_list_move(&index->node, qsreaders);
- break;
- case LTTNG_UST_URCU_READER_ACTIVE_OLD:
- /*
- * Old snapshot. Leaving node in
- * input_readers will make us busy-loop
- * until the snapshot becomes current or
- * the reader becomes inactive.
- */
- break;
- }
- }
-
- if (cds_list_empty(input_readers)) {
- break;
- } else {
- /* Temporarily unlock the registry lock. */
- mutex_unlock(&rcu_registry_lock);
- if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
- (void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
- else
- caa_cpu_relax();
- /* Re-lock the registry lock before the next loop. */
- mutex_lock(&rcu_registry_lock);
- }
- }
-}
-
-void lttng_ust_urcu_synchronize_rcu(void)
-{
- CDS_LIST_HEAD(cur_snap_readers);
- CDS_LIST_HEAD(qsreaders);
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- assert(!ret);
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- assert(!ret);
-
- mutex_lock(&rcu_gp_lock);
-
- mutex_lock(&rcu_registry_lock);
-
-	if (cds_list_empty(&registry))
- goto out;
-
- /* All threads should read qparity before accessing data structure
- * where new ptr points to. */
- /* Write new ptr before changing the qparity */
- smp_mb_master();
-
- /*
- * Wait for readers to observe original parity or be quiescent.
- * wait_for_readers() can release and grab again rcu_registry_lock
- * interally.
- */
-	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
-
- /*
- * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
- */
- cmm_smp_mb();
-
- /* Switch parity: 0 -> 1, 1 -> 0 */
- CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr, lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);
-
- /*
- * Must commit qparity update to memory before waiting for other parity
- * quiescent state. Failure to do so could result in the writer waiting
- * forever while new readers are always accessing data (no progress).
- * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
- */
-
- /*
- * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
- */
- cmm_smp_mb();
-
- /*
- * Wait for readers to observe new parity or be quiescent.
- * wait_for_readers() can release and grab again rcu_registry_lock
- * interally.
- */
- wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
-
- /*
- * Put quiescent reader list back into registry.
- */
-	cds_list_splice(&qsreaders, &registry);
-
- /*
- * Finish waiting for reader threads before letting the old ptr being
- * freed.
- */
- smp_mb_master();
-out:
- mutex_unlock(&rcu_registry_lock);
- mutex_unlock(&rcu_gp_lock);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- assert(!ret);
-}
-
-/*
- * library wrappers to be used by non-LGPL compatible source code.
- */
-
-void lttng_ust_urcu_read_lock(void)
-{
- _lttng_ust_urcu_read_lock();
-}
-
-void lttng_ust_urcu_read_unlock(void)
-{
- _lttng_ust_urcu_read_unlock();
-}
-
-int lttng_ust_urcu_read_ongoing(void)
-{
- return _lttng_ust_urcu_read_ongoing();
-}
-
-/*
- * Only grow for now. If empty, allocate a ARENA_INIT_ALLOC sized chunk.
- * Else, try expanding the last chunk. If this fails, allocate a new
- * chunk twice as big as the last chunk.
- * Memory used by chunks _never_ moves. A chunk could theoretically be
- * freed when all "used" slots are released, but we don't do it at this
- * point.
- */
-static
-void expand_arena(struct registry_arena *arena)
-{
- struct registry_chunk *new_chunk, *last_chunk;
- size_t old_chunk_len, new_chunk_len;
-
- /* No chunk. */
- if (cds_list_empty(&arena->chunk_list)) {
- assert(ARENA_INIT_ALLOC >=
- sizeof(struct registry_chunk)
- + sizeof(struct lttng_ust_urcu_reader));
- new_chunk_len = ARENA_INIT_ALLOC;
- new_chunk = (struct registry_chunk *) mmap(NULL,
- new_chunk_len,
- PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE,
- -1, 0);
- if (new_chunk == MAP_FAILED)
- abort();
- memset(new_chunk, 0, new_chunk_len);
- new_chunk->data_len =
- new_chunk_len - sizeof(struct registry_chunk);
- cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
- return; /* We're done. */
- }
-
- /* Try expanding last chunk. */
- last_chunk = cds_list_entry(arena->chunk_list.prev,
- struct registry_chunk, node);
- old_chunk_len =
- last_chunk->data_len + sizeof(struct registry_chunk);
- new_chunk_len = old_chunk_len << 1;
-
- /* Don't allow memory mapping to move, just expand. */
- new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
- new_chunk_len, 0);
- if (new_chunk != MAP_FAILED) {
- /* Should not have moved. */
- assert(new_chunk == last_chunk);
- memset((char *) last_chunk + old_chunk_len, 0,
- new_chunk_len - old_chunk_len);
- last_chunk->data_len =
- new_chunk_len - sizeof(struct registry_chunk);
- return; /* We're done. */
- }
-
- /* Remap did not succeed, we need to add a new chunk. */
- new_chunk = (struct registry_chunk *) mmap(NULL,
- new_chunk_len,
- PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE,
- -1, 0);
- if (new_chunk == MAP_FAILED)
- abort();
- memset(new_chunk, 0, new_chunk_len);
- new_chunk->data_len =
- new_chunk_len - sizeof(struct registry_chunk);
- cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
-}
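
The flags == 0 argument is what pins the mapping: without MREMAP_MAYMOVE, mremap() may only grow the region in place and otherwise returns MAP_FAILED, which is exactly the fallback path above. A self-contained, Linux-only illustration of that behaviour (sizes are arbitrary):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = (size_t) sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	/* flags == 0: grow in place or fail; the mapping never moves. */
	if (mremap(p, len, 2 * len, 0) == MAP_FAILED)
		printf("in-place growth refused; caller must mmap a new chunk\n");
	else
		printf("grown in place at %p\n", p);
	return 0;
}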
-
-static
-struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
-{
- struct registry_chunk *chunk;
- struct lttng_ust_urcu_reader *rcu_reader_reg;
- int expand_done = 0; /* Only allow to expand once per alloc */
- size_t len = sizeof(struct lttng_ust_urcu_reader);
-
-retry:
- cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
- if (chunk->data_len - chunk->used < len)
- continue;
- /* Find spot */
- for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
- rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
- rcu_reader_reg++) {
- if (!rcu_reader_reg->alloc) {
- rcu_reader_reg->alloc = 1;
- chunk->used += len;
- return rcu_reader_reg;
- }
- }
- }
-
- if (!expand_done) {
- expand_arena(arena);
- expand_done = 1;
- goto retry;
- }
-
- return NULL;
-}
-
-/* Called with signals off and mutex locked */
-static
-void add_thread(void)
-{
- struct lttng_ust_urcu_reader *rcu_reader_reg;
- int ret;
-
- rcu_reader_reg = arena_alloc(&registry_arena);
- if (!rcu_reader_reg)
- abort();
- ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
- if (ret)
- abort();
-
- /* Add to registry */
- rcu_reader_reg->tid = pthread_self();
- assert(rcu_reader_reg->ctr == 0);
- cds_list_add(&rcu_reader_reg->node, &registry);
- /*
- * Reader threads keep pointers into the reader registry. This is
- * why its memory must never be relocated.
- */
- URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
-}
-
-/* Called with mutex locked */
-static
-void cleanup_thread(struct registry_chunk *chunk,
- struct lttng_ust_urcu_reader *rcu_reader_reg)
-{
- rcu_reader_reg->ctr = 0;
- cds_list_del(&rcu_reader_reg->node);
- rcu_reader_reg->tid = 0;
- rcu_reader_reg->alloc = 0;
- chunk->used -= sizeof(struct lttng_ust_urcu_reader);
-}
-
-static
-struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
-{
- struct registry_chunk *chunk;
-
- cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
- if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[0])
- continue;
- if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len])
- continue;
- return chunk;
- }
- return NULL;
-}
-
-/* Called with signals off and mutex locked */
-static
-void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
-{
- cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
- URCU_TLS(lttng_ust_urcu_reader) = NULL;
-}
-
-/* Disable signals, take mutex, add to registry */
-void lttng_ust_urcu_register(void)
-{
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- if (ret)
- abort();
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- if (ret)
- abort();
-
- /*
- * Check whether a signal handler concurrently registered our
- * thread since the check in rcu_read_lock().
- */
- if (URCU_TLS(lttng_ust_urcu_reader))
- goto end;
-
- /*
- * Take care of early registration before lttng_ust_urcu constructor.
- */
- _lttng_ust_urcu_init();
-
- mutex_lock(&rcu_registry_lock);
- add_thread();
- mutex_unlock(&rcu_registry_lock);
-end:
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- if (ret)
- abort();
-}
-
-void lttng_ust_urcu_register_thread(void)
-{
- if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
- lttng_ust_urcu_register(); /* If not yet registered. */
-}
-
-/* Disable signals, take mutex, remove from registry */
-static
-void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
-{
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- if (ret)
- abort();
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- if (ret)
- abort();
-
- mutex_lock(&rcu_registry_lock);
- remove_thread(rcu_reader_reg);
- mutex_unlock(&rcu_registry_lock);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- if (ret)
- abort();
- lttng_ust_urcu_exit();
-}
-
-/*
- * Remove thread from the registry when it exits, and flag it as
- * destroyed so garbage collection can take care of it.
- */
-static
-void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
-{
- lttng_ust_urcu_unregister(rcu_key);
-}
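
The notifier relies on a pthread_key_create() destructor, which the pthreads runtime invokes at thread exit with the thread-specific value as its argument; that is how each reader unregisters without an explicit call. A minimal sketch of the pattern, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_key_t exit_key;

static void on_thread_exit(void *value)
{
	/* Runs when a thread holding a non-NULL value for exit_key ends. */
	printf("thread exiting, registration %p\n", value);
}

static void *worker(void *arg)
{
	pthread_setspecific(exit_key, arg);
	return NULL;	/* destructor fires after the thread returns */
}

int main(void)
{
	pthread_t t;
	int token;

	pthread_key_create(&exit_key, on_thread_exit);
	pthread_create(&t, NULL, worker, &token);
	pthread_join(t, NULL);
	return 0;
}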
-
-#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
-static
-void lttng_ust_urcu_sys_membarrier_status(bool available)
-{
- if (!available)
- abort();
-}
-#else
-static
-void lttng_ust_urcu_sys_membarrier_status(bool available)
-{
- if (!available)
- return;
- lttng_ust_urcu_has_sys_membarrier = 1;
-}
-#endif
-
-static
-void lttng_ust_urcu_sys_membarrier_init(void)
-{
- bool available = false;
- int mask;
-
- mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
- if (mask >= 0) {
- if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
- if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
- abort();
- available = true;
- }
- }
- lttng_ust_urcu_sys_membarrier_status(available);
-}
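
The MEMBARRIER_CMD_QUERY result is a bitmask of supported commands, and PRIVATE_EXPEDITED must be registered once per process before use, as the code above does. Since glibc long shipped no membarrier() wrapper, the membarrier() call used here is presumably a thin shim over syscall(2); a hedged sketch of such a shim (hypothetical name):

#define _GNU_SOURCE
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical wrapper: route the membarrier(2) system call through
 * syscall(2), since no libc wrapper can be assumed. */
static int membarrier_shim(int cmd, unsigned int flags)
{
	return (int) syscall(__NR_membarrier, cmd, flags);
}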
-
-static
-void _lttng_ust_urcu_init(void)
-{
- mutex_lock(&init_lock);
- if (!lttng_ust_urcu_refcount++) {
- int ret;
-
- ret = pthread_key_create(&lttng_ust_urcu_key,
- lttng_ust_urcu_thread_exit_notifier);
- if (ret)
- abort();
- lttng_ust_urcu_sys_membarrier_init();
- initialized = 1;
- }
- mutex_unlock(&init_lock);
-}
-
-static
-void lttng_ust_urcu_exit(void)
-{
- mutex_lock(&init_lock);
- if (!--lttng_ust_urcu_refcount) {
- struct registry_chunk *chunk, *tmp;
- int ret;
-
- cds_list_for_each_entry_safe(chunk, tmp,
- &registry_arena.chunk_list, node) {
- munmap((void *) chunk, chunk->data_len
- + sizeof(struct registry_chunk));
- }
- CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
- ret = pthread_key_delete(lttng_ust_urcu_key);
- if (ret)
- abort();
- }
- mutex_unlock(&init_lock);
-}
-
-/*
- * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
- * sure that fork() does not race with a concurrent thread executing with
- * any of those locks held. This ensures that the registry and data
- * protected by rcu_gp_lock are in a coherent state in the child.
- */
-void lttng_ust_urcu_before_fork(void)
-{
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- assert(!ret);
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- assert(!ret);
- mutex_lock(&rcu_gp_lock);
- mutex_lock(&rcu_registry_lock);
- saved_fork_signal_mask = oldmask;
-}
-
-void lttng_ust_urcu_after_fork_parent(void)
-{
- sigset_t oldmask;
- int ret;
-
- oldmask = saved_fork_signal_mask;
- mutex_unlock(&rcu_registry_lock);
- mutex_unlock(&rcu_gp_lock);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- assert(!ret);
-}
-
-/*
- * Prune all registry entries except our own thread: after fork(), only
- * the calling thread survives in the child, matching the Linux fork
- * behavior. Called with rcu_gp_lock and rcu_registry_lock held.
- */
-static
-void lttng_ust_urcu_prune_registry(void)
-{
- struct registry_chunk *chunk;
- struct lttng_ust_urcu_reader *rcu_reader_reg;
-
- cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
- for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
- rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
- rcu_reader_reg++) {
- if (!rcu_reader_reg->alloc)
- continue;
- if (rcu_reader_reg->tid == pthread_self())
- continue;
- cleanup_thread(chunk, rcu_reader_reg);
- }
- }
-}
-
-void lttng_ust_urcu_after_fork_child(void)
-{
- sigset_t oldmask;
- int ret;
-
- lttng_ust_urcu_prune_registry();
- oldmask = saved_fork_signal_mask;
- mutex_unlock(&rcu_registry_lock);
- mutex_unlock(&rcu_gp_lock);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- assert(!ret);
-}
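
These three hooks have exactly the prepare/parent/child shape expected by pthread_atfork(). Within lttng-ust they are driven by its own fork handling, but an application calling them directly could wire them up as follows (illustrative only, not how lttng-ust installs them):

#include <pthread.h>

void lttng_ust_urcu_before_fork(void);
void lttng_ust_urcu_after_fork_parent(void);
void lttng_ust_urcu_after_fork_child(void);

static void install_fork_hooks(void)
{
	/* prepare runs in the parent before fork(); the other two run
	 * after, in the parent and the child respectively. */
	(void) pthread_atfork(lttng_ust_urcu_before_fork,
			lttng_ust_urcu_after_fork_parent,
			lttng_ust_urcu_after_fork_child);
}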
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2021 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#include "common/logging.h"
-#include "common/ust-fd.h"
-
-static
-void lttng_ust_common_init(void)
- __attribute__((constructor));
-static
-void lttng_ust_common_init(void)
-{
- /* Initialize logging for liblttng-ust-common */
- lttng_ust_logging_init();
-
- /*
- * Initialize the fd-tracker; other libraries using it should also call
- * this in their own constructors, in case theirs run before this one.
- */
- lttng_ust_init_fd_tracker();
-}
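
Because ELF constructor order across shared objects is unspecified, lttng_ust_init_fd_tracker() must tolerate being called from several constructors in any order. A common way to get that idempotence is a once-guard; a minimal sketch with hypothetical names:

#include <pthread.h>

static pthread_once_t init_once = PTHREAD_ONCE_INIT;

static void do_init(void)
{
	/* one-time setup goes here */
}

/* Safe to call from any number of constructors, in any order. */
static void hypothetical_init_fd_tracker(void)
{
	(void) pthread_once(&init_once, do_init);
}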
test_shm_SOURCES = shm.c
test_shm_LDADD = \
$(top_builddir)/src/common/libringbuffer.la \
- $(top_builddir)/src/lib/lttng-ust/liblttng-ust-common.la \
+ $(top_builddir)/src/lib/lttng-ust-common/liblttng-ust-common.la \
$(top_builddir)/src/common/libcommon.la \
$(top_builddir)/tests/utils/libtap.a