/src/lib/lttng-ust-libc-wrapper/Makefile
/src/lib/lttng-ust-python-agent/Makefile
/src/lib/Makefile
-/src/liblttng-ust/Makefile
+/src/lib/lttng-ust/Makefile
/src/python-lttngust/Makefile
/src/Makefile
/tests/Makefile
src/lib/lttng-ust-java-agent/Makefile
src/lib/lttng-ust-java/Makefile
src/lib/lttng-ust-libc-wrapper/Makefile
- src/liblttng-ust/Makefile
+ src/lib/lttng-ust/Makefile
src/lib/lttng-ust-python-agent/Makefile
src/lib/Makefile
src/lttng-ust-ctl.pc
CFLAGS='$(CFLAGS)' \
AM_CFLAGS='$(AM_CFLAGS)' \
LDFLAGS="$(LDFLAGS)" \
- AM_LDFLAGS='$(AM_LDFLAGS) -L../../../src/liblttng-ust/.libs -Wl,-rpath="$(PWD)/../../src/liblttng-ust/.libs/" -Wl,-rpath-link="$(PWD)/../../src/liblttng-ust/.libs/"' \
+ AM_LDFLAGS='$(AM_LDFLAGS) -L../../../src/lib/lttng-ust/.libs -Wl,-rpath="$(PWD)/../../src/lib/lttng-ust/.libs/" -Wl,-rpath-link="$(PWD)/../../src/lib/lttng-ust/.libs/"' \
LTTNG_GEN_TP_PATH="$$rel_src_subdir$(top_srcdir)/tools/" \
AM_V_P="$(AM_V_P)" \
AM_V_at="$(AM_V_at)" \
CXX="$(CXX)" \
$(CMAKE) \
-DCMAKE_INCLUDE_PATH="$(abs_top_srcdir)/include;$(abs_top_builddir)/include" \
- -DCMAKE_LIBRARY_PATH="$(abs_top_builddir)/src/liblttng-ust/.libs" \
+ -DCMAKE_LIBRARY_PATH="$(abs_top_builddir)/src/lib/lttng-ust/.libs" \
-DCMAKE_C_FLAGS="$(AM_CFLAGS) $(CPPFLAGS) $(CFLAGS)" \
-DCMAKE_CXX_FLAGS="$(AM_CXXFLAGS) $(CXXFLAGS) $(CPPFLAGS)" \
-DCMAKE_EXE_LINKER_FLAGS="$(AM_LDFLAGS) $(LDFLAGS)" \
SUBDIRS = \
common \
- liblttng-ust \
lib
if ENABLE_PYTHON_AGENT
#include "common/dynamic-type.h"
#include "common/logging.h"
-#include "../liblttng-ust/ust-events-internal.h"
+#include "lib/lttng-ust/ust-events-internal.h"
#include "common/compat/pthread.h"
#define USTCOMM_CODE_OFFSET(code) \
# SPDX-License-Identifier: LGPL-2.1-only
SUBDIRS = \
+ lttng-ust \
lttng-ust-ctl \
lttng-ust-fd \
lttng-ust-fork \
-version-info $(LTTNG_UST_CTL_LIBRARY_VERSION)
liblttng_ust_ctl_la_LIBADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust-common.la \
- $(top_builddir)/src/liblttng-ust/liblttng-ust-support.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust-common.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust-support.la \
$(top_builddir)/src/common/libustcomm.la \
$(top_builddir)/src/common/libcommon.la \
$(DL_LIBS)
#include "common/ringbuffer/backend.h"
#include "common/ringbuffer/frontend.h"
-#include "liblttng-ust/ust-events-internal.h"
-#include "liblttng-ust/wait.h"
-#include "liblttng-ust/lttng-rb-clients.h"
-#include "liblttng-ust/clock.h"
-#include "liblttng-ust/getenv.h"
-#include "liblttng-ust/lttng-tracer-core.h"
-#include "liblttng-ust/lttng-counter-client.h"
+#include "lib/lttng-ust/ust-events-internal.h"
+#include "lib/lttng-ust/wait.h"
+#include "lib/lttng-ust/lttng-rb-clients.h"
+#include "lib/lttng-ust/clock.h"
+#include "lib/lttng-ust/getenv.h"
+#include "lib/lttng-ust/lttng-tracer-core.h"
+#include "lib/lttng-ust/lttng-counter-client.h"
#include "common/counter/smp.h"
#include "common/counter/counter.h"
lttng-ust-cyg-profile.h
liblttng_ust_cyg_profile_la_LIBADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
liblttng_ust_cyg_profile_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
lttng-ust-cyg-profile-fast.h
liblttng_ust_cyg_profile_fast_la_LIBADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
liblttng_ust_cyg_profile_fast_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
ust_dl.h
liblttng_ust_dl_la_LIBADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(top_builddir)/src/common/libcommon.la \
$(DL_LIBS)
#include "common/macros.h"
#include "common/logging.h"
-#include "liblttng-ust/ust-events-internal.h"
+#include "lib/lttng-ust/ust-events-internal.h"
/* Include link.h last else it conflicts with ust-dlfcn. */
#include <link.h>
lttng-ust-fd.c
liblttng_ust_fd_la_LIBADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
liblttng_ust_fd_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
lib_LTLIBRARIES = liblttng-ust-fork.la
liblttng_ust_fork_la_SOURCES = ustfork.c
liblttng_ust_fork_la_LIBADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
liblttng_ust_fork_la_CFLAGS = -DUST_COMPONENT=liblttng-ust-fork $(AM_CFLAGS)
nodist_liblttng_ust_context_jni_la_SOURCES = org_lttng_ust_agent_context_LttngContextApi.h
liblttng_ust_context_jni_la_LIBADD = -lc \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la
nodist_liblttng_ust_jul_jni_la_SOURCES = org_lttng_ust_agent_jul_LttngJulApi.h
liblttng_ust_jul_jni_la_LIBADD = -lc \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(top_builddir)/src/lib/lttng-ust-java-agent/jni/common/liblttng-ust-context-jni.la
nodist_liblttng_ust_log4j_jni_la_SOURCES = org_lttng_ust_agent_log4j_LttngLog4jApi.h
liblttng_ust_log4j_jni_la_LIBADD = -lc \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(top_builddir)/src/lib/lttng-ust-java-agent/jni/common/liblttng-ust-context-jni.la
nodist_liblttng_ust_java_la_SOURCES = org_lttng_ust_LTTngUst.h
liblttng_ust_java_la_LIBADD = -lc \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la
$(jarfile): classnoinst.stamp
$(JAR) cf $(JARFLAGS) $@ $(pkgpath)/*.class
ust_libc.h
liblttng_ust_libc_wrapper_la_LIBADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
liblttng_ust_libc_wrapper_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
ust_pthread.h
liblttng_ust_pthread_wrapper_la_LIBADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
liblttng_ust_pthread_wrapper_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
liblttng_ust_python_agent_la_SOURCES = lttng_ust_python.c lttng_ust_python.h
liblttng_ust_python_agent_la_LIBADD = \
-lc \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la
# Follow the main library soname for co-installability
liblttng_ust_python_agent_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
+
+noinst_LTLIBRARIES = liblttng-ust-runtime.la liblttng-ust-support.la
+
+lib_LTLIBRARIES = liblttng-ust-common.la liblttng-ust-tracepoint.la liblttng-ust.la
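+
+# liblttng-ust-runtime.la and liblttng-ust-support.la are noinst
+# convenience libraries; they are folded into the installed
+# liblttng-ust.la through liblttng_ust_la_LIBADD below.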
+
+# ust-common
+liblttng_ust_common_la_SOURCES = \
+ fd-tracker.c \
+ ust-common.c \
+ lttng-ust-urcu.c \
+ lttng-ust-urcu-pointer.c
+
+liblttng_ust_common_la_LIBADD = \
+ $(top_builddir)/src/common/libcommon.la
+
+liblttng_ust_common_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
+
+liblttng_ust_tracepoint_la_SOURCES = \
+ tracepoint.c \
+ tracepoint-weak-test.c \
+ tracepoint-internal.h \
+ lttng-tracer-core.h \
+ jhash.h \
+ error.h
+
+liblttng_ust_tracepoint_la_LIBADD = \
+ liblttng-ust-common.la \
+ $(top_builddir)/src/common/libcommon.la \
+ $(DL_LIBS)
+
+liblttng_ust_tracepoint_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
+liblttng_ust_tracepoint_la_CFLAGS = -DUST_COMPONENT="liblttng_ust_tracepoint" $(AM_CFLAGS)
+
+liblttng_ust_runtime_la_SOURCES = \
+ bytecode.h \
+ lttng-ust-comm.c \
+ lttng-ust-abi.c \
+ lttng-probes.c \
+ lttng-bytecode.c \
+ lttng-bytecode.h \
+ lttng-bytecode-validator.c \
+ lttng-bytecode-specialize.c \
+ lttng-bytecode-interpreter.c \
+ lttng-context-provider.c \
+ lttng-context-vtid.c \
+ lttng-context-vpid.c \
+ lttng-context-pthread-id.c \
+ lttng-context-procname.c \
+ lttng-context-ip.c \
+ lttng-context-cpu-id.c \
+ lttng-context-cgroup-ns.c \
+ lttng-context-ipc-ns.c \
+ lttng-context-mnt-ns.c \
+ lttng-context-net-ns.c \
+ lttng-context-pid-ns.c \
+ lttng-context-time-ns.c \
+ lttng-context-user-ns.c \
+ lttng-context-uts-ns.c \
+ lttng-context-vuid.c \
+ lttng-context-veuid.c \
+ lttng-context-vsuid.c \
+ lttng-context-vgid.c \
+ lttng-context-vegid.c \
+ lttng-context-vsgid.c \
+ lttng-context.c \
+ lttng-events.c \
+ lttng-hash-helper.h \
+ lttng-ust-elf.c \
+ lttng-ust-elf.h \
+ lttng-ust-statedump.c \
+ lttng-ust-statedump.h \
+ lttng-ust-statedump-provider.h \
+ ust_lib.c \
+ ust_lib.h \
+ context-internal.h \
+ context-provider-internal.h \
+ tracepoint-internal.h \
+ ust-events-internal.h \
+ clock.h \
+ wait.h \
+ jhash.h \
+ lttng-ust-uuid.h \
+ error.h \
+ tracef.c \
+ lttng-ust-tracef-provider.h \
+ tracelog.c \
+ lttng-ust-tracelog-provider.h \
+ getenv.h \
+ string-utils.c \
+ string-utils.h \
+ event-notifier-notification.c \
+ ns.h \
+ creds.h \
+ rculfhash.c \
+ rculfhash.h \
+ rculfhash-internal.h \
+ rculfhash-mm-chunk.c \
+ rculfhash-mm-mmap.c \
+ rculfhash-mm-order.c \
+ compat_futex.c \
+ futex.h
+
+if HAVE_PERF_EVENT
+liblttng_ust_runtime_la_SOURCES += \
+ lttng-context-perf-counters.c \
+ perf_event.h
+endif
+
+liblttng_ust_support_la_SOURCES = \
+ lttng-tracer.h \
+ lttng-tracer-core.h \
+ ust-core.c \
+ getenv.h \
+ getenv.c \
+ lttng-ust-dynamic-type.c \
+ lttng-rb-clients.h \
+ lttng-ring-buffer-client-template.h \
+ lttng-ring-buffer-client-discard.c \
+ lttng-ring-buffer-client-discard-rt.c \
+ lttng-ring-buffer-client-overwrite.c \
+ lttng-ring-buffer-client-overwrite-rt.c \
+ lttng-ring-buffer-metadata-client-template.h \
+ lttng-ring-buffer-metadata-client.c \
+ lttng-counter-client.h \
+ lttng-counter-client-percpu-32-modular.c \
+ lttng-counter-client-percpu-64-modular.c \
+ lttng-clock.c lttng-getcpu.c
+
+liblttng_ust_la_SOURCES =
+
+liblttng_ust_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
+
+liblttng_ust_support_la_LIBADD = \
+ $(top_builddir)/src/common/libringbuffer.la \
+ $(top_builddir)/src/common/libcounter.la
+
+liblttng_ust_la_LIBADD = \
+ -lrt \
+ liblttng-ust-common.la \
+ $(top_builddir)/src/common/libustcomm.la \
+ $(top_builddir)/src/common/libcommon.la \
+ liblttng-ust-tracepoint.la \
+ liblttng-ust-runtime.la liblttng-ust-support.la \
+ $(DL_LIBS)
+
+liblttng_ust_la_CFLAGS = -DUST_COMPONENT="liblttng_ust" $(AM_CFLAGS)
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright 2012-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _BYTECODE_H
+#define _BYTECODE_H
+
+#include <stdint.h>
+#include <lttng/ust-abi.h>
+
+/*
+ * offsets are absolute from start of bytecode.
+ */
+
+struct field_ref {
+ /* Initially, symbol offset. After link, field offset. */
+ uint16_t offset;
+} __attribute__((packed));
+
+struct get_symbol {
+ /* Symbol offset. */
+ uint16_t offset;
+} __attribute__((packed));
+
+struct get_index_u16 {
+ uint16_t index;
+} __attribute__((packed));
+
+struct get_index_u64 {
+ uint64_t index;
+} __attribute__((packed));
+
+struct literal_numeric {
+ int64_t v;
+} __attribute__((packed));
+
+struct literal_double {
+ double v;
+} __attribute__((packed));
+
+struct literal_string {
+ char string[0];
+} __attribute__((packed));
+
+enum bytecode_op {
+ BYTECODE_OP_UNKNOWN = 0,
+
+ BYTECODE_OP_RETURN = 1,
+
+ /* binary */
+ BYTECODE_OP_MUL = 2,
+ BYTECODE_OP_DIV = 3,
+ BYTECODE_OP_MOD = 4,
+ BYTECODE_OP_PLUS = 5,
+ BYTECODE_OP_MINUS = 6,
+ BYTECODE_OP_BIT_RSHIFT = 7,
+ BYTECODE_OP_BIT_LSHIFT = 8,
+ BYTECODE_OP_BIT_AND = 9,
+ BYTECODE_OP_BIT_OR = 10,
+ BYTECODE_OP_BIT_XOR = 11,
+
+ /* binary comparators */
+ BYTECODE_OP_EQ = 12,
+ BYTECODE_OP_NE = 13,
+ BYTECODE_OP_GT = 14,
+ BYTECODE_OP_LT = 15,
+ BYTECODE_OP_GE = 16,
+ BYTECODE_OP_LE = 17,
+
+ /* string binary comparator: apply to */
+ BYTECODE_OP_EQ_STRING = 18,
+ BYTECODE_OP_NE_STRING = 19,
+ BYTECODE_OP_GT_STRING = 20,
+ BYTECODE_OP_LT_STRING = 21,
+ BYTECODE_OP_GE_STRING = 22,
+ BYTECODE_OP_LE_STRING = 23,
+
+ /* s64 binary comparator */
+ BYTECODE_OP_EQ_S64 = 24,
+ BYTECODE_OP_NE_S64 = 25,
+ BYTECODE_OP_GT_S64 = 26,
+ BYTECODE_OP_LT_S64 = 27,
+ BYTECODE_OP_GE_S64 = 28,
+ BYTECODE_OP_LE_S64 = 29,
+
+ /* double binary comparator */
+ BYTECODE_OP_EQ_DOUBLE = 30,
+ BYTECODE_OP_NE_DOUBLE = 31,
+ BYTECODE_OP_GT_DOUBLE = 32,
+ BYTECODE_OP_LT_DOUBLE = 33,
+ BYTECODE_OP_GE_DOUBLE = 34,
+ BYTECODE_OP_LE_DOUBLE = 35,
+
+ /* Mixed S64-double binary comparators */
+ BYTECODE_OP_EQ_DOUBLE_S64 = 36,
+ BYTECODE_OP_NE_DOUBLE_S64 = 37,
+ BYTECODE_OP_GT_DOUBLE_S64 = 38,
+ BYTECODE_OP_LT_DOUBLE_S64 = 39,
+ BYTECODE_OP_GE_DOUBLE_S64 = 40,
+ BYTECODE_OP_LE_DOUBLE_S64 = 41,
+
+ BYTECODE_OP_EQ_S64_DOUBLE = 42,
+ BYTECODE_OP_NE_S64_DOUBLE = 43,
+ BYTECODE_OP_GT_S64_DOUBLE = 44,
+ BYTECODE_OP_LT_S64_DOUBLE = 45,
+ BYTECODE_OP_GE_S64_DOUBLE = 46,
+ BYTECODE_OP_LE_S64_DOUBLE = 47,
+
+ /* unary */
+ BYTECODE_OP_UNARY_PLUS = 48,
+ BYTECODE_OP_UNARY_MINUS = 49,
+ BYTECODE_OP_UNARY_NOT = 50,
+ BYTECODE_OP_UNARY_PLUS_S64 = 51,
+ BYTECODE_OP_UNARY_MINUS_S64 = 52,
+ BYTECODE_OP_UNARY_NOT_S64 = 53,
+ BYTECODE_OP_UNARY_PLUS_DOUBLE = 54,
+ BYTECODE_OP_UNARY_MINUS_DOUBLE = 55,
+ BYTECODE_OP_UNARY_NOT_DOUBLE = 56,
+
+ /* logical */
+ BYTECODE_OP_AND = 57,
+ BYTECODE_OP_OR = 58,
+
+ /* load field ref */
+ BYTECODE_OP_LOAD_FIELD_REF = 59,
+ BYTECODE_OP_LOAD_FIELD_REF_STRING = 60,
+ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE = 61,
+ BYTECODE_OP_LOAD_FIELD_REF_S64 = 62,
+ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE = 63,
+
+ /* load immediate from operand */
+ BYTECODE_OP_LOAD_STRING = 64,
+ BYTECODE_OP_LOAD_S64 = 65,
+ BYTECODE_OP_LOAD_DOUBLE = 66,
+
+ /* cast */
+ BYTECODE_OP_CAST_TO_S64 = 67,
+ BYTECODE_OP_CAST_DOUBLE_TO_S64 = 68,
+ BYTECODE_OP_CAST_NOP = 69,
+
+ /* get context ref */
+ BYTECODE_OP_GET_CONTEXT_REF = 70,
+ BYTECODE_OP_GET_CONTEXT_REF_STRING = 71,
+ BYTECODE_OP_GET_CONTEXT_REF_S64 = 72,
+ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE = 73,
+
+ /* load userspace field ref */
+ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING = 74,
+ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75,
+
+ /*
+ * load immediate star globbing pattern (literal string)
+ * from immediate
+ */
+ BYTECODE_OP_LOAD_STAR_GLOB_STRING = 76,
+
+ /* globbing pattern binary operator: apply to */
+ BYTECODE_OP_EQ_STAR_GLOB_STRING = 77,
+ BYTECODE_OP_NE_STAR_GLOB_STRING = 78,
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ BYTECODE_OP_GET_CONTEXT_ROOT = 79,
+ BYTECODE_OP_GET_APP_CONTEXT_ROOT = 80,
+ BYTECODE_OP_GET_PAYLOAD_ROOT = 81,
+
+ BYTECODE_OP_GET_SYMBOL = 82,
+ BYTECODE_OP_GET_SYMBOL_FIELD = 83,
+ BYTECODE_OP_GET_INDEX_U16 = 84,
+ BYTECODE_OP_GET_INDEX_U64 = 85,
+
+ BYTECODE_OP_LOAD_FIELD = 86,
+ BYTECODE_OP_LOAD_FIELD_S8 = 87,
+ BYTECODE_OP_LOAD_FIELD_S16 = 88,
+ BYTECODE_OP_LOAD_FIELD_S32 = 89,
+ BYTECODE_OP_LOAD_FIELD_S64 = 90,
+ BYTECODE_OP_LOAD_FIELD_U8 = 91,
+ BYTECODE_OP_LOAD_FIELD_U16 = 92,
+ BYTECODE_OP_LOAD_FIELD_U32 = 93,
+ BYTECODE_OP_LOAD_FIELD_U64 = 94,
+ BYTECODE_OP_LOAD_FIELD_STRING = 95,
+ BYTECODE_OP_LOAD_FIELD_SEQUENCE = 96,
+ BYTECODE_OP_LOAD_FIELD_DOUBLE = 97,
+
+ BYTECODE_OP_UNARY_BIT_NOT = 98,
+
+ BYTECODE_OP_RETURN_S64 = 99,
+
+ NR_BYTECODE_OPS,
+};
+
+typedef uint8_t bytecode_opcode_t;
+
+struct load_op {
+ bytecode_opcode_t op;
+ /*
+	 * Data to load. Size is known from the opcode (enum bytecode_op)
+	 * and the terminating null character.
+ */
+ char data[0];
+} __attribute__((packed));
+
+struct binary_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+struct unary_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+/* skip_offset is absolute from start of bytecode */
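+/*
+ * Reading note (not normative): for the logical AND/OR operators, the
+ * interpreter may short-circuit the second operand and resume at the
+ * instruction located at skip_offset.
+ */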
+struct logical_op {
+ bytecode_opcode_t op;
+ uint16_t skip_offset; /* bytecode insn, if skip second test */
+} __attribute__((packed));
+
+struct cast_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+struct return_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+#endif /* _BYTECODE_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010 Pierre-Marc Fournier
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _UST_CLOCK_H
+#define _UST_CLOCK_H
+
+#include <time.h>
+#include <sys/time.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <urcu/system.h>
+#include <urcu/arch.h>
+#include <lttng/ust-clock.h>
+
+#include "lttng-ust-uuid.h"
+
+struct lttng_ust_trace_clock {
+ uint64_t (*read64)(void);
+ uint64_t (*freq)(void);
+ int (*uuid)(char *uuid);
+ const char *(*name)(void);
+ const char *(*description)(void);
+};
+
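+/*
+ * Expected to point to the override callbacks when a clock plugin (cf.
+ * the LTTNG_UST_CLOCK_PLUGIN environment variable) is loaded; when it
+ * stays NULL, trace_clock_read64() below falls back to CLOCK_MONOTONIC.
+ */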
+extern struct lttng_ust_trace_clock *lttng_ust_trace_clock
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_clock_init(void);
+
+/* Use the kernel MONOTONIC clock. */
+
+static __inline__
+uint64_t trace_clock_read64_monotonic(void)
+{
+ struct timespec ts;
+
+ if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts))) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ }
+ return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
+}
+
+static __inline__
+uint64_t trace_clock_read64(void)
+{
+ struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
+
+ if (caa_likely(!ltc)) {
+ return trace_clock_read64_monotonic();
+ } else {
+ cmm_read_barrier_depends(); /* load ltc before content */
+ return ltc->read64();
+ }
+}
+
+#endif /* _UST_CLOCK_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Userspace RCU library - sys_futex compatibility code
+ */
+
+#include <stdio.h>
+#include <pthread.h>
+#include <signal.h>
+#include <assert.h>
+#include <errno.h>
+#include <poll.h>
+#include <stdint.h>
+
+#include <urcu/arch.h>
+#include <urcu/system.h>
+#include "futex.h"
+
+/*
+ * Using attribute "weak" for __lttng_ust_compat_futex_lock and
+ * __lttng_ust_compat_futex_cond. Those are globally visible to the entire
+ * program, even though many shared objects may have their own version.
+ * The first version that gets loaded will be used by the entire program
+ * (executable and all shared objects).
+ */
+
+__attribute__((weak))
+pthread_mutex_t __lttng_ust_compat_futex_lock = PTHREAD_MUTEX_INITIALIZER;
+__attribute__((weak))
+pthread_cond_t __lttng_ust_compat_futex_cond = PTHREAD_COND_INITIALIZER;
+
+/*
+ * _NOT SIGNAL-SAFE_. pthread_cond is not signal-safe anyway.
+ * For now, timeout, uaddr2 and val3 are unused.
+ * Waiter will relinquish the CPU until woken up.
+ */
+
+int lttng_ust_compat_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ int ret = 0, lockret;
+
+ /*
+	 * These parameters must be NULL: don't let users expect that they
+	 * are taken into account.
+ */
+ assert(!timeout);
+ assert(!uaddr2);
+ assert(!val3);
+
+ /*
+ * memory barriers to serialize with the previous uaddr modification.
+ */
+ cmm_smp_mb();
+
+ lockret = pthread_mutex_lock(&__lttng_ust_compat_futex_lock);
+ if (lockret) {
+ errno = lockret;
+ ret = -1;
+ goto end;
+ }
+ switch (op) {
+ case FUTEX_WAIT:
+ /*
+		 * Wait until *uaddr is changed to something other than "val".
+ * Comparing *uaddr content against val figures out which
+ * thread has been awakened.
+ */
+ while (CMM_LOAD_SHARED(*uaddr) == val)
+ pthread_cond_wait(&__lttng_ust_compat_futex_cond,
+ &__lttng_ust_compat_futex_lock);
+ break;
+ case FUTEX_WAKE:
+ /*
+ * Each wake is sending a broadcast, thus attempting wakeup of
+ * all awaiting threads, independently of their respective
+ * uaddr.
+ */
+ pthread_cond_broadcast(&__lttng_ust_compat_futex_cond);
+ break;
+ default:
+ errno = EINVAL;
+ ret = -1;
+ }
+ lockret = pthread_mutex_unlock(&__lttng_ust_compat_futex_lock);
+ if (lockret) {
+ errno = lockret;
+ ret = -1;
+ }
+end:
+ return ret;
+}
+
+/*
+ * _ASYNC SIGNAL-SAFE_.
+ * For now, timeout, uaddr2 and val3 are unused.
+ * Waiter will busy-loop trying to read the condition.
+ * It is OK to use compat_futex_async() on a futex address on which
+ * futex() WAKE operations are also performed.
+ */
+
+int lttng_ust_compat_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ int ret = 0;
+
+ /*
+	 * These parameters must be NULL: don't let users expect that they
+	 * are taken into account.
+ */
+ assert(!timeout);
+ assert(!uaddr2);
+ assert(!val3);
+
+ /*
+ * Ensure previous memory operations on uaddr have completed.
+ */
+ cmm_smp_mb();
+
+ switch (op) {
+ case FUTEX_WAIT:
+ while (CMM_LOAD_SHARED(*uaddr) == val) {
+ if (poll(NULL, 0, 10) < 0) {
+ ret = -1;
+ /* Keep poll errno. Caller handles EINTR. */
+ goto end;
+ }
+ }
+ break;
+ case FUTEX_WAKE:
+ break;
+ default:
+ errno = EINVAL;
+ ret = -1;
+ }
+end:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright 2020 (C) Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#ifndef _LTTNG_UST_CONTEXT_INTERNAL_H
+#define _LTTNG_UST_CONTEXT_INTERNAL_H
+
+#include <lttng/ust-events.h>
+#include "ust-events-internal.h"
+#include "common/ust-context-provider.h"
+
+int lttng_context_init_all(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_attach_context(struct lttng_ust_abi_context *context_param,
+ union lttng_ust_abi_args *uargs,
+ struct lttng_ust_ctx **ctx, struct lttng_ust_session *session)
+ __attribute__((visibility("hidden")));
+
+int lttng_find_context(struct lttng_ust_ctx *ctx, const char *name)
+ __attribute__((visibility("hidden")));
+
+int lttng_get_context_index(struct lttng_ust_ctx *ctx, const char *name)
+ __attribute__((visibility("hidden")));
+
+void lttng_destroy_context(struct lttng_ust_ctx *ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_context_append_rcu(struct lttng_ust_ctx **ctx_p,
+ const struct lttng_ust_ctx_field *f)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_context_append(struct lttng_ust_ctx **ctx_p,
+ const struct lttng_ust_ctx_field *f)
+ __attribute__((visibility("hidden")));
+
+int lttng_context_is_app(const char *name)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vtid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vpid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_cgroup_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_ipc_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_mnt_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_net_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_pid_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_user_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_uts_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_time_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vuid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_veuid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vsuid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vgid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vegid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vsgid_reset(void)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vtid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vpid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_pthread_id_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_procname_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_ip_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_cpu_id_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_dyntest_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_cgroup_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_ipc_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_mnt_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_net_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_pid_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_user_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_uts_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_time_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vuid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_veuid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vsuid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vgid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vegid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vsgid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+#endif /* _LTTNG_UST_CONTEXT_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright 2019 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#ifndef _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H
+#define _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H
+
+#include <stddef.h>
+#include <lttng/ust-events.h>
+
+void lttng_ust_context_set_event_notifier_group_provider(const char *name,
+ size_t (*get_size)(void *priv, size_t offset),
+ void (*record)(void *priv,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan),
+ void (*get_value)(void *priv,
+ struct lttng_ust_ctx_value *value),
+ void *priv)
+ __attribute__((visibility("hidden")));
+
+#endif /* _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#ifndef _LTTNG_CREDS_H
+#define _LTTNG_CREDS_H
+
+/*
+ * This is used in the kernel as an invalid value.
+ */
+
+#define INVALID_UID (uid_t) -1
+#define INVALID_GID (gid_t) -1
+
+#endif /* _LTTNG_CREDS_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_ERROR_H
+#define _LTTNG_ERROR_H
+
+#include <urcu/compiler.h>
+#include <unistd.h>
+
+#define MAX_ERRNO 4095
+
+static inline
+int IS_ERR_VALUE(long value)
+{
+ if (caa_unlikely((unsigned long) value >= (unsigned long) -MAX_ERRNO))
+ return 1;
+ else
+ return 0;
+}
+
+static inline
+void *ERR_PTR(long error)
+{
+ return (void *) error;
+}
+
+static inline
+long PTR_ERR(const void *ptr)
+{
+ return (long) ptr;
+}
+
+static inline
+int IS_ERR(const void *ptr)
+{
+ return IS_ERR_VALUE((long) ptr);
+}
+
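+/*
+ * Usage sketch (illustrative; create_handle() is a hypothetical helper):
+ *
+ *   void *handle = create_handle();
+ *   if (IS_ERR(handle))
+ *           return (int) PTR_ERR(handle);	// e.g. -ENOMEM
+ */
+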
+#endif /* _LTTNG_ERROR_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>
+
+#include <lttng/ust-endian.h>
+#include "common/logging.h"
+#include <urcu/rculist.h>
+
+#include "lttng-tracer-core.h"
+#include "ust-events-internal.h"
+#include "common/msgpack/msgpack.h"
+#include "lttng-bytecode.h"
+#include "common/patient.h"
+
+/*
+ * We want this write to be atomic AND non-blocking, meaning that we
+ * want to write either everything OR nothing.
+ * According to `pipe(7)`, writes that are less than `PIPE_BUF` bytes must be
+ * atomic, so we bound the capture buffer size to the `PIPE_BUF` minus the size
+ * of the notification struct we are sending alongside the capture buffer.
+ */
+#define CAPTURE_BUFFER_SIZE \
+ (PIPE_BUF - sizeof(struct lttng_ust_abi_event_notifier_notification) - 1)
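+/*
+ * As a rough order of magnitude (Linux defines PIPE_BUF as 4096 bytes),
+ * the capture buffer is therefore bounded to a bit less than 4 KiB.
+ */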
+
+struct lttng_event_notifier_notification {
+ int notification_fd;
+ uint64_t event_notifier_token;
+ uint8_t capture_buf[CAPTURE_BUFFER_SIZE];
+ struct lttng_msgpack_writer writer;
+ bool has_captures;
+};
+
+static
+void capture_enum(struct lttng_msgpack_writer *writer,
+ struct lttng_interpreter_output *output)
+{
+ lttng_msgpack_begin_map(writer, 2);
+ lttng_msgpack_write_str(writer, "type");
+ lttng_msgpack_write_str(writer, "enum");
+
+ lttng_msgpack_write_str(writer, "value");
+
+ switch (output->type) {
+ case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
+ lttng_msgpack_write_signed_integer(writer, output->u.s);
+ break;
+ case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
+ lttng_msgpack_write_signed_integer(writer, output->u.u);
+ break;
+ default:
+ abort();
+ }
+
+ lttng_msgpack_end_map(writer);
+}
+
+static
+int64_t capture_sequence_element_signed(uint8_t *ptr,
+ const struct lttng_ust_type_integer *integer_type)
+{
+ int64_t value;
+ unsigned int size = integer_type->size;
+ bool byte_order_reversed = integer_type->reverse_byte_order;
+
+ switch (size) {
+ case 8:
+ value = *ptr;
+ break;
+ case 16:
+ {
+ int16_t tmp;
+ tmp = *(int16_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_16(tmp);
+
+ value = tmp;
+ break;
+ }
+ case 32:
+ {
+ int32_t tmp;
+ tmp = *(int32_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_32(tmp);
+
+ value = tmp;
+ break;
+ }
+ case 64:
+ {
+ int64_t tmp;
+ tmp = *(int64_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_64(tmp);
+
+ value = tmp;
+ break;
+ }
+ default:
+ abort();
+ }
+
+ return value;
+}
+
+static
+uint64_t capture_sequence_element_unsigned(uint8_t *ptr,
+ const struct lttng_ust_type_integer *integer_type)
+{
+ uint64_t value;
+ unsigned int size = integer_type->size;
+ bool byte_order_reversed = integer_type->reverse_byte_order;
+
+ switch (size) {
+ case 8:
+ value = *ptr;
+ break;
+ case 16:
+ {
+ uint16_t tmp;
+ tmp = *(uint16_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_16(tmp);
+
+ value = tmp;
+ break;
+ }
+ case 32:
+ {
+ uint32_t tmp;
+ tmp = *(uint32_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_32(tmp);
+
+ value = tmp;
+ break;
+ }
+ case 64:
+ {
+ uint64_t tmp;
+ tmp = *(uint64_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_64(tmp);
+
+ value = tmp;
+ break;
+ }
+ default:
+ abort();
+ }
+
+ return value;
+}
+
+static
+void capture_sequence(struct lttng_msgpack_writer *writer,
+ struct lttng_interpreter_output *output)
+{
+ const struct lttng_ust_type_integer *integer_type;
+ const struct lttng_ust_type_common *nested_type;
+ uint8_t *ptr;
+ bool signedness;
+ int i;
+
+ lttng_msgpack_begin_array(writer, output->u.sequence.nr_elem);
+
+ ptr = (uint8_t *) output->u.sequence.ptr;
+ nested_type = output->u.sequence.nested_type;
+ switch (nested_type->type) {
+ case lttng_ust_type_integer:
+ integer_type = lttng_ust_get_type_integer(nested_type);
+ break;
+ case lttng_ust_type_enum:
+ /* Treat enumeration as an integer. */
+ integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_enum(nested_type)->container_type);
+ break;
+ default:
+		/* Capture of arrays of non-integer elements is not supported. */
+ abort();
+ }
+ signedness = integer_type->signedness;
+ for (i = 0; i < output->u.sequence.nr_elem; i++) {
+ if (signedness) {
+ lttng_msgpack_write_signed_integer(writer,
+ capture_sequence_element_signed(ptr, integer_type));
+ } else {
+ lttng_msgpack_write_unsigned_integer(writer,
+ capture_sequence_element_unsigned(ptr, integer_type));
+ }
+
+ /*
+ * We assume that alignment is smaller or equal to the size.
+ * This currently holds true but if it changes in the future,
+		 * we will want to change the pointer arithmetic below to
+ * take into account that the next element might be further
+ * away.
+ */
+ assert(integer_type->alignment <= integer_type->size);
+
+ /* Size is in number of bits. */
+		ptr += (integer_type->size / CHAR_BIT);
+ }
+
+ lttng_msgpack_end_array(writer);
+}
+
+static
+void notification_init(struct lttng_event_notifier_notification *notif,
+ struct lttng_ust_event_notifier *event_notifier)
+{
+	struct lttng_msgpack_writer *writer = &notif->writer;
+
+ notif->event_notifier_token = event_notifier->priv->parent.user_token;
+ notif->notification_fd = event_notifier->priv->group->notification_fd;
+ notif->has_captures = false;
+
+ if (event_notifier->priv->num_captures > 0) {
+ lttng_msgpack_writer_init(writer, notif->capture_buf,
+ CAPTURE_BUFFER_SIZE);
+
+ lttng_msgpack_begin_array(writer, event_notifier->priv->num_captures);
+ notif->has_captures = true;
+ }
+}
+
+static
+void notification_append_capture(
+ struct lttng_event_notifier_notification *notif,
+ struct lttng_interpreter_output *output)
+{
+	struct lttng_msgpack_writer *writer = &notif->writer;
+
+ switch (output->type) {
+ case LTTNG_INTERPRETER_TYPE_S64:
+ lttng_msgpack_write_signed_integer(writer, output->u.s);
+ break;
+ case LTTNG_INTERPRETER_TYPE_U64:
+ lttng_msgpack_write_unsigned_integer(writer, output->u.u);
+ break;
+ case LTTNG_INTERPRETER_TYPE_DOUBLE:
+ lttng_msgpack_write_double(writer, output->u.d);
+ break;
+ case LTTNG_INTERPRETER_TYPE_STRING:
+ lttng_msgpack_write_str(writer, output->u.str.str);
+ break;
+ case LTTNG_INTERPRETER_TYPE_SEQUENCE:
+ capture_sequence(writer, output);
+ break;
+ case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
+ case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
+ capture_enum(writer, output);
+ break;
+ default:
+ abort();
+ }
+}
+
+static
+void notification_append_empty_capture(
+ struct lttng_event_notifier_notification *notif)
+{
+	lttng_msgpack_write_nil(&notif->writer);
+}
+
+static void record_error(struct lttng_ust_event_notifier *event_notifier)
+{
+ struct lttng_event_notifier_group *event_notifier_group =
+ event_notifier->priv->group;
+ struct lttng_counter *error_counter;
+ size_t dimension_index[1];
+ int ret;
+
+ error_counter = CMM_LOAD_SHARED(event_notifier_group->error_counter);
+ /*
+ * load-acquire paired with store-release orders creation of the
+ * error counter and setting error_counter_len before the
+ * error_counter is used.
+ * Currently a full memory barrier is used, which could be
+ * turned into acquire-release barriers.
+ */
+ cmm_smp_mb();
+ /* This group may not have an error counter attached to it. */
+ if (!error_counter)
+ return;
+
+ dimension_index[0] = event_notifier->priv->error_counter_index;
+ ret = event_notifier_group->error_counter->ops->counter_add(
+ error_counter->counter, dimension_index, 1);
+ if (ret)
+ WARN_ON_ONCE(1);
+}
+
+static
+void notification_send(struct lttng_event_notifier_notification *notif,
+ struct lttng_ust_event_notifier *event_notifier)
+{
+ ssize_t ret;
+ size_t content_len;
+ int iovec_count = 1;
+ struct lttng_ust_abi_event_notifier_notification ust_notif = {0};
+ struct iovec iov[2];
+
+ assert(notif);
+
+ ust_notif.token = event_notifier->priv->parent.user_token;
+
+ /*
+	 * Prepare to send the notification from multiple buffers using an
+	 * array of `struct iovec`. The first buffer of the vector is the
+	 * notification structure itself and is always present.
+ */
+ iov[0].iov_base = &ust_notif;
+ iov[0].iov_len = sizeof(ust_notif);
+
+ if (notif->has_captures) {
+ /*
+ * If captures were requested, the second buffer of the array
+ * is the capture buffer.
+ */
+ assert(notif->writer.buffer);
+ content_len = notif->writer.write_pos - notif->writer.buffer;
+
+ assert(content_len > 0 && content_len <= CAPTURE_BUFFER_SIZE);
+
+ iov[1].iov_base = notif->capture_buf;
+ iov[1].iov_len = content_len;
+
+ iovec_count++;
+ } else {
+ content_len = 0;
+ }
+
+ /*
+ * Update the capture buffer size so that receiver of the buffer will
+ * know how much to expect.
+ */
+ ust_notif.capture_buf_size = content_len;
+
+ /* Send all the buffers. */
+ ret = ust_patient_writev(notif->notification_fd, iov, iovec_count);
+ if (ret == -1) {
+ if (errno == EAGAIN) {
+ record_error(event_notifier);
+ DBG("Cannot send event_notifier notification without blocking: %s",
+ strerror(errno));
+ } else {
+			DBG("Error sending event notifier notification: %s",
+ strerror(errno));
+ abort();
+ }
+ }
+}
+
+void lttng_event_notifier_notification_send(
+ struct lttng_ust_event_notifier *event_notifier,
+ const char *stack_data,
+ struct lttng_ust_notification_ctx *notif_ctx)
+{
+ /*
+	 * This function is called from the probe, so we must not do
+	 * dynamic allocation in this context.
+ */
+ struct lttng_event_notifier_notification notif = {0};
+
+	notification_init(&notif, event_notifier);
+
+ if (caa_unlikely(notif_ctx->eval_capture)) {
+ struct lttng_ust_bytecode_runtime *capture_bc_runtime;
+
+ /*
+ * Iterate over all the capture bytecodes. If the interpreter
+ * functions returns successfully, append the value of the
+ * `output` parameter to the capture buffer. If the interpreter
+ * fails, append an empty capture to the buffer.
+ */
+ cds_list_for_each_entry_rcu(capture_bc_runtime,
+ &event_notifier->priv->capture_bytecode_runtime_head, node) {
+ struct lttng_interpreter_output output;
+
+ if (capture_bc_runtime->interpreter_func(capture_bc_runtime,
+ stack_data, &output) == LTTNG_UST_BYTECODE_INTERPRETER_OK)
+				notification_append_capture(&notif, &output);
+ else
+				notification_append_empty_capture(&notif);
+ }
+ }
+
+ /*
+ * Send the notification (including the capture buffer) to the
+ * sessiond.
+ */
+	notification_send(&notif, event_notifier);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 Aravind HT <aravind.ht@gmail.com>
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/select.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <urcu/compiler.h>
+#include <urcu/tls-compat.h>
+#include <urcu/system.h>
+
+#include "common/ust-fd.h"
+#include "common/macros.h"
+#include <lttng/ust-error.h>
+#include "common/logging.h"
+
+/* Operations on the fd set. */
+#define IS_FD_VALID(fd) ((fd) >= 0 && (fd) < lttng_ust_max_fd)
+#define GET_FD_SET_FOR_FD(fd, fd_sets) (&((fd_sets)[(fd) / FD_SETSIZE]))
+#define CALC_INDEX_TO_SET(fd) ((fd) % FD_SETSIZE)
+#define IS_FD_STD(fd) (IS_FD_VALID(fd) && (fd) <= STDERR_FILENO)
+
+/* Check fd validity before calling these. */
+#define ADD_FD_TO_SET(fd, fd_sets) \
+ FD_SET(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
+#define IS_FD_SET(fd, fd_sets) \
+ FD_ISSET(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
+#define DEL_FD_FROM_SET(fd, fd_sets) \
+ FD_CLR(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
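+
+/*
+ * Worked example (assuming the common FD_SETSIZE of 1024): fd 2500 is
+ * tracked in lttng_fd_set[2] at bit index 452, since 2500 / 1024 == 2
+ * and 2500 % 1024 == 452.
+ */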
+
+/*
+ * Protect the lttng_fd_set. Nests within the ust_lock, and therefore
+ * within the libc dl lock. Therefore, we need to fixup the TLS before
+ * nesting into this lock.
+ *
+ * The ust_safe_guard_fd_mutex nests within the ust_mutex. This mutex
+ * is also held across fork.
+ */
+static pthread_mutex_t ust_safe_guard_fd_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Cancel state when grabbing the ust_safe_guard_fd_mutex. Saved when
+ * locking, restored on unlock. Protected by ust_safe_guard_fd_mutex.
+ */
+static int ust_safe_guard_saved_cancelstate;
+
+/*
+ * Track whether we are within lttng-ust or application, for close
+ * system call override by LD_PRELOAD library. This also tracks whether
+ * we are invoking close() from a signal handler nested on an
+ * application thread.
+ */
+static DEFINE_URCU_TLS(int, ust_fd_mutex_nest);
+
+/* fd_set used to keep track of the fds in use by lttng-ust. */
+static fd_set *lttng_fd_set;
+static int lttng_ust_max_fd;
+static int num_fd_sets;
+static int init_done;
+
+/*
+ * Force a read (implying a TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_ust_fixup_fd_tracker_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(ust_fd_mutex_nest)));
+}
+
+/*
+ * Allocate the fd set array based on the hard limit set for this
+ * process. This will be called during the constructor execution
+ * and will also be called in the child after fork via lttng_ust_init.
+ */
+void lttng_ust_init_fd_tracker(void)
+{
+ struct rlimit rlim;
+ int i;
+
+ if (CMM_LOAD_SHARED(init_done))
+ return;
+
+ memset(&rlim, 0, sizeof(rlim));
+ /* Get the current possible max number of fd for this process. */
+ if (getrlimit(RLIMIT_NOFILE, &rlim) < 0)
+ abort();
+ /*
+ * FD set array size determined using the hard limit. Even if
+	 * The fd set array size is determined using the hard limit. Even
+	 * if the process wishes to increase its limit using setrlimit, it
+	 * can only do so with the soft limit, which cannot exceed the
+	 * hard limit.
+ lttng_ust_max_fd = rlim.rlim_max;
+ num_fd_sets = lttng_ust_max_fd / FD_SETSIZE;
+ if (lttng_ust_max_fd % FD_SETSIZE)
+ ++num_fd_sets;
+ if (lttng_fd_set != NULL) {
+ free(lttng_fd_set);
+ lttng_fd_set = NULL;
+ }
+ lttng_fd_set = malloc(num_fd_sets * (sizeof(fd_set)));
+ if (!lttng_fd_set)
+ abort();
+ for (i = 0; i < num_fd_sets; i++)
+		FD_ZERO((&lttng_fd_set[i]));
+ CMM_STORE_SHARED(init_done, 1);
+}
+
+void lttng_ust_lock_fd_tracker(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!URCU_TLS(ust_fd_mutex_nest)++) {
+ /*
+		 * Ensure the compiler doesn't move the store after the close()
+ * call in case close() would be marked as leaf.
+ */
+ cmm_barrier();
+ pthread_mutex_lock(&ust_safe_guard_fd_mutex);
+ ust_safe_guard_saved_cancelstate = oldstate;
+ }
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+}
+
+void lttng_ust_unlock_fd_tracker(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, newstate, oldstate;
+ bool restore_cancel = false;
+
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ /*
+	 * Ensure the compiler doesn't move the store before the close()
+ * call, in case close() would be marked as leaf.
+ */
+ cmm_barrier();
+ if (!--URCU_TLS(ust_fd_mutex_nest)) {
+ newstate = ust_safe_guard_saved_cancelstate;
+ restore_cancel = true;
+ pthread_mutex_unlock(&ust_safe_guard_fd_mutex);
+ }
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (restore_cancel) {
+ ret = pthread_setcancelstate(newstate, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ }
+}
+
+static int dup_std_fd(int fd)
+{
+ int ret, i;
+ int fd_to_close[STDERR_FILENO + 1];
+ int fd_to_close_count = 0;
+ int dup_cmd = F_DUPFD; /* Default command */
+ int fd_valid = -1;
+
+ if (!(IS_FD_STD(fd))) {
+ /* Should not be here */
+ ret = -1;
+ goto error;
+ }
+
+ /* Check for FD_CLOEXEC flag */
+ ret = fcntl(fd, F_GETFD);
+ if (ret < 0) {
+ PERROR("fcntl on f_getfd");
+ ret = -1;
+ goto error;
+ }
+
+ if (ret & FD_CLOEXEC) {
+ dup_cmd = F_DUPFD_CLOEXEC;
+ }
+
+ /* Perform dup */
+ for (i = 0; i < STDERR_FILENO + 1; i++) {
+ ret = fcntl(fd, dup_cmd, 0);
+ if (ret < 0) {
+ PERROR("fcntl dup fd");
+ goto error;
+ }
+
+ if (!(IS_FD_STD(ret))) {
+ /* fd is outside of STD range, use it. */
+ fd_valid = ret;
+ /* Close fd received as argument. */
+ fd_to_close[i] = fd;
+ fd_to_close_count++;
+ break;
+ }
+
+ fd_to_close[i] = ret;
+ fd_to_close_count++;
+ }
+
+ /* Close intermediary fds */
+ for (i = 0; i < fd_to_close_count; i++) {
+ ret = close(fd_to_close[i]);
+ if (ret) {
+ PERROR("close on temporary fd: %d.", fd_to_close[i]);
+ /*
+			 * Not aborting here would require complicated
+			 * error handling in the caller. If a failure occurs
+ * here, the system is already in a bad state.
+ */
+ abort();
+ }
+ }
+
+ ret = fd_valid;
+error:
+ return ret;
+}
+
+/*
+ * Needs to be called with ust_safe_guard_fd_mutex held when opening the fd.
+ * Has strict checking of fd validity.
+ *
+ * If fd <= 2, dup the fd until fd > 2. This enables us to bypass
+ * problems that can be encountered if UST uses stdin, stdout, stderr
+ * fds for internal use (daemon etc.). This can happen if the
+ * application closes any of those file descriptors. Intermediary fds
+ * are closed as needed.
+ *
+ * Return -1 on error.
+ *
+ */
+int lttng_ust_add_fd_to_tracker(int fd)
+{
+ int ret;
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+ assert(URCU_TLS(ust_fd_mutex_nest));
+
+ if (IS_FD_STD(fd)) {
+ ret = dup_std_fd(fd);
+ if (ret < 0) {
+ goto error;
+ }
+ fd = ret;
+ }
+
+	/* Trying to add an fd which we cannot accommodate. */
+ assert(IS_FD_VALID(fd));
+	/* Setting an fd that's already set. */
+ assert(!IS_FD_SET(fd, lttng_fd_set));
+
+ ADD_FD_TO_SET(fd, lttng_fd_set);
+ return fd;
+error:
+ return ret;
+}
+
+/*
+ * Needs to be called with ust_safe_guard_fd_mutex held when opening the fd.
+ * Has strict checking for fd validity.
+ */
+void lttng_ust_delete_fd_from_tracker(int fd)
+{
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+
+ assert(URCU_TLS(ust_fd_mutex_nest));
+ /* Not a valid fd. */
+ assert(IS_FD_VALID(fd));
+ /* Deleting an fd which was not set. */
+ assert(IS_FD_SET(fd, lttng_fd_set));
+
+ DEL_FD_FROM_SET(fd, lttng_fd_set);
+}
+
+/*
+ * Interface allowing applications to close arbitrary file descriptors.
+ * We check if it is owned by lttng-ust, and return -1, errno=EBADF
+ * instead of closing it if it is the case.
+ */
+int lttng_ust_safe_close_fd(int fd, int (*close_cb)(int fd))
+{
+ int ret = 0;
+
+ lttng_ust_fixup_fd_tracker_tls();
+
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+
+ /*
+ * If called from lttng-ust, we directly call close without
+ * validating whether the FD is part of the tracked set.
+ */
+ if (URCU_TLS(ust_fd_mutex_nest))
+ return close_cb(fd);
+
+ lttng_ust_lock_fd_tracker();
+ if (IS_FD_VALID(fd) && IS_FD_SET(fd, lttng_fd_set)) {
+ ret = -1;
+ errno = EBADF;
+ } else {
+ ret = close_cb(fd);
+ }
+ lttng_ust_unlock_fd_tracker();
+
+ return ret;
+}
+
+/*
+ * Interface allowing applications to close arbitrary streams.
+ * We check if it is owned by lttng-ust, and return -1, errno=EBADF
+ * instead of closing it if it is the case.
+ */
+int lttng_ust_safe_fclose_stream(FILE *stream, int (*fclose_cb)(FILE *stream))
+{
+ int ret = 0, fd;
+
+ lttng_ust_fixup_fd_tracker_tls();
+
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+
+ /*
+ * If called from lttng-ust, we directly call fclose without
+ * validating whether the FD is part of the tracked set.
+ */
+ if (URCU_TLS(ust_fd_mutex_nest))
+ return fclose_cb(stream);
+
+ fd = fileno(stream);
+
+ lttng_ust_lock_fd_tracker();
+ if (IS_FD_VALID(fd) && IS_FD_SET(fd, lttng_fd_set)) {
+ ret = -1;
+ errno = EBADF;
+ } else {
+ ret = fclose_cb(stream);
+ }
+ lttng_ust_unlock_fd_tracker();
+
+ return ret;
+}
+
+#ifdef __OpenBSD__
+static void set_close_success(int *p)
+{
+ *p = 1;
+}
+static int test_close_success(const int *p)
+{
+ return *p;
+}
+#else
+static void set_close_success(int *p __attribute__((unused)))
+{
+}
+static int test_close_success(const int *p __attribute__((unused)))
+{
+ return 1;
+}
+#endif
+
+/*
+ * Implement helper for closefrom() override.
+ */
+int lttng_ust_safe_closefrom_fd(int lowfd, int (*close_cb)(int fd))
+{
+ int ret = 0, close_success = 0, i;
+
+ lttng_ust_fixup_fd_tracker_tls();
+
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+
+ if (lowfd < 0) {
+ /*
+		 * NetBSD returns EBADF if fd is invalid.
+ */
+ errno = EBADF;
+ ret = -1;
+ goto end;
+ }
+ /*
+ * If called from lttng-ust, we directly call close without
+ * validating whether the FD is part of the tracked set.
+ */
+ if (URCU_TLS(ust_fd_mutex_nest)) {
+ for (i = lowfd; i < lttng_ust_max_fd; i++) {
+ if (close_cb(i) < 0) {
+ switch (errno) {
+ case EBADF:
+ continue;
+ case EINTR:
+ default:
+ ret = -1;
+ goto end;
+ }
+ }
+ set_close_success(&close_success);
+ }
+ } else {
+ lttng_ust_lock_fd_tracker();
+ for (i = lowfd; i < lttng_ust_max_fd; i++) {
+ if (IS_FD_VALID(i) && IS_FD_SET(i, lttng_fd_set))
+ continue;
+ if (close_cb(i) < 0) {
+ switch (errno) {
+ case EBADF:
+ continue;
+ case EINTR:
+ default:
+ ret = -1;
+ lttng_ust_unlock_fd_tracker();
+ goto end;
+ }
+ }
+ set_close_success(&close_success);
+ }
+ lttng_ust_unlock_fd_tracker();
+ }
+ if (!test_close_success(&close_success)) {
+ /*
+		 * OpenBSD returns EBADF if fd is greater than all open
+ * file descriptors.
+ */
+ ret = -1;
+ errno = EBADF;
+ }
+end:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Userspace RCU - sys_futex/compat_futex header.
+ */
+
+#ifndef _LTTNG_UST_FUTEX_H
+#define _LTTNG_UST_FUTEX_H
+
+#include <errno.h>
+#include <stdint.h>
+#include <time.h>
+#include <sys/syscall.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+/*
+ * sys_futex compatibility header.
+ * Use *only* *either of* futex_noasync OR futex_async on a given address.
+ *
+ * futex_noasync cannot be executed in signal handlers, but ensures that
+ * it will be put in a wait queue even in compatibility mode.
+ *
+ * futex_async is signal-handler safe for the wakeup. It uses polling
+ * on the wait-side in compatibility mode.
+ *
+ * BEWARE: sys_futex() FUTEX_WAIT may return early if interrupted
+ * (returns EINTR).
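+ *
+ * Illustrative sketch (hypothetical "state" variable, not part of this
+ * header): a waiter/waker pair on an int32_t word initialized to 0:
+ *
+ *   // waiter: sleep while state is still 0
+ *   while (CMM_LOAD_SHARED(state) == 0)
+ *           lttng_ust_futex_async(&state, FUTEX_WAIT, 0, NULL, NULL, 0);
+ *
+ *   // waker: publish the new state, then wake one waiter
+ *   CMM_STORE_SHARED(state, 1);
+ *   lttng_ust_futex_async(&state, FUTEX_WAKE, 1, NULL, NULL, 0);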
+ */
+
+extern int lttng_ust_compat_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+ __attribute__((visibility("hidden")));
+
+extern int lttng_ust_compat_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+ __attribute__((visibility("hidden")));
+
+#if (defined(__linux__) && defined(__NR_futex))
+
+#include <unistd.h>
+#include <errno.h>
+#include <urcu/compiler.h>
+#include <urcu/arch.h>
+
+static inline int lttng_ust_futex(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ return syscall(__NR_futex, uaddr, op, val, timeout,
+ uaddr2, val3);
+}
+
+static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ int ret;
+
+ ret = lttng_ust_futex(uaddr, op, val, timeout, uaddr2, val3);
+ if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
+ /*
+ * The fallback on ENOSYS is the async-safe version of
+ * the compat futex implementation, because the
+ * async-safe compat implementation allows being used
+ * concurrently with calls to futex(). Indeed, sys_futex
+		 * within a given process, can spuriously return ENOSYS due
+ * within a given process, spuriously return ENOSYS due
+ * to signal restart bugs on some kernel versions.
+ */
+ return lttng_ust_compat_futex_async(uaddr, op, val, timeout,
+ uaddr2, val3);
+ }
+	return ret;
+}
+
+static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ int ret;
+
+ ret = lttng_ust_futex(uaddr, op, val, timeout, uaddr2, val3);
+ if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
+ return lttng_ust_compat_futex_async(uaddr, op, val, timeout,
+ uaddr2, val3);
+ }
+ return ret;
+}
+
+#elif defined(__FreeBSD__)
+
+#include <sys/types.h>
+#include <sys/umtx.h>
+
+static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ int umtx_op;
+ void *umtx_uaddr = NULL, *umtx_uaddr2 = NULL;
+ struct _umtx_time umtx_timeout = {
+ ._flags = UMTX_ABSTIME,
+ ._clockid = CLOCK_MONOTONIC,
+ };
+
+ switch (op) {
+ case FUTEX_WAIT:
+ /* On FreeBSD, a "u_int" is a 32-bit integer. */
+ umtx_op = UMTX_OP_WAIT_UINT;
+ if (timeout != NULL) {
+ umtx_timeout._timeout = *timeout;
+ umtx_uaddr = (void *) sizeof(umtx_timeout);
+ umtx_uaddr2 = (void *) &umtx_timeout;
+ }
+ break;
+ case FUTEX_WAKE:
+ umtx_op = UMTX_OP_WAKE;
+ break;
+ default:
+ errno = EINVAL;
+ return -1;
+ }
+
+ return _umtx_op(uaddr, umtx_op, (uint32_t) val, umtx_uaddr,
+ umtx_uaddr2);
+}
+
+static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ return lttng_ust_futex_async(uaddr, op, val, timeout, uaddr2, val3);
+}
+
+#elif defined(__CYGWIN__)
+
+/*
+ * The futex_noasync compat code uses a weak symbol to share state across
+ * different shared objects, which is not possible on Windows with the
+ * Portable Executable format. Use the async compat code for both cases.
+ */
+static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ return lttng_ust_compat_futex_async(uaddr, op, val, timeout, uaddr2, val3);
+}
+
+static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ return lttng_ust_compat_futex_async(uaddr, op, val, timeout, uaddr2, val3);
+}
+
+#else
+
+static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ return lttng_ust_compat_futex_noasync(uaddr, op, val, timeout, uaddr2, val3);
+}
+
+static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ return lttng_ust_compat_futex_async(uaddr, op, val, timeout, uaddr2, val3);
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LTTNG_UST_FUTEX_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include "common/logging.h"
+#include "common/macros.h"
+#include "getenv.h"
+
+enum lttng_env_secure {
+ LTTNG_ENV_SECURE,
+ LTTNG_ENV_NOT_SECURE,
+};
+
+struct lttng_env {
+ const char *key;
+ enum lttng_env_secure secure;
+ char *value;
+};
+
+static struct lttng_env lttng_env[] = {
+ /*
+ * LTTNG_UST_DEBUG is used directly by snprintf, because it
+ * needs to be already set for ERR() used in
+ * lttng_ust_getenv_init().
+ */
+ { "LTTNG_UST_DEBUG", LTTNG_ENV_NOT_SECURE, NULL, },
+
+ /* Env. var. which can be used in setuid/setgid executables. */
+ { "LTTNG_UST_WITHOUT_BADDR_STATEDUMP", LTTNG_ENV_NOT_SECURE, NULL, },
+ { "LTTNG_UST_REGISTER_TIMEOUT", LTTNG_ENV_NOT_SECURE, NULL, },
+
+ /* Env. var. which are not fetched in setuid/setgid executables. */
+ { "LTTNG_UST_CLOCK_PLUGIN", LTTNG_ENV_SECURE, NULL, },
+ { "LTTNG_UST_GETCPU_PLUGIN", LTTNG_ENV_SECURE, NULL, },
+ { "LTTNG_UST_ALLOW_BLOCKING", LTTNG_ENV_SECURE, NULL, },
+ { "HOME", LTTNG_ENV_SECURE, NULL, },
+ { "LTTNG_HOME", LTTNG_ENV_SECURE, NULL, },
+};
+
+static
+int lttng_is_setuid_setgid(void)
+{
+ return geteuid() != getuid() || getegid() != getgid();
+}
+
+char *lttng_ust_getenv(const char *name)
+{
+ size_t i;
+ struct lttng_env *e;
+ bool found = false;
+
+ for (i = 0; i < LTTNG_ARRAY_SIZE(lttng_env); i++) {
+		e = &lttng_env[i];
+
+ if (strcmp(e->key, name) == 0) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ return NULL;
+ }
+ return e->value;
+}
+
+void lttng_ust_getenv_init(void)
+{
+ size_t i;
+
+ for (i = 0; i < LTTNG_ARRAY_SIZE(lttng_env); i++) {
+		struct lttng_env *e = &lttng_env[i];
+
+ if (e->secure == LTTNG_ENV_SECURE && lttng_is_setuid_setgid()) {
+ ERR("Getting environment variable '%s' from setuid/setgid binary refused for security reasons.",
+ e->key);
+ continue;
+ }
+ e->value = getenv(e->key);
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _COMPAT_GETENV_H
+#define _COMPAT_GETENV_H
+
+/*
+ * Always access the lttng-ust environment variables through the
+ * lttng_ust_getenv() infrastructure rather than using getenv() directly.
+ * This ensures that we don't trigger races between getenv() invoked by
+ * lttng-ust listener threads and setenv() called concurrently by an
+ * otherwise single-threaded application thread (the application is not
+ * aware that it runs with lttng-ust).
+ */
+
+char *lttng_ust_getenv(const char *name)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_getenv_init(void)
+ __attribute__((visibility("hidden")));
+
+#endif /* _COMPAT_GETENV_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <urcu/compiler.h>
+#include <lttng/ust-endian.h>
+
+/*
+ * Hash function
+ * Source: http://burtleburtle.net/bob/c/lookup3.c
+ * Originally Public Domain
+ */
+
+#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
+
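+/* mix -- mix 3 32-bit values reversibly (from lookup3.c). */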
+#define mix(a, b, c) \
+do { \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c, 16); c += b; \
+ b -= a; b ^= rot(a, 19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+} while (0)
+
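+/* final -- final mixing of 3 32-bit values (a, b, c) into c (from lookup3.c). */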
+#define final(a, b, c) \
+{ \
+ c ^= b; c -= rot(b, 14); \
+ a ^= c; a -= rot(c, 11); \
+ b ^= a; b -= rot(a, 25); \
+ c ^= b; c -= rot(b, 16); \
+ a ^= c; a -= rot(c, 4);\
+ b ^= a; b -= rot(a, 14); \
+ c ^= b; c -= rot(b, 24); \
+}
+
+#if (BYTE_ORDER == LITTLE_ENDIAN)
+#define HASH_LITTLE_ENDIAN 1
+#else
+#define HASH_LITTLE_ENDIAN 0
+#endif
+
+/*
+ *
+ * hashlittle() -- hash a variable-length key into a 32-bit value
+ * k : the key (the unaligned variable-length array of bytes)
+ * length : the length of the key, counting by bytes
+ * initval : can be any 4-byte value
+ * Returns a 32-bit value. Every bit of the key affects every bit of
+ * the return value. Two keys differing by one or two bits will have
+ * totally different hash values.
+ *
+ * The best hash table sizes are powers of 2. There is no need to do
+ * mod a prime (mod is sooo slow!). If you need less than 32 bits,
+ * use a bitmask. For example, if you need only 10 bits, do
+ * h = (h & hashmask(10));
+ * In which case, the hash table should have hashsize(10) elements.
+ *
+ * If you are hashing n strings (uint8_t **)k, do it like this:
+ * for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h);
+ *
+ * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
+ * code any way you wish, private, educational, or commercial. It's free.
+ *
+ * Use for hash table lookup, or anything where one collision in 2^^32 is
+ * acceptable. Do NOT use for cryptographic purposes.
+ */
+static
+uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
+{
+ uint32_t a, b, c; /* internal state */
+ union {
+ const void *ptr;
+ size_t i;
+ } u;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *) key; /* read 32-bit chunks */
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a, b, c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * The original jhash.h reads beyond the end of string, and implements
+ * a special code path for VALGRIND. It seems to make ASan unhappy too
+ * though, so considering that hashing event names is not a fast-path
+ * in lttng-ust, remove the "fast" code entirely and use the slower
+ * but verifiable VALGRIND version of the code which does not issue
+ * out-of-bound reads.
+ */
+ {
+ const uint8_t *k8;
+
+ k8 = (const uint8_t *) k;
+ switch (length) {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t) k8[10])<<16; /* fall through */
+ case 10: c+=((uint32_t) k8[9])<<8; /* fall through */
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t) k8[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t) k8[5])<<8; /* fall through */
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t) k8[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t) k8[1])<<8; /* fall through */
+ case 1 : a+=k8[0]; break;
+ case 0 : return c;
+ }
+ }
+
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *) key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t) k[1])<<16);
+ b += k[2] + (((uint32_t) k[3])<<16);
+ c += k[4] + (((uint32_t) k[5])<<16);
+ mix(a, b, c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *) k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t) k[5])<<16);
+ b+=k[2]+(((uint32_t) k[3])<<16);
+ a+=k[0]+(((uint32_t) k[1])<<16);
+ break;
+ case 11: c+=((uint32_t) k8[10])<<16; /* fall through */
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t) k[3])<<16);
+ a+=k[0]+(((uint32_t) k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[2]+(((uint32_t) k[3])<<16);
+ a+=k[0]+(((uint32_t) k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t) k8[6])<<16; /* fall through */
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t) k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]+(((uint32_t) k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t) k8[2])<<16; /* fall through */
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : return c; /* zero length requires no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a, b, c) */
+ while (length > 12) {
+ a += k[0];
+ a += ((uint32_t) k[1])<<8;
+ a += ((uint32_t) k[2])<<16;
+ a += ((uint32_t) k[3])<<24;
+ b += k[4];
+ b += ((uint32_t) k[5])<<8;
+ b += ((uint32_t) k[6])<<16;
+ b += ((uint32_t) k[7])<<24;
+ c += k[8];
+ c += ((uint32_t) k[9])<<8;
+ c += ((uint32_t) k[10])<<16;
+ c += ((uint32_t) k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch (length) { /* all the case statements fall through */
+ case 12: c+=((uint32_t) k[11])<<24; /* fall through */
+ case 11: c+=((uint32_t) k[10])<<16; /* fall through */
+ case 10: c+=((uint32_t) k[9])<<8; /* fall through */
+ case 9 : c+=k[8]; /* fall through */
+ case 8 : b+=((uint32_t) k[7])<<24; /* fall through */
+ case 7 : b+=((uint32_t) k[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t) k[5])<<8; /* fall through */
+ case 5 : b+=k[4]; /* fall through */
+ case 4 : a+=((uint32_t) k[3])<<24; /* fall through */
+ case 3 : a+=((uint32_t) k[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t) k[1])<<8; /* fall through */
+ case 1 : a+=k[0];
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a, b, c);
+ return c;
+}
+
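+/* Hash "length" bytes of "key" with the given seed, using hashlittle() above. */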
+static inline
+uint32_t jhash(const void *key, size_t length, uint32_t seed)
+{
+ return hashlittle(key, length, seed);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST bytecode interpreter.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+
+#include <lttng/urcu/pointer.h>
+#include <urcu/rculist.h>
+#include <lttng/ust-endian.h>
+#include <lttng/ust-events.h>
+#include "ust-events-internal.h"
+
+#include "lttng-bytecode.h"
+#include "string-utils.h"
+
+
+/*
+ * -1: wildcard found.
+ * -2: unknown escape char.
+ * 0: normal char.
+ */
+
+static
+int parse_char(const char **p)
+{
+ switch (**p) {
+ case '\\':
+ (*p)++;
+ switch (**p) {
+ case '\\':
+ case '*':
+ return 0;
+ default:
+ return -2;
+ }
+ case '*':
+ return -1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Returns SIZE_MAX if the string is null-terminated, or the number of
+ * characters if not.
+ */
+static
+size_t get_str_or_seq_len(const struct estack_entry *entry)
+{
+ return entry->u.s.seq_len;
+}
+
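+/*
+ * Match the star-glob pattern operand against the plain string operand on
+ * the stack; either ax or bx may hold the pattern literal. Returns 0 on
+ * match, nonzero otherwise.
+ */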
+static
+int stack_star_glob_match(struct estack *stack, int top,
+ const char *cmp_type __attribute__((unused)))
+{
+ const char *pattern;
+ const char *candidate;
+ size_t pattern_len;
+ size_t candidate_len;
+
+ /* Find out which side is the pattern vs. the candidate. */
+ if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
+ pattern = estack_ax(stack, top)->u.s.str;
+ pattern_len = get_str_or_seq_len(estack_ax(stack, top));
+ candidate = estack_bx(stack, top)->u.s.str;
+ candidate_len = get_str_or_seq_len(estack_bx(stack, top));
+ } else {
+ pattern = estack_bx(stack, top)->u.s.str;
+ pattern_len = get_str_or_seq_len(estack_bx(stack, top));
+ candidate = estack_ax(stack, top)->u.s.str;
+ candidate_len = get_str_or_seq_len(estack_ax(stack, top));
+ }
+
+ /* Perform the match. Returns 0 when the result is true. */
+ return !strutils_star_glob_match(pattern, pattern_len, candidate,
+ candidate_len);
+}
+
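+/*
+ * Compare the two string operands on the stack, taking sequence lengths,
+ * plain-literal escaping and '*' wildcards into account. Returns a
+ * strcmp-like result: negative, zero or positive.
+ */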
+static
+int stack_strcmp(struct estack *stack, int top, const char *cmp_type __attribute__((unused)))
+{
+ const char *p = estack_bx(stack, top)->u.s.str, *q = estack_ax(stack, top)->u.s.str;
+ int ret;
+ int diff;
+
+ for (;;) {
+ int escaped_r0 = 0;
+
+ if (unlikely(p - estack_bx(stack, top)->u.s.str >= estack_bx(stack, top)->u.s.seq_len || *p == '\0')) {
+ if (q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0') {
+ return 0;
+ } else {
+ if (estack_ax(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(&q);
+ if (ret == -1)
+ return 0;
+ }
+ return -1;
+ }
+ }
+ if (unlikely(q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0')) {
+ if (estack_bx(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(&p);
+ if (ret == -1)
+ return 0;
+ }
+ return 1;
+ }
+ if (estack_bx(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(&p);
+ if (ret == -1) {
+ return 0;
+ } else if (ret == -2) {
+ escaped_r0 = 1;
+ }
+ /* else compare both char */
+ }
+ if (estack_ax(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(&q);
+ if (ret == -1) {
+ return 0;
+ } else if (ret == -2) {
+ if (!escaped_r0)
+ return -1;
+ } else {
+ if (escaped_r0)
+ return 1;
+ }
+ } else {
+ if (escaped_r0)
+ return 1;
+ }
+ diff = *p - *q;
+ if (diff != 0)
+ break;
+ p++;
+ q++;
+ }
+ return diff;
+}
+
+int lttng_bytecode_interpret_error(
+ struct lttng_ust_bytecode_runtime *bytecode_runtime __attribute__((unused)),
+ const char *stack_data __attribute__((unused)),
+ void *ctx __attribute__((unused)))
+{
+ return LTTNG_UST_BYTECODE_INTERPRETER_ERROR;
+}
+
+#ifdef INTERPRETER_USE_SWITCH
+
+/*
+ * Fallback for compilers that do not support taking address of labels.
+ */
+
+#define START_OP \
+	start_pc = &bytecode->code[0]; \
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
+ pc = next_pc) { \
+ dbg_printf("Executing op %s (%u)\n", \
+ lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc), \
+ (unsigned int) *(bytecode_opcode_t *) pc); \
+ switch (*(bytecode_opcode_t *) pc) {
+
+#define OP(name) jump_target_##name: __attribute__((unused)); \
+ case name
+
+#define PO break
+
+#define END_OP } \
+ }
+
+#define JUMP_TO(name) \
+ goto jump_target_##name
+
+#else
+
+/*
+ * Dispatch-table based interpreter.
+ */
+
+#define START_OP \
+ start_pc = &bytecode->code[0]; \
+ pc = next_pc = start_pc; \
+ if (unlikely(pc - start_pc >= bytecode->len)) \
+ goto end; \
+ goto *dispatch[*(bytecode_opcode_t *) pc];
+
+#define OP(name) \
+LABEL_##name
+
+#define PO \
+ pc = next_pc; \
+ goto *dispatch[*(bytecode_opcode_t *) pc];
+
+#define END_OP
+
+#define JUMP_TO(name) \
+ goto LABEL_##name
+
+#endif
+
+#define IS_INTEGER_REGISTER(reg_type) \
+ (reg_type == REG_U64 || reg_type == REG_S64)
+
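+/*
+ * Load the context field at index "idx" into "ptr", converting the context
+ * value into the matching interpreter object type.
+ */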
+static int context_get_index(struct lttng_ust_ctx *ctx,
+ struct load_ptr *ptr,
+ uint32_t idx)
+{
+
+ const struct lttng_ust_ctx_field *ctx_field;
+ const struct lttng_ust_event_field *field;
+ struct lttng_ust_ctx_value v;
+
+ ctx_field = &ctx->fields[idx];
+ field = ctx_field->event_field;
+ ptr->type = LOAD_OBJECT;
+ ptr->field = field;
+
+ switch (field->type->type) {
+ case lttng_ust_type_integer:
+ ctx_field->get_value(ctx_field->priv, &v);
+ if (lttng_ust_get_type_integer(field->type)->signedness) {
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.u.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.u.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ case lttng_ust_type_enum:
+ {
+ const struct lttng_ust_type_integer *itype;
+
+ itype = lttng_ust_get_type_integer(lttng_ust_get_type_enum(field->type)->container_type);
+ ctx_field->get_value(ctx_field->priv, &v);
+ if (itype->signedness) {
+ ptr->object_type = OBJECT_TYPE_SIGNED_ENUM;
+ ptr->u.s64 = v.u.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
+ ptr->u.u64 = v.u.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ }
+ case lttng_ust_type_array:
+ if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
+ ERR("Array nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (lttng_ust_get_type_array(field->type)->encoding == lttng_ust_string_encoding_none) {
+ ERR("Only string arrays are supported for contexts.");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field->priv, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case lttng_ust_type_sequence:
+ if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
+ ERR("Sequence nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (lttng_ust_get_type_sequence(field->type)->encoding == lttng_ust_string_encoding_none) {
+ ERR("Only string sequences are supported for contexts.");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field->priv, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case lttng_ust_type_string:
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field->priv, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case lttng_ust_type_float:
+ ptr->object_type = OBJECT_TYPE_DOUBLE;
+ ctx_field->get_value(ctx_field->priv, &v);
+ ptr->u.d = v.u.d;
+ ptr->ptr = &ptr->u.d;
+ break;
+ case lttng_ust_type_dynamic:
+ ctx_field->get_value(ctx_field->priv, &v);
+ switch (v.sel) {
+ case LTTNG_UST_DYNAMIC_TYPE_NONE:
+ return -EINVAL;
+ case LTTNG_UST_DYNAMIC_TYPE_U8:
+ case LTTNG_UST_DYNAMIC_TYPE_U16:
+ case LTTNG_UST_DYNAMIC_TYPE_U32:
+ case LTTNG_UST_DYNAMIC_TYPE_U64:
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.u.u64;
+ ptr->ptr = &ptr->u.u64;
+			dbg_printf("context get index dynamic u64 %" PRIu64 "\n", ptr->u.u64);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_S8:
+ case LTTNG_UST_DYNAMIC_TYPE_S16:
+ case LTTNG_UST_DYNAMIC_TYPE_S32:
+ case LTTNG_UST_DYNAMIC_TYPE_S64:
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.u.s64;
+ ptr->ptr = &ptr->u.s64;
+ dbg_printf("context get index dynamic s64 %" PRIi64 "\n", ptr->u.s64);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_FLOAT:
+ case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
+ ptr->object_type = OBJECT_TYPE_DOUBLE;
+ ptr->u.d = v.u.d;
+ ptr->ptr = &ptr->u.d;
+ dbg_printf("context get index dynamic double %g\n", ptr->u.d);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_STRING:
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ptr->ptr = v.u.str;
+ dbg_printf("context get index dynamic string %s\n", (const char *) ptr->ptr);
+ break;
+ default:
+ dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel);
+ return -EINVAL;
+ }
+ break;
+ default:
+ ERR("Unknown type: %d", (int) field->type->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
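+/*
+ * Apply a "get index" operation to the pointer on the stack top: index into
+ * an array/sequence object, look up a context field, or offset into the
+ * event payload. Leaves a REG_PTR entry on the stack top.
+ */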
+static int dynamic_get_index(struct lttng_ust_ctx *ctx,
+ struct bytecode_runtime *runtime,
+ uint64_t index, struct estack_entry *stack_top)
+{
+ int ret;
+ const struct bytecode_get_index_data *gid;
+
+ gid = (const struct bytecode_get_index_data *) &runtime->data[index];
+ switch (stack_top->u.ptr.type) {
+ case LOAD_OBJECT:
+ switch (stack_top->u.ptr.object_type) {
+ case OBJECT_TYPE_ARRAY:
+ {
+ const char *ptr;
+
+ assert(gid->offset < gid->array_len);
+ /* Skip count (unsigned long) */
+ ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+ ptr = ptr + gid->offset;
+ stack_top->u.ptr.ptr = ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ assert(stack_top->u.ptr.field->type->type == lttng_ust_type_array);
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ case OBJECT_TYPE_SEQUENCE:
+ {
+ const char *ptr;
+ size_t ptr_seq_len;
+
+ ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+ ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
+ if (gid->offset >= gid->elem.len * ptr_seq_len) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ptr = ptr + gid->offset;
+ stack_top->u.ptr.ptr = ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ assert(stack_top->u.ptr.field->type->type == lttng_ust_type_sequence);
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ case OBJECT_TYPE_STRUCT:
+ ERR("Nested structures are not supported yet.");
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_VARIANT:
+ default:
+ ERR("Unexpected get index type %d",
+ (int) stack_top->u.ptr.object_type);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
+ {
+ ret = context_get_index(ctx,
+ &stack_top->u.ptr,
+ gid->ctx_index);
+ if (ret) {
+ goto end;
+ }
+ break;
+ }
+ case LOAD_ROOT_PAYLOAD:
+ stack_top->u.ptr.ptr += gid->offset;
+ if (gid->elem.type == OBJECT_TYPE_STRING)
+ stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.type = LOAD_OBJECT;
+ stack_top->u.ptr.field = gid->field;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ break;
+ }
+
+ stack_top->type = REG_PTR;
+
+ return 0;
+
+end:
+ return ret;
+}
+
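+/*
+ * Dereference the object pointed to by the stack top and replace it with the
+ * loaded value (integer, enum, double, string or string sequence), applying
+ * byte swapping when the field byte order is reversed.
+ */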
+static int dynamic_load_field(struct estack_entry *stack_top)
+{
+ int ret;
+
+ switch (stack_top->u.ptr.type) {
+ case LOAD_OBJECT:
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ default:
+ dbg_printf("Interpreter warning: cannot load root, missing field name.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (stack_top->u.ptr.object_type) {
+ case OBJECT_TYPE_S8:
+ dbg_printf("op load field s8\n");
+ stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
+ stack_top->type = REG_S64;
+ break;
+ case OBJECT_TYPE_S16:
+ {
+ int16_t tmp;
+
+ dbg_printf("op load field s16\n");
+ tmp = *(int16_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_16(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_S32:
+ {
+ int32_t tmp;
+
+ dbg_printf("op load field s32\n");
+ tmp = *(int32_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_32(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_S64:
+ {
+ int64_t tmp;
+
+ dbg_printf("op load field s64\n");
+ tmp = *(int64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_SIGNED_ENUM:
+ {
+ int64_t tmp;
+
+ dbg_printf("op load field signed enumeration\n");
+ tmp = *(int64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_U8:
+ dbg_printf("op load field u8\n");
+ stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
+ stack_top->type = REG_U64;
+ break;
+ case OBJECT_TYPE_U16:
+ {
+ uint16_t tmp;
+
+ dbg_printf("op load field u16\n");
+ tmp = *(uint16_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_16(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_U32:
+ {
+ uint32_t tmp;
+
+ dbg_printf("op load field u32\n");
+ tmp = *(uint32_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_32(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_U64:
+ {
+ uint64_t tmp;
+
+ dbg_printf("op load field u64\n");
+ tmp = *(uint64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_UNSIGNED_ENUM:
+ {
+ uint64_t tmp;
+
+ dbg_printf("op load field unsigned enumeration\n");
+ tmp = *(uint64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_DOUBLE:
+ memcpy(&stack_top->u.d,
+ stack_top->u.ptr.ptr,
+ sizeof(struct literal_double));
+ stack_top->type = REG_DOUBLE;
+ break;
+ case OBJECT_TYPE_STRING:
+ {
+ const char *str;
+
+ dbg_printf("op load field string\n");
+ str = (const char *) stack_top->u.ptr.ptr;
+ stack_top->u.s.str = str;
+ if (unlikely(!stack_top->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ stack_top->u.s.seq_len = SIZE_MAX;
+ stack_top->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ stack_top->type = REG_STRING;
+ break;
+ }
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ {
+ const char *ptr;
+
+ dbg_printf("op load field string sequence\n");
+ ptr = stack_top->u.ptr.ptr;
+ stack_top->u.s.seq_len = *(unsigned long *) ptr;
+ stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ stack_top->type = REG_STRING;
+ if (unlikely(!stack_top->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ stack_top->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ break;
+ }
+ case OBJECT_TYPE_DYNAMIC:
+ /*
+ * Dynamic types in context are looked up
+ * by context get index.
+ */
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_SEQUENCE:
+ case OBJECT_TYPE_ARRAY:
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
+ ret = -EINVAL;
+ goto end;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
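+/*
+ * Convert the interpreter stack entry "ax" into a struct
+ * lttng_interpreter_output, loading pointer objects as needed.
+ * Returns 0 on success, a negative error code on failure.
+ */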
+static
+int lttng_bytecode_interpret_format_output(struct estack_entry *ax,
+ struct lttng_interpreter_output *output)
+{
+ int ret;
+
+again:
+ switch (ax->type) {
+ case REG_S64:
+ output->type = LTTNG_INTERPRETER_TYPE_S64;
+ output->u.s = ax->u.v;
+ break;
+ case REG_U64:
+ output->type = LTTNG_INTERPRETER_TYPE_U64;
+ output->u.u = (uint64_t) ax->u.v;
+ break;
+ case REG_DOUBLE:
+ output->type = LTTNG_INTERPRETER_TYPE_DOUBLE;
+ output->u.d = ax->u.d;
+ break;
+ case REG_STRING:
+ output->type = LTTNG_INTERPRETER_TYPE_STRING;
+ output->u.str.str = ax->u.s.str;
+ output->u.str.len = ax->u.s.seq_len;
+ break;
+ case REG_PTR:
+ switch (ax->u.ptr.object_type) {
+ case OBJECT_TYPE_S8:
+ case OBJECT_TYPE_S16:
+ case OBJECT_TYPE_S32:
+ case OBJECT_TYPE_S64:
+ case OBJECT_TYPE_U8:
+ case OBJECT_TYPE_U16:
+ case OBJECT_TYPE_U32:
+ case OBJECT_TYPE_U64:
+ case OBJECT_TYPE_DOUBLE:
+ case OBJECT_TYPE_STRING:
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ ret = dynamic_load_field(ax);
+ if (ret)
+ return ret;
+ /* Retry after loading ptr into stack top. */
+ goto again;
+ case OBJECT_TYPE_SEQUENCE:
+ output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
+ output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
+ output->u.sequence.nr_elem = *(unsigned long *) ax->u.ptr.ptr;
+ output->u.sequence.nested_type = lttng_ust_get_type_sequence(ax->u.ptr.field->type)->elem_type;
+ break;
+ case OBJECT_TYPE_ARRAY:
+ /* Skip count (unsigned long) */
+ output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
+ output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
+ output->u.sequence.nr_elem = lttng_ust_get_type_array(ax->u.ptr.field->type)->length;
+ output->u.sequence.nested_type = lttng_ust_get_type_array(ax->u.ptr.field->type)->elem_type;
+ break;
+ case OBJECT_TYPE_SIGNED_ENUM:
+ ret = dynamic_load_field(ax);
+ if (ret)
+ return ret;
+ output->type = LTTNG_INTERPRETER_TYPE_SIGNED_ENUM;
+ output->u.s = ax->u.v;
+ break;
+ case OBJECT_TYPE_UNSIGNED_ENUM:
+ ret = dynamic_load_field(ax);
+ if (ret)
+ return ret;
+ output->type = LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM;
+ output->u.u = ax->u.v;
+ break;
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Return LTTNG_UST_BYTECODE_INTERPRETER_OK on success.
+ * Return LTTNG_UST_BYTECODE_INTERPRETER_ERROR on error.
+ *
+ * For FILTER bytecode: expect a struct lttng_ust_bytecode_filter_ctx *
+ * as @ctx argument.
+ * For CAPTURE bytecode: expect a struct lttng_interpreter_output *
+ * as @ctx argument.
+ */
+int lttng_bytecode_interpret(struct lttng_ust_bytecode_runtime *ust_bytecode,
+ const char *interpreter_stack_data,
+ void *caller_ctx)
+{
+ struct bytecode_runtime *bytecode = caa_container_of(ust_bytecode, struct bytecode_runtime, p);
+ struct lttng_ust_ctx *ctx = lttng_ust_rcu_dereference(*ust_bytecode->pctx);
+ void *pc, *next_pc, *start_pc;
+ int ret = -EINVAL, retval = 0;
+ struct estack _stack;
+ struct estack *stack = &_stack;
+ register int64_t ax = 0, bx = 0;
+ register enum entry_type ax_t = REG_UNKNOWN, bx_t = REG_UNKNOWN;
+ register int top = INTERPRETER_STACK_EMPTY;
+#ifndef INTERPRETER_USE_SWITCH
+ static void *dispatch[NR_BYTECODE_OPS] = {
+ [ BYTECODE_OP_UNKNOWN ] = &&LABEL_BYTECODE_OP_UNKNOWN,
+
+ [ BYTECODE_OP_RETURN ] = &&LABEL_BYTECODE_OP_RETURN,
+
+ /* binary */
+ [ BYTECODE_OP_MUL ] = &&LABEL_BYTECODE_OP_MUL,
+ [ BYTECODE_OP_DIV ] = &&LABEL_BYTECODE_OP_DIV,
+ [ BYTECODE_OP_MOD ] = &&LABEL_BYTECODE_OP_MOD,
+ [ BYTECODE_OP_PLUS ] = &&LABEL_BYTECODE_OP_PLUS,
+ [ BYTECODE_OP_MINUS ] = &&LABEL_BYTECODE_OP_MINUS,
+ [ BYTECODE_OP_BIT_RSHIFT ] = &&LABEL_BYTECODE_OP_BIT_RSHIFT,
+ [ BYTECODE_OP_BIT_LSHIFT ] = &&LABEL_BYTECODE_OP_BIT_LSHIFT,
+ [ BYTECODE_OP_BIT_AND ] = &&LABEL_BYTECODE_OP_BIT_AND,
+ [ BYTECODE_OP_BIT_OR ] = &&LABEL_BYTECODE_OP_BIT_OR,
+ [ BYTECODE_OP_BIT_XOR ] = &&LABEL_BYTECODE_OP_BIT_XOR,
+
+ /* binary comparators */
+ [ BYTECODE_OP_EQ ] = &&LABEL_BYTECODE_OP_EQ,
+ [ BYTECODE_OP_NE ] = &&LABEL_BYTECODE_OP_NE,
+ [ BYTECODE_OP_GT ] = &&LABEL_BYTECODE_OP_GT,
+ [ BYTECODE_OP_LT ] = &&LABEL_BYTECODE_OP_LT,
+ [ BYTECODE_OP_GE ] = &&LABEL_BYTECODE_OP_GE,
+ [ BYTECODE_OP_LE ] = &&LABEL_BYTECODE_OP_LE,
+
+ /* string binary comparator */
+ [ BYTECODE_OP_EQ_STRING ] = &&LABEL_BYTECODE_OP_EQ_STRING,
+ [ BYTECODE_OP_NE_STRING ] = &&LABEL_BYTECODE_OP_NE_STRING,
+ [ BYTECODE_OP_GT_STRING ] = &&LABEL_BYTECODE_OP_GT_STRING,
+ [ BYTECODE_OP_LT_STRING ] = &&LABEL_BYTECODE_OP_LT_STRING,
+ [ BYTECODE_OP_GE_STRING ] = &&LABEL_BYTECODE_OP_GE_STRING,
+ [ BYTECODE_OP_LE_STRING ] = &&LABEL_BYTECODE_OP_LE_STRING,
+
+ /* globbing pattern binary comparator */
+ [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_EQ_STAR_GLOB_STRING,
+ [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_NE_STAR_GLOB_STRING,
+
+ /* s64 binary comparator */
+ [ BYTECODE_OP_EQ_S64 ] = &&LABEL_BYTECODE_OP_EQ_S64,
+ [ BYTECODE_OP_NE_S64 ] = &&LABEL_BYTECODE_OP_NE_S64,
+ [ BYTECODE_OP_GT_S64 ] = &&LABEL_BYTECODE_OP_GT_S64,
+ [ BYTECODE_OP_LT_S64 ] = &&LABEL_BYTECODE_OP_LT_S64,
+ [ BYTECODE_OP_GE_S64 ] = &&LABEL_BYTECODE_OP_GE_S64,
+ [ BYTECODE_OP_LE_S64 ] = &&LABEL_BYTECODE_OP_LE_S64,
+
+ /* double binary comparator */
+ [ BYTECODE_OP_EQ_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE,
+ [ BYTECODE_OP_NE_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_DOUBLE,
+ [ BYTECODE_OP_GT_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_DOUBLE,
+ [ BYTECODE_OP_LT_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_DOUBLE,
+ [ BYTECODE_OP_GE_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_DOUBLE,
+ [ BYTECODE_OP_LE_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_DOUBLE,
+
+ /* Mixed S64-double binary comparators */
+ [ BYTECODE_OP_EQ_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE_S64,
+ [ BYTECODE_OP_NE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_NE_DOUBLE_S64,
+ [ BYTECODE_OP_GT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GT_DOUBLE_S64,
+ [ BYTECODE_OP_LT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LT_DOUBLE_S64,
+ [ BYTECODE_OP_GE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GE_DOUBLE_S64,
+ [ BYTECODE_OP_LE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LE_DOUBLE_S64,
+
+ [ BYTECODE_OP_EQ_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_S64_DOUBLE,
+ [ BYTECODE_OP_NE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_S64_DOUBLE,
+ [ BYTECODE_OP_GT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_S64_DOUBLE,
+ [ BYTECODE_OP_LT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_S64_DOUBLE,
+ [ BYTECODE_OP_GE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_S64_DOUBLE,
+ [ BYTECODE_OP_LE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_S64_DOUBLE,
+
+ /* unary */
+ [ BYTECODE_OP_UNARY_PLUS ] = &&LABEL_BYTECODE_OP_UNARY_PLUS,
+ [ BYTECODE_OP_UNARY_MINUS ] = &&LABEL_BYTECODE_OP_UNARY_MINUS,
+ [ BYTECODE_OP_UNARY_NOT ] = &&LABEL_BYTECODE_OP_UNARY_NOT,
+ [ BYTECODE_OP_UNARY_PLUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_S64,
+ [ BYTECODE_OP_UNARY_MINUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_S64,
+ [ BYTECODE_OP_UNARY_NOT_S64 ] = &&LABEL_BYTECODE_OP_UNARY_NOT_S64,
+ [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_DOUBLE,
+ [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_DOUBLE,
+ [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_NOT_DOUBLE,
+
+ /* logical */
+ [ BYTECODE_OP_AND ] = &&LABEL_BYTECODE_OP_AND,
+ [ BYTECODE_OP_OR ] = &&LABEL_BYTECODE_OP_OR,
+
+ /* load field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF,
+ [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_STRING,
+ [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE,
+ [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_S64,
+ [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_DOUBLE,
+
+ /* load from immediate operand */
+ [ BYTECODE_OP_LOAD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STRING,
+ [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STAR_GLOB_STRING,
+ [ BYTECODE_OP_LOAD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_S64,
+ [ BYTECODE_OP_LOAD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_DOUBLE,
+
+ /* cast */
+ [ BYTECODE_OP_CAST_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_TO_S64,
+ [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_DOUBLE_TO_S64,
+ [ BYTECODE_OP_CAST_NOP ] = &&LABEL_BYTECODE_OP_CAST_NOP,
+
+ /* get context ref */
+ [ BYTECODE_OP_GET_CONTEXT_REF ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF,
+ [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_STRING,
+ [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_S64,
+ [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_DOUBLE,
+
+ /* Instructions for recursive traversal through composed types. */
+ [ BYTECODE_OP_GET_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_ROOT,
+ [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_APP_CONTEXT_ROOT,
+ [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = &&LABEL_BYTECODE_OP_GET_PAYLOAD_ROOT,
+
+ [ BYTECODE_OP_GET_SYMBOL ] = &&LABEL_BYTECODE_OP_GET_SYMBOL,
+ [ BYTECODE_OP_GET_SYMBOL_FIELD ] = &&LABEL_BYTECODE_OP_GET_SYMBOL_FIELD,
+ [ BYTECODE_OP_GET_INDEX_U16 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U16,
+ [ BYTECODE_OP_GET_INDEX_U64 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U64,
+
+ [ BYTECODE_OP_LOAD_FIELD ] = &&LABEL_BYTECODE_OP_LOAD_FIELD,
+ [ BYTECODE_OP_LOAD_FIELD_S8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S8,
+ [ BYTECODE_OP_LOAD_FIELD_S16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S16,
+ [ BYTECODE_OP_LOAD_FIELD_S32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S32,
+ [ BYTECODE_OP_LOAD_FIELD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S64,
+ [ BYTECODE_OP_LOAD_FIELD_U8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U8,
+ [ BYTECODE_OP_LOAD_FIELD_U16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U16,
+ [ BYTECODE_OP_LOAD_FIELD_U32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U32,
+ [ BYTECODE_OP_LOAD_FIELD_U64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U64,
+ [ BYTECODE_OP_LOAD_FIELD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_STRING,
+ [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_SEQUENCE,
+ [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_DOUBLE,
+
+ [ BYTECODE_OP_UNARY_BIT_NOT ] = &&LABEL_BYTECODE_OP_UNARY_BIT_NOT,
+
+ [ BYTECODE_OP_RETURN_S64 ] = &&LABEL_BYTECODE_OP_RETURN_S64,
+ };
+#endif /* #ifndef INTERPRETER_USE_SWITCH */
+
+ START_OP
+
+ OP(BYTECODE_OP_UNKNOWN):
+ OP(BYTECODE_OP_LOAD_FIELD_REF):
+#ifdef INTERPRETER_USE_SWITCH
+ default:
+#endif /* INTERPRETER_USE_SWITCH */
+ ERR("unknown bytecode op %u",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_RETURN):
+ /* LTTNG_UST_BYTECODE_INTERPRETER_ERROR or LTTNG_UST_BYTECODE_INTERPRETER_OK */
+ /* Handle dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64:
+ case REG_U64:
+ retval = !!estack_ax_v;
+ break;
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_PTR:
+ if (ust_bytecode->type != LTTNG_UST_BYTECODE_TYPE_CAPTURE) {
+ ret = -EINVAL;
+ goto end;
+ }
+ retval = 0;
+ break;
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = 0;
+ goto end;
+
+ OP(BYTECODE_OP_RETURN_S64):
+ /* LTTNG_UST_BYTECODE_INTERPRETER_ERROR or LTTNG_UST_BYTECODE_INTERPRETER_OK */
+ retval = !!estack_ax_v;
+ ret = 0;
+ goto end;
+
+ /* binary */
+ OP(BYTECODE_OP_MUL):
+ OP(BYTECODE_OP_DIV):
+ OP(BYTECODE_OP_MOD):
+ OP(BYTECODE_OP_PLUS):
+ OP(BYTECODE_OP_MINUS):
+ ERR("unsupported bytecode op %u",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_EQ):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_EQ_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_EQ_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_EQ_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_EQ_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_EQ_STRING);
+ case REG_STAR_GLOB_STRING:
+ JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING);
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_NE):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_NE_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_NE_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_NE_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_NE_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ case REG_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_NE_STRING);
+ case REG_STAR_GLOB_STRING:
+ JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ case REG_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING);
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_GT):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_GT_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_GT_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_GT_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_GT_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_GT_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_LT):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_LT_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_LT_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_LT_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_LT_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_LT_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_GE):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_GE_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_GE_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_GE_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_GE_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_GE_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_LE):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_LE_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_LE_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_LE_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_LE_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_LE_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+
+ OP(BYTECODE_OP_EQ_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "==") == 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "!=") != 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, ">") > 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "<") < 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, ">=") >= 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "<=") <= 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_STAR_GLOB_STRING):
+ {
+ int res;
+
+ res = (stack_star_glob_match(stack, top, "==") == 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_STAR_GLOB_STRING):
+ {
+ int res;
+
+ res = (stack_star_glob_match(stack, top, "!=") != 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_S64):
+ {
+ int res;
+
+ res = (estack_bx_v == estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v != estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_S64):
+ {
+ int res;
+
+ res = (estack_bx_v > estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_S64):
+ {
+ int res;
+
+ res = (estack_bx_v < estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v >= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v <= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d == estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d != estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d > estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d < estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d >= estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d <= estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ /* Mixed S64-double binary comparators */
+ OP(BYTECODE_OP_EQ_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d == estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d != estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d > estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d < estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d >= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d <= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v == estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v != estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v > estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v < estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v >= estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v <= estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_RSHIFT):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Catch undefined behavior. */
+ if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_LSHIFT):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Catch undefined behavior. */
+ if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_AND):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_OR):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_XOR):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ /* unary */
+ OP(BYTECODE_OP_UNARY_PLUS):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through. */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_UNARY_PLUS_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_UNARY_PLUS_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_UNARY_MINUS):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through. */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_UNARY_MINUS_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_UNARY_MINUS_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_UNARY_NOT):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through. */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_UNARY_NOT_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_UNARY_NOT_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_UNARY_BIT_NOT):
+ {
+ /* Dynamic typing. */
+ if (!IS_INTEGER_REGISTER(estack_ax_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ estack_ax_v = ~(uint64_t) estack_ax_v;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_UNARY_PLUS_S64):
+ OP(BYTECODE_OP_UNARY_PLUS_DOUBLE):
+ {
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_MINUS_S64):
+ {
+ estack_ax_v = -estack_ax_v;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_MINUS_DOUBLE):
+ {
+ estack_ax(stack, top)->u.d = -estack_ax(stack, top)->u.d;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_NOT_S64):
+ {
+ estack_ax_v = !estack_ax_v;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_NOT_DOUBLE):
+ {
+ estack_ax_v = !estack_ax(stack, top)->u.d;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+
+ /* logical */
+ OP(BYTECODE_OP_AND):
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) {
+ ret = -EINVAL;
+ goto end;
+ }
+ /* If AX is 0, skip and evaluate to 0 */
+ if (unlikely(estack_ax_v == 0)) {
+ dbg_printf("Jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ next_pc = start_pc + insn->skip_offset;
+ } else {
+ /* Pop 1 when jump not taken */
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ next_pc += sizeof(struct logical_op);
+ }
+ PO;
+ }
+ OP(BYTECODE_OP_OR):
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) {
+ ret = -EINVAL;
+ goto end;
+ }
+ /* If AX is nonzero, skip and evaluate to 1 */
+ if (unlikely(estack_ax_v != 0)) {
+ estack_ax_v = 1;
+ dbg_printf("Jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ next_pc = start_pc + insn->skip_offset;
+ } else {
+ /* Pop 1 when jump not taken */
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ next_pc += sizeof(struct logical_op);
+ }
+ PO;
+ }
+
+
+ /* load field ref */
+ OP(BYTECODE_OP_LOAD_FIELD_REF_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("load field ref offset %u type string\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str =
+ *(const char * const *) &interpreter_stack_data[ref->offset];
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax_t = REG_STRING;
+ dbg_printf("ref load string %s\n", estack_ax(stack, top)->u.s.str);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("load field ref offset %u type sequence\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.seq_len =
+ *(unsigned long *) &interpreter_stack_data[ref->offset];
+ estack_ax(stack, top)->u.s.str =
+ *(const char **) (&interpreter_stack_data[ref->offset
+ + sizeof(unsigned long)]);
+ estack_ax_t = REG_STRING;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("load field ref offset %u type s64\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v =
+ ((struct literal_numeric *) &interpreter_stack_data[ref->offset])->v;
+ estack_ax_t = REG_S64;
+ dbg_printf("ref load s64 %" PRIi64 "\n", estack_ax_v);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_DOUBLE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("load field ref offset %u type double\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ memcpy(&estack_ax(stack, top)->u.d, &interpreter_stack_data[ref->offset],
+ sizeof(struct literal_double));
+ estack_ax_t = REG_DOUBLE;
+ dbg_printf("ref load double %g\n", estack_ax(stack, top)->u.d);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ /* load from immediate operand */
+ OP(BYTECODE_OP_LOAD_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printf("load string %s\n", insn->data);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = insn->data;
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_PLAIN;
+ estack_ax_t = REG_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_STAR_GLOB_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printf("load globbing pattern %s\n", insn->data);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = insn->data;
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
+ estack_ax_t = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = ((struct literal_numeric *) insn->data)->v;
+ estack_ax_t = REG_S64;
+ dbg_printf("load s64 %" PRIi64 "\n", estack_ax_v);
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_DOUBLE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ memcpy(&estack_ax(stack, top)->u.d, insn->data,
+ sizeof(struct literal_double));
+ estack_ax_t = REG_DOUBLE;
+ dbg_printf("load double %g\n", estack_ax(stack, top)->u.d);
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_double);
+ PO;
+ }
+
+ /* cast */
+ OP(BYTECODE_OP_CAST_TO_S64):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64:
+ JUMP_TO(BYTECODE_OP_CAST_NOP);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_CAST_DOUBLE_TO_S64);
+ case REG_U64:
+ estack_ax_t = REG_S64;
+			next_pc += sizeof(struct cast_op);
+			PO;
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+
+ OP(BYTECODE_OP_CAST_DOUBLE_TO_S64):
+ {
+ estack_ax_v = (int64_t) estack_ax(stack, top)->u.d;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_CAST_NOP):
+ {
+ next_pc += sizeof(struct cast_op);
+ PO;
+ }
+
+ /* get context ref */
+ OP(BYTECODE_OP_GET_CONTEXT_REF):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ const struct lttng_ust_ctx_field *ctx_field;
+ struct lttng_ust_ctx_value v;
+
+ dbg_printf("get context ref offset %u type dynamic\n",
+ ref->offset);
+ ctx_field = &ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field->priv, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ switch (v.sel) {
+ case LTTNG_UST_DYNAMIC_TYPE_NONE:
+ ret = -EINVAL;
+ goto end;
+ case LTTNG_UST_DYNAMIC_TYPE_S64:
+ estack_ax_v = v.u.s64;
+ estack_ax_t = REG_S64;
+ dbg_printf("ref get context dynamic s64 %" PRIi64 "\n", estack_ax_v);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
+ estack_ax(stack, top)->u.d = v.u.d;
+ estack_ax_t = REG_DOUBLE;
+ dbg_printf("ref get context dynamic double %g\n", estack_ax(stack, top)->u.d);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_STRING:
+ estack_ax(stack, top)->u.s.str = v.u.str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ dbg_printf("ref get context dynamic string %s\n", estack_ax(stack, top)->u.s.str);
+ estack_ax_t = REG_STRING;
+ break;
+ default:
+ dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel);
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ const struct lttng_ust_ctx_field *ctx_field;
+ struct lttng_ust_ctx_value v;
+
+ dbg_printf("get context ref offset %u type string\n",
+ ref->offset);
+ ctx_field = &ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field->priv, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = v.u.str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax_t = REG_STRING;
+ dbg_printf("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ const struct lttng_ust_ctx_field *ctx_field;
+ struct lttng_ust_ctx_value v;
+
+ dbg_printf("get context ref offset %u type s64\n",
+ ref->offset);
+ ctx_field = &ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field->priv, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = v.u.s64;
+ estack_ax_t = REG_S64;
+ dbg_printf("ref get context s64 %" PRIi64 "\n", estack_ax_v);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_DOUBLE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ const struct lttng_ust_ctx_field *ctx_field;
+ struct lttng_ust_ctx_value v;
+
+ dbg_printf("get context ref offset %u type double\n",
+ ref->offset);
+ ctx_field = &ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field->priv, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ memcpy(&estack_ax(stack, top)->u.d, &v.u.d, sizeof(struct literal_double));
+ estack_ax_t = REG_DOUBLE;
+ dbg_printf("ref get context double %g\n", estack_ax(stack, top)->u.d);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_ROOT):
+ {
+ dbg_printf("op get context root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax_t = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_APP_CONTEXT_ROOT):
+ {
+ dbg_printf("op get app context root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_APP_CONTEXT;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax_t = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_PAYLOAD_ROOT):
+ {
+ dbg_printf("op get app payload root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
+ estack_ax(stack, top)->u.ptr.ptr = interpreter_stack_data;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax_t = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_SYMBOL):
+ {
+ dbg_printf("op get symbol\n");
+ switch (estack_ax(stack, top)->u.ptr.type) {
+ case LOAD_OBJECT:
+ ERR("Nested fields not implemented yet.");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ /*
+ * symbol lookup is performed by
+ * specialization.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_SYMBOL_FIELD):
+ {
+ /*
+ * Used for first variant encountered in a
+ * traversal. Variants are not implemented yet.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ OP(BYTECODE_OP_GET_INDEX_U16):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("op get index u16\n");
+ ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_INDEX_U64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("op get index u64\n");
+ ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD):
+ {
+ dbg_printf("op load field\n");
+ ret = dynamic_load_field(estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_S8):
+ {
+ dbg_printf("op load field s8\n");
+
+ estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S16):
+ {
+ dbg_printf("op load field s16\n");
+
+ estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S32):
+ {
+ dbg_printf("op load field s32\n");
+
+ estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S64):
+ {
+ dbg_printf("op load field s64\n");
+
+ estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U8):
+ {
+ dbg_printf("op load field u8\n");
+
+ estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U16):
+ {
+ dbg_printf("op load field u16\n");
+
+ estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U32):
+ {
+ dbg_printf("op load field u32\n");
+
+ estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U64):
+ {
+ dbg_printf("op load field u64\n");
+
+ estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_DOUBLE):
+ {
+ dbg_printf("op load field double\n");
+
+ memcpy(&estack_ax(stack, top)->u.d,
+ estack_ax(stack, top)->u.ptr.ptr,
+ sizeof(struct literal_double));
+ estack_ax(stack, top)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_STRING):
+ {
+ const char *str;
+
+ dbg_printf("op load field string\n");
+ str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.str = str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_SEQUENCE):
+ {
+ const char *ptr;
+
+ dbg_printf("op load field string sequence\n");
+ ptr = estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
+ estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ estack_ax(stack, top)->type = REG_STRING;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ END_OP
+end:
+ /* No need to prepare output if an error occurred. */
+ if (ret)
+ return LTTNG_UST_BYTECODE_INTERPRETER_ERROR;
+
+ /* Prepare output. */
+ switch (ust_bytecode->type) {
+ case LTTNG_UST_BYTECODE_TYPE_FILTER:
+ {
+ struct lttng_ust_bytecode_filter_ctx *filter_ctx =
+ (struct lttng_ust_bytecode_filter_ctx *) caller_ctx;
+ if (retval)
+ filter_ctx->result = LTTNG_UST_BYTECODE_FILTER_ACCEPT;
+ else
+ filter_ctx->result = LTTNG_UST_BYTECODE_FILTER_REJECT;
+ break;
+ }
+ case LTTNG_UST_BYTECODE_TYPE_CAPTURE:
+ ret = lttng_bytecode_interpret_format_output(estack_ax(stack, top),
+ (struct lttng_interpreter_output *) caller_ctx);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret)
+ return LTTNG_UST_BYTECODE_INTERPRETER_ERROR;
+ else
+ return LTTNG_UST_BYTECODE_INTERPRETER_OK;
+}
+
+/*
+ * Return LTTNG_UST_EVENT_FILTER_ACCEPT or LTTNG_UST_EVENT_FILTER_REJECT.
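+ * The event record is accepted if at least one of the filter bytecode
+ * programs attached to the event evaluates to "accept".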
+ */
+int lttng_ust_interpret_event_filter(struct lttng_ust_event_common *event,
+ const char *interpreter_stack_data,
+ void *event_filter_ctx __attribute__((unused)))
+{
+ struct lttng_ust_bytecode_runtime *filter_bc_runtime;
+ struct cds_list_head *filter_bytecode_runtime_head = &event->priv->filter_bytecode_runtime_head;
+ struct lttng_ust_bytecode_filter_ctx bytecode_filter_ctx;
+ bool filter_record = false;
+
+ cds_list_for_each_entry_rcu(filter_bc_runtime, filter_bytecode_runtime_head, node) {
+ if (caa_likely(filter_bc_runtime->interpreter_func(filter_bc_runtime,
+ interpreter_stack_data, &bytecode_filter_ctx) == LTTNG_UST_BYTECODE_INTERPRETER_OK)) {
+ if (caa_unlikely(bytecode_filter_ctx.result == LTTNG_UST_BYTECODE_FILTER_ACCEPT)) {
+ filter_record = true;
+ break;
+ }
+ }
+ }
+ if (filter_record)
+ return LTTNG_UST_EVENT_FILTER_ACCEPT;
+ else
+ return LTTNG_UST_EVENT_FILTER_REJECT;
+}
+
+#undef START_OP
+#undef OP
+#undef PO
+#undef END_OP
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST bytecode specializer.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <lttng/ust-utils.h>
+
+#include "context-internal.h"
+#include "lttng-bytecode.h"
+#include "ust-events-internal.h"
+#include "common/macros.h"
+
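+/*
+ * lttng_fls(): find last (most significant) set bit, 1-based; returns 0
+ * when no bit is set. Used by get_count_order() below to round
+ * allocation sizes up to the next power of two.
+ */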
+static int lttng_fls(int val)
+{
+ int r = 32;
+ unsigned int x = (unsigned int) val;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xFFFF0000U)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xFF000000U)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xF0000000U)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xC0000000U)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000U)) {
+ r -= 1;
+ }
+ return r;
+}
+
+static int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = lttng_fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
+
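+/*
+ * Reserve "len" bytes aligned on "align" at the end of the runtime data
+ * area, growing the allocation geometrically when needed. Returns the
+ * offset of the reserved region, or a negative error value.
+ */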
+static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
+ size_t align, size_t len)
+{
+ ssize_t ret;
+ size_t padding = lttng_ust_offset_align(runtime->data_len, align);
+ size_t new_len = runtime->data_len + padding + len;
+ size_t new_alloc_len = new_len;
+ size_t old_alloc_len = runtime->data_alloc_len;
+
+ if (new_len > BYTECODE_MAX_DATA_LEN)
+ return -EINVAL;
+
+ if (new_alloc_len > old_alloc_len) {
+ char *newptr;
+
+ new_alloc_len =
+ max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
+ newptr = realloc(runtime->data, new_alloc_len);
+ if (!newptr)
+ return -ENOMEM;
+ runtime->data = newptr;
+		/* Zero the newly allocated memory. */
+ memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
+ runtime->data_alloc_len = new_alloc_len;
+ }
+ runtime->data_len += padding;
+ ret = runtime->data_len;
+ runtime->data_len += len;
+ return ret;
+}
+
+static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
+ const void *p, size_t align, size_t len)
+{
+ ssize_t offset;
+
+ offset = bytecode_reserve_data(runtime, align, len);
+ if (offset < 0)
+ return -ENOMEM;
+ memcpy(&runtime->data[offset], p, len);
+ return offset;
+}
+
+static int specialize_load_field(struct vstack_entry *stack_top,
+ struct load_op *insn)
+{
+ int ret;
+
+ switch (stack_top->load.type) {
+ case LOAD_OBJECT:
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ default:
+ dbg_printf("Bytecode warning: cannot load root, missing field name.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (stack_top->load.object_type) {
+ case OBJECT_TYPE_S8:
+ dbg_printf("op load field s8\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S8;
+ break;
+ case OBJECT_TYPE_S16:
+ dbg_printf("op load field s16\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S16;
+ break;
+ case OBJECT_TYPE_S32:
+ dbg_printf("op load field s32\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S32;
+ break;
+ case OBJECT_TYPE_S64:
+ dbg_printf("op load field s64\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S64;
+ break;
+ case OBJECT_TYPE_SIGNED_ENUM:
+ dbg_printf("op load field signed enumeration\n");
+ stack_top->type = REG_PTR;
+ break;
+ case OBJECT_TYPE_U8:
+ dbg_printf("op load field u8\n");
+ stack_top->type = REG_U64;
+ insn->op = BYTECODE_OP_LOAD_FIELD_U8;
+ break;
+ case OBJECT_TYPE_U16:
+ dbg_printf("op load field u16\n");
+ stack_top->type = REG_U64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_U16;
+ break;
+ case OBJECT_TYPE_U32:
+ dbg_printf("op load field u32\n");
+ stack_top->type = REG_U64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_U32;
+ break;
+ case OBJECT_TYPE_U64:
+ dbg_printf("op load field u64\n");
+ stack_top->type = REG_U64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_U64;
+ break;
+ case OBJECT_TYPE_UNSIGNED_ENUM:
+ dbg_printf("op load field unsigned enumeration\n");
+ stack_top->type = REG_PTR;
+ break;
+ case OBJECT_TYPE_DOUBLE:
+ stack_top->type = REG_DOUBLE;
+ insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE;
+ break;
+ case OBJECT_TYPE_STRING:
+ dbg_printf("op load field string\n");
+ stack_top->type = REG_STRING;
+ insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
+ break;
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ dbg_printf("op load field string sequence\n");
+ stack_top->type = REG_STRING;
+ insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
+ break;
+ case OBJECT_TYPE_DYNAMIC:
+ dbg_printf("op load field dynamic\n");
+ stack_top->type = REG_UNKNOWN;
+ /* Don't specialize load op. */
+ break;
+ case OBJECT_TYPE_SEQUENCE:
+ case OBJECT_TYPE_ARRAY:
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+		ERR("Sequences, arrays, structs and variants cannot be loaded (nested types).");
+ ret = -EINVAL;
+ goto end;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
+static int specialize_get_index_object_type(enum object_type *otype,
+ int signedness, uint32_t elem_len)
+{
+ switch (elem_len) {
+ case 8:
+ if (signedness)
+ *otype = OBJECT_TYPE_S8;
+ else
+ *otype = OBJECT_TYPE_U8;
+ break;
+ case 16:
+ if (signedness)
+ *otype = OBJECT_TYPE_S16;
+ else
+ *otype = OBJECT_TYPE_U16;
+ break;
+ case 32:
+ if (signedness)
+ *otype = OBJECT_TYPE_S32;
+ else
+ *otype = OBJECT_TYPE_U32;
+ break;
+ case 64:
+ if (signedness)
+ *otype = OBJECT_TYPE_S64;
+ else
+ *otype = OBJECT_TYPE_U64;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
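+/*
+ * Specialize an array/sequence index access: precompute the element
+ * offset and type into a bytecode_get_index_data record appended to the
+ * runtime data area, and make the get_index instruction reference that
+ * record by offset.
+ */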
+static int specialize_get_index(struct bytecode_runtime *runtime,
+ struct load_op *insn, uint64_t index,
+ struct vstack_entry *stack_top,
+ int idx_len)
+{
+ int ret;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ memset(&gid, 0, sizeof(gid));
+ switch (stack_top->load.type) {
+ case LOAD_OBJECT:
+ switch (stack_top->load.object_type) {
+ case OBJECT_TYPE_ARRAY:
+ {
+ const struct lttng_ust_type_integer *integer_type;
+ const struct lttng_ust_event_field *field;
+ uint32_t elem_len, num_elems;
+ int signedness;
+
+ field = stack_top->load.field;
+ switch (field->type->type) {
+ case lttng_ust_type_array:
+ if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
+ ret = -EINVAL;
+ goto end;
+ }
+ integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_array(field->type)->elem_type);
+ num_elems = lttng_ust_get_type_array(field->type)->length;
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ elem_len = integer_type->size;
+ signedness = integer_type->signedness;
+ if (index >= num_elems) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = specialize_get_index_object_type(&stack_top->load.object_type,
+ signedness, elem_len);
+ if (ret)
+ goto end;
+ gid.offset = index * (elem_len / CHAR_BIT);
+ gid.array_len = num_elems * (elem_len / CHAR_BIT);
+ gid.elem.type = stack_top->load.object_type;
+ gid.elem.len = elem_len;
+ if (integer_type->reverse_byte_order)
+ gid.elem.rev_bo = true;
+ stack_top->load.rev_bo = gid.elem.rev_bo;
+ break;
+ }
+ case OBJECT_TYPE_SEQUENCE:
+ {
+ const struct lttng_ust_type_integer *integer_type;
+ const struct lttng_ust_event_field *field;
+ uint32_t elem_len;
+ int signedness;
+
+ field = stack_top->load.field;
+ switch (field->type->type) {
+ case lttng_ust_type_sequence:
+ if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
+ ret = -EINVAL;
+ goto end;
+ }
+ integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_sequence(field->type)->elem_type);
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ elem_len = integer_type->size;
+ signedness = integer_type->signedness;
+ ret = specialize_get_index_object_type(&stack_top->load.object_type,
+ signedness, elem_len);
+ if (ret)
+ goto end;
+ gid.offset = index * (elem_len / CHAR_BIT);
+ gid.elem.type = stack_top->load.object_type;
+ gid.elem.len = elem_len;
+ if (integer_type->reverse_byte_order)
+ gid.elem.rev_bo = true;
+ stack_top->load.rev_bo = gid.elem.rev_bo;
+ break;
+ }
+ case OBJECT_TYPE_STRUCT:
+ /* Only generated by the specialize phase. */
+ case OBJECT_TYPE_VARIANT: /* Fall-through */
+ default:
+ ERR("Unexpected get index type %d",
+ (int) stack_top->load.object_type);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ ERR("Index lookup for root field not implemented yet.");
+ ret = -EINVAL;
+ goto end;
+ }
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (idx_len) {
+ case 2:
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ break;
+ case 8:
+ ((struct get_index_u64 *) insn->data)->index = data_offset;
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+
+ return 0;
+
+end:
+ return ret;
+}
+
+static int specialize_context_lookup_name(struct lttng_ust_ctx *ctx,
+ struct bytecode_runtime *bytecode,
+ struct load_op *insn)
+{
+ uint16_t offset;
+ const char *name;
+
+ offset = ((struct get_symbol *) insn->data)->offset;
+ name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
+ return lttng_get_context_index(ctx, name);
+}
+
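+/*
+ * Map an event field type description onto the object type used by the
+ * interpreter load instructions. Array and sequence context fields
+ * ("is_context") are exposed as strings.
+ */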
+static int specialize_load_object(const struct lttng_ust_event_field *field,
+ struct vstack_load *load, bool is_context)
+{
+ load->type = LOAD_OBJECT;
+
+ switch (field->type->type) {
+ case lttng_ust_type_integer:
+ if (lttng_ust_get_type_integer(field->type)->signedness)
+ load->object_type = OBJECT_TYPE_S64;
+ else
+ load->object_type = OBJECT_TYPE_U64;
+ load->rev_bo = false;
+ break;
+ case lttng_ust_type_enum:
+ {
+ const struct lttng_ust_type_integer *itype;
+
+ itype = lttng_ust_get_type_integer(lttng_ust_get_type_enum(field->type)->container_type);
+ if (itype->signedness)
+ load->object_type = OBJECT_TYPE_SIGNED_ENUM;
+ else
+ load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
+ load->rev_bo = false;
+ break;
+ }
+ case lttng_ust_type_array:
+ if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
+ ERR("Array nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (lttng_ust_get_type_array(field->type)->encoding == lttng_ust_string_encoding_none) {
+ load->object_type = OBJECT_TYPE_ARRAY;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+ case lttng_ust_type_sequence:
+ if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
+ ERR("Sequence nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (lttng_ust_get_type_sequence(field->type)->encoding == lttng_ust_string_encoding_none) {
+ load->object_type = OBJECT_TYPE_SEQUENCE;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+
+ case lttng_ust_type_string:
+ load->object_type = OBJECT_TYPE_STRING;
+ break;
+ case lttng_ust_type_float:
+ load->object_type = OBJECT_TYPE_DOUBLE;
+ break;
+ case lttng_ust_type_dynamic:
+ load->object_type = OBJECT_TYPE_DYNAMIC;
+ break;
+ default:
+ ERR("Unknown type: %d", (int) field->type->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int specialize_context_lookup(struct lttng_ust_ctx *ctx,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ int idx, ret;
+ const struct lttng_ust_ctx_field *ctx_field;
+ const struct lttng_ust_event_field *field;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ idx = specialize_context_lookup_name(ctx, runtime, insn);
+ if (idx < 0) {
+ return -ENOENT;
+ }
+ ctx_field = &ctx->fields[idx];
+ field = ctx_field->event_field;
+ ret = specialize_load_object(field, load, true);
+ if (ret)
+ return ret;
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = BYTECODE_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.ctx_index = idx;
+ gid.elem.type = load->object_type;
+ gid.elem.rev_bo = load->rev_bo;
+ gid.field = field;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ return -EINVAL;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ return 0;
+}
+
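+/*
+ * Look up an application-specific context field: the "$app." prefix is
+ * prepended to the symbol name, and the context field is added to the
+ * RCU-managed context table on first use.
+ */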
+static int specialize_app_context_lookup(struct lttng_ust_ctx **pctx,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ uint16_t offset;
+ const char *orig_name;
+ char *name = NULL;
+ int idx, ret;
+ const struct lttng_ust_ctx_field *ctx_field;
+ const struct lttng_ust_event_field *field;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ offset = ((struct get_symbol *) insn->data)->offset;
+ orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+ name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
+ if (!name) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ strcpy(name, "$app.");
+ strcat(name, orig_name);
+ idx = lttng_get_context_index(*pctx, name);
+ if (idx < 0) {
+ assert(lttng_context_is_app(name));
+ ret = lttng_ust_add_app_context_to_ctx_rcu(name,
+ pctx);
+		if (ret)
+			goto end;
+		idx = lttng_get_context_index(*pctx, name);
+		if (idx < 0) {
+			ret = -ENOENT;
+			goto end;
+		}
+ }
+ ctx_field = &(*pctx)->fields[idx];
+ field = ctx_field->event_field;
+ ret = specialize_load_object(field, load, true);
+ if (ret)
+ goto end;
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = BYTECODE_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.ctx_index = idx;
+ gid.elem.type = load->object_type;
+ gid.elem.rev_bo = load->rev_bo;
+ gid.field = field;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ ret = 0;
+end:
+ free(name);
+ return ret;
+}
+
+static int specialize_payload_lookup(const struct lttng_ust_event_desc *event_desc,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ const char *name;
+ uint16_t offset;
+ unsigned int i, nr_fields;
+ bool found = false;
+ uint32_t field_offset = 0;
+ const struct lttng_ust_event_field *field;
+ int ret;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ nr_fields = event_desc->nr_fields;
+ offset = ((struct get_symbol *) insn->data)->offset;
+ name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+ for (i = 0; i < nr_fields; i++) {
+ field = event_desc->fields[i];
+ if (field->nofilter) {
+ continue;
+ }
+ if (!strcmp(field->name, name)) {
+ found = true;
+ break;
+ }
+ /* compute field offset on stack */
+ switch (field->type->type) {
+ case lttng_ust_type_integer:
+ case lttng_ust_type_enum:
+ field_offset += sizeof(int64_t);
+ break;
+ case lttng_ust_type_array:
+ case lttng_ust_type_sequence:
+ field_offset += sizeof(unsigned long);
+ field_offset += sizeof(void *);
+ break;
+ case lttng_ust_type_string:
+ field_offset += sizeof(void *);
+ break;
+ case lttng_ust_type_float:
+ field_offset += sizeof(double);
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ if (!found) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = specialize_load_object(field, load, false);
+ if (ret)
+ goto end;
+
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = BYTECODE_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.offset = field_offset;
+ gid.elem.type = load->object_type;
+ gid.elem.rev_bo = load->rev_bo;
+ gid.field = field;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ ret = 0;
+end:
+ return ret;
+}
+
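+/*
+ * Specialization pass: walk the bytecode once with a type-tracking
+ * virtual stack and replace generic opcodes with type-specialized
+ * variants wherever operand types are statically known.
+ */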
+int lttng_bytecode_specialize(const struct lttng_ust_event_desc *event_desc,
+ struct bytecode_runtime *bytecode)
+{
+ void *pc, *next_pc, *start_pc;
+ int ret = -EINVAL;
+ struct vstack _stack;
+ struct vstack *stack = &_stack;
+ struct lttng_ust_ctx **pctx = bytecode->p.pctx;
+
+ vstack_init(stack);
+
+ start_pc = &bytecode->code[0];
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+ pc = next_pc) {
+ switch (*(bytecode_opcode_t *) pc) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ ERR("unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ case BYTECODE_OP_RETURN:
+ if (vstack_ax(stack)->type == REG_S64 ||
+ vstack_ax(stack)->type == REG_U64)
+ *(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64;
+ ret = 0;
+ goto end;
+
+ case BYTECODE_OP_RETURN_S64:
+ if (vstack_ax(stack)->type != REG_S64 &&
+ vstack_ax(stack)->type != REG_U64) {
+ ERR("Unexpected register type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = 0;
+ goto end;
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ ERR("unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ case BYTECODE_OP_EQ:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+ insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
+ else
+ insn->op = BYTECODE_OP_EQ_STRING;
+ break;
+ case REG_STAR_GLOB_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_EQ_S64;
+ else
+ insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_EQ_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_NE:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+ insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
+ else
+ insn->op = BYTECODE_OP_NE_STRING;
+ break;
+ case REG_STAR_GLOB_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_NE_S64;
+ else
+ insn->op = BYTECODE_OP_NE_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_NE_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_NE_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_GT:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ ERR("invalid register type for > binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_GT_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_GT_S64;
+ else
+ insn->op = BYTECODE_OP_GT_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_GT_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_GT_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_LT:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ ERR("invalid register type for < binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_LT_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_LT_S64;
+ else
+ insn->op = BYTECODE_OP_LT_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_LT_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_LT_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_GE:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ ERR("invalid register type for >= binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_GE_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_GE_S64;
+ else
+ insn->op = BYTECODE_OP_GE_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_GE_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_GE_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+			vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+ case BYTECODE_OP_LE:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ ERR("invalid register type for <= binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_LE_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_LE_S64;
+ else
+ insn->op = BYTECODE_OP_LE_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_LE_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_LE_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+			/* Pop 2, push 1 */
+			if (vstack_pop(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_BIT_RSHIFT:
+ case BYTECODE_OP_BIT_LSHIFT:
+ case BYTECODE_OP_BIT_AND:
+ case BYTECODE_OP_BIT_OR:
+ case BYTECODE_OP_BIT_XOR:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ {
+ struct unary_op *insn = (struct unary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_S64:
+ case REG_U64:
+ insn->op = BYTECODE_OP_UNARY_PLUS_S64;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
+ break;
+ case REG_UNKNOWN: /* Dynamic typing. */
+ break;
+ }
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_MINUS:
+ {
+ struct unary_op *insn = (struct unary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_S64:
+ case REG_U64:
+ insn->op = BYTECODE_OP_UNARY_MINUS_S64;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
+ break;
+ case REG_UNKNOWN: /* Dynamic typing. */
+ break;
+ }
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_NOT:
+ {
+ struct unary_op *insn = (struct unary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_S64:
+ case REG_U64:
+ insn->op = BYTECODE_OP_UNARY_NOT_S64;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
+ break;
+ case REG_UNKNOWN: /* Dynamic typing. */
+ break;
+ }
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ /* Continue to next instruction */
+ /* Pop 1 when jump not taken */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct logical_op);
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ ERR("Unknown field ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_double);
+ break;
+ }
+
+ /* cast */
+ case BYTECODE_OP_CAST_TO_S64:
+ {
+ struct cast_op *insn = (struct cast_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ ERR("Cast op can only be applied to numeric or floating point registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ insn->op = BYTECODE_OP_CAST_NOP;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
+ break;
+ case REG_UNKNOWN:
+ case REG_U64:
+ break;
+ }
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+ case BYTECODE_OP_CAST_NOP:
+ {
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ assert(vstack_ax(stack)->type == REG_PTR);
+ /* Pop 1, push 1 */
+ ret = specialize_load_field(vstack_ax(stack), insn);
+ if (ret)
+ goto end;
+
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printf("op get symbol\n");
+ switch (vstack_ax(stack)->load.type) {
+ case LOAD_OBJECT:
+ ERR("Nested fields not implemented yet.");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ /* Lookup context field. */
+ ret = specialize_context_lookup(*pctx,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ case LOAD_ROOT_APP_CONTEXT:
+ /* Lookup app context field. */
+ ret = specialize_app_context_lookup(pctx,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ case LOAD_ROOT_PAYLOAD:
+ /* Lookup event payload field. */
+ ret = specialize_payload_lookup(event_desc,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ {
+ /* Always generated by specialize phase. */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("op get index u16\n");
+ /* Pop 1, push 1 */
+ ret = specialize_get_index(bytecode, insn, index->index,
+ vstack_ax(stack), sizeof(*index));
+ if (ret)
+ goto end;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("op get index u64\n");
+ /* Pop 1, push 1 */
+ ret = specialize_get_index(bytecode, insn, index->index,
+ vstack_ax(stack), sizeof(*index));
+ if (ret)
+ goto end;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
+ }
+ }
+end:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST bytecode validator.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+#include <time.h>
+
+#include "rculfhash.h"
+
+#include "lttng-bytecode.h"
+#include "lttng-hash-helper.h"
+#include "string-utils.h"
+#include "ust-events-internal.h"
+#include "common/macros.h"
+
+/*
+ * Number of merge points used to size the hash table. The hash table is
+ * initialized to that size and never resized, because we do not want to
+ * trigger RCU worker thread execution: we fall back on linear traversal
+ * if the number of merge points exceeds this value.
+ */
+#define DEFAULT_NR_MERGE_POINTS 128
+#define MIN_NR_BUCKETS 128
+#define MAX_NR_BUCKETS 128
+
+/* merge point table node */
+struct lfht_mp_node {
+ struct lttng_ust_lfht_node node;
+
+ /* Context at merge point */
+ struct vstack stack;
+ unsigned long target_pc;
+};
+
+static unsigned long lttng_hash_seed;
+static unsigned int lttng_hash_seed_ready;
+
+static
+int lttng_hash_match(struct lttng_ust_lfht_node *node, const void *key)
+{
+ struct lfht_mp_node *mp_node =
+ caa_container_of(node, struct lfht_mp_node, node);
+ unsigned long key_pc = (unsigned long) key;
+
+ if (mp_node->target_pc == key_pc)
+ return 1;
+ else
+ return 0;
+}
+
+static
+int merge_points_compare(const struct vstack *stacka,
+ const struct vstack *stackb)
+{
+ int i, len;
+
+ if (stacka->top != stackb->top)
+ return 1;
+ len = stacka->top + 1;
+ assert(len >= 0);
+ for (i = 0; i < len; i++) {
+ if (stacka->e[i].type != REG_UNKNOWN
+ && stackb->e[i].type != REG_UNKNOWN
+ && stacka->e[i].type != stackb->e[i].type)
+ return 1;
+ }
+ return 0;
+}
+
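+/*
+ * Record the virtual stack state at a branch target (merge point). If a
+ * state was already recorded for that target, both states must agree on
+ * every known register type, otherwise validation fails.
+ */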
+static
+int merge_point_add_check(struct lttng_ust_lfht *ht, unsigned long target_pc,
+ const struct vstack *stack)
+{
+ struct lfht_mp_node *node;
+ unsigned long hash = lttng_hash_mix((const char *) target_pc,
+ sizeof(target_pc),
+ lttng_hash_seed);
+ struct lttng_ust_lfht_node *ret;
+
+ dbg_printf("Bytecode: adding merge point at offset %lu, hash %lu\n",
+ target_pc, hash);
+ node = zmalloc(sizeof(struct lfht_mp_node));
+ if (!node)
+ return -ENOMEM;
+ node->target_pc = target_pc;
+ memcpy(&node->stack, stack, sizeof(node->stack));
+ ret = lttng_ust_lfht_add_unique(ht, hash, lttng_hash_match,
+ (const char *) target_pc, &node->node);
+ if (ret != &node->node) {
+ struct lfht_mp_node *ret_mp =
+ caa_container_of(ret, struct lfht_mp_node, node);
+
+ /* Key already present */
+ dbg_printf("Bytecode: compare merge points for offset %lu, hash %lu\n",
+ target_pc, hash);
+ free(node);
+ if (merge_points_compare(stack, &ret_mp->stack)) {
+ ERR("Merge points differ for offset %lu\n",
+ target_pc);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Binary comparators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ */
+static
+int bin_op_compare_check(struct vstack *stack, bytecode_opcode_t opcode,
+ const char *str)
+{
+ if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+ goto error_empty;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ break;
+ case REG_STAR_GLOB_STRING:
+ if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
+ goto error_mismatch;
+ }
+ break;
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ goto error_mismatch;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
+ goto error_mismatch;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ goto error_mismatch;
+ }
+ break;
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ goto error_mismatch;
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ break;
+ }
+ break;
+ }
+ return 0;
+
+unknown:
+ return 1;
+
+error_mismatch:
+ ERR("type mismatch for '%s' binary operator\n", str);
+ return -EINVAL;
+
+error_empty:
+ ERR("empty stack for '%s' binary operator\n", str);
+ return -EINVAL;
+
+error_type:
+ ERR("unknown type for '%s' binary operator\n", str);
+ return -EINVAL;
+}
+
+/*
+ * Binary bitwise operators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ */
+static
+int bin_op_bitwise_check(struct vstack *stack,
+ bytecode_opcode_t opcode __attribute__((unused)),
+ const char *str)
+{
+ if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+ goto error_empty;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_S64:
+ case REG_U64:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_S64:
+ case REG_U64:
+ break;
+ }
+ break;
+ }
+ return 0;
+
+unknown:
+ return 1;
+
+error_empty:
+ ERR("empty stack for '%s' binary operator\n", str);
+ return -EINVAL;
+
+error_type:
+ ERR("unknown type for '%s' binary operator\n", str);
+ return -EINVAL;
+}
+
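+/*
+ * Validate that a get_symbol offset points within the bytecode data
+ * section and that the referenced string is null-terminated before the
+ * end of that section.
+ */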
+static
+int validate_get_symbol(struct bytecode_runtime *bytecode,
+ const struct get_symbol *sym)
+{
+ const char *str, *str_limit;
+ size_t len_limit;
+
+ if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
+ return -EINVAL;
+
+ str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
+ str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
+ len_limit = str_limit - str;
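+	/* The symbol name must be null-terminated within the bytecode data. */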
+ if (strnlen(str, len_limit) == len_limit)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Validate bytecode range overflow within the validation pass.
+ * Called for each instruction encountered.
+ */
+static
+int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
+ char *start_pc, char *pc)
+{
+ int ret = 0;
+
+ switch (*(bytecode_opcode_t *) pc) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ {
+ ERR("unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ break;
+ }
+
+ case BYTECODE_OP_RETURN:
+ case BYTECODE_OP_RETURN_S64:
+ {
+ if (unlikely(pc + sizeof(struct return_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ {
+ ERR("unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ break;
+ }
+
+ case BYTECODE_OP_EQ:
+ case BYTECODE_OP_NE:
+ case BYTECODE_OP_GT:
+ case BYTECODE_OP_LT:
+ case BYTECODE_OP_GE:
+ case BYTECODE_OP_LE:
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ case BYTECODE_OP_BIT_RSHIFT:
+ case BYTECODE_OP_BIT_LSHIFT:
+ case BYTECODE_OP_BIT_AND:
+ case BYTECODE_OP_BIT_OR:
+ case BYTECODE_OP_BIT_XOR:
+ {
+ if (unlikely(pc + sizeof(struct binary_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ case BYTECODE_OP_UNARY_MINUS:
+ case BYTECODE_OP_UNARY_NOT:
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ if (unlikely(pc + sizeof(struct unary_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ if (unlikely(pc + sizeof(struct logical_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ ERR("Unknown field ref type\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ uint32_t str_len, maxlen;
+
+ if (unlikely(pc + sizeof(struct load_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ break;
+ }
+
+ maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
+ str_len = strnlen(insn->data, maxlen);
+ if (unlikely(str_len >= maxlen)) {
+ /* Final '\0' not found within range */
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_CAST_TO_S64:
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ case BYTECODE_OP_CAST_NOP:
+ {
+ if (unlikely(pc + sizeof(struct cast_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ case BYTECODE_OP_LOAD_FIELD:
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ if (unlikely(pc + sizeof(struct load_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case BYTECODE_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ break;
+ }
+ ret = validate_get_symbol(bytecode, sym);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ ERR("Unexpected get symbol field");
+ ret = -EINVAL;
+ break;
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ return ret;
+}
+
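+/*
+ * Empty the merge points hash table, returning the number of nodes
+ * removed. After a successful validation pass, any remaining node
+ * indicates an unvalidated merge point.
+ */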
+static
+unsigned long delete_all_nodes(struct lttng_ust_lfht *ht)
+{
+ struct lttng_ust_lfht_iter iter;
+ struct lfht_mp_node *node;
+ unsigned long nr_nodes = 0;
+
+ lttng_ust_lfht_for_each_entry(ht, &iter, node, node) {
+ int ret;
+
+ ret = lttng_ust_lfht_del(ht, lttng_ust_lfht_iter_get_node(&iter));
+ assert(!ret);
+ /* note: this hash table is never used concurrently */
+ free(node);
+ nr_nodes++;
+ }
+ return nr_nodes;
+}
+
+/*
+ * Return value:
+ * >=0: success
+ * <0: error
+ */
+static
+int validate_instruction_context(
+ struct bytecode_runtime *bytecode __attribute__((unused)),
+ struct vstack *stack,
+ char *start_pc,
+ char *pc)
+{
+ int ret = 0;
+ const bytecode_opcode_t opcode = *(bytecode_opcode_t *) pc;
+
+ switch (opcode) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ {
+ ERR("unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_RETURN:
+ case BYTECODE_OP_RETURN_S64:
+ {
+ goto end;
+ }
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ {
+ ERR("unsupported bytecode op %u\n",
+ (unsigned int) opcode);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_EQ:
+ {
+ ret = bin_op_compare_check(stack, opcode, "==");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_NE:
+ {
+ ret = bin_op_compare_check(stack, opcode, "!=");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_GT:
+ {
+ ret = bin_op_compare_check(stack, opcode, ">");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_LT:
+ {
+ ret = bin_op_compare_check(stack, opcode, "<");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_GE:
+ {
+ ret = bin_op_compare_check(stack, opcode, ">=");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_LE:
+ {
+ ret = bin_op_compare_check(stack, opcode, "<=");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_STRING
+ || vstack_bx(stack)->type != REG_STRING) {
+ ERR("Unexpected register type for string comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
+ && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
+ ERR("Unexpected register type for globbing pattern comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type for s64 comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_bx(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type for s64 comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_DOUBLE) {
+ ERR("Double operator should have two double registers\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Double-S64 operator has unexpected register types\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_bx(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Double-S64 operator has unexpected register types\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("S64-Double operator has unexpected register types\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_bx(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("S64-Double operator has unexpected register types\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_BIT_RSHIFT:
+ ret = bin_op_bitwise_check(stack, opcode, ">>");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_LSHIFT:
+ ret = bin_op_bitwise_check(stack, opcode, "<<");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_AND:
+ ret = bin_op_bitwise_check(stack, opcode, "&");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_OR:
+ ret = bin_op_bitwise_check(stack, opcode, "|");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_XOR:
+ ret = bin_op_bitwise_check(stack, opcode, "^");
+ if (ret < 0)
+ goto end;
+ break;
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ case BYTECODE_OP_UNARY_MINUS:
+ case BYTECODE_OP_UNARY_NOT:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ ERR("Unary op can only be applied to numeric or floating point registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ break;
+ case REG_U64:
+ break;
+ case REG_DOUBLE:
+ break;
+ case REG_UNKNOWN:
+ break;
+ }
+ break;
+ }
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_DOUBLE:
+ ERR("Unary bitwise op can only be applied to numeric registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ break;
+ case REG_U64:
+ break;
+ case REG_UNKNOWN:
+ break;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_S64 &&
+ vstack_ax(stack)->type != REG_U64) {
+ ERR("Invalid register type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_DOUBLE) {
+ ERR("Invalid register type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_S64
+ && vstack_ax(stack)->type != REG_U64
+ && vstack_ax(stack)->type != REG_UNKNOWN) {
+ ERR("Logical comparator expects S64, U64 or dynamic register\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ dbg_printf("Validate jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ if (unlikely(start_pc + insn->skip_offset <= pc)) {
+ ERR("Loops are not allowed in bytecode\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ ERR("Unknown field ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate load field ref offset %u type string\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate load field ref offset %u type s64\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate load field ref offset %u type double\n",
+ ref->offset);
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ break;
+ }
+
+ case BYTECODE_OP_CAST_TO_S64:
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ {
+ struct cast_op *insn = (struct cast_op *) pc;
+
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ ERR("Cast op can only be applied to numeric or floating point registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ break;
+ case REG_U64:
+ break;
+ case REG_DOUBLE:
+ break;
+ case REG_UNKNOWN:
+ break;
+ }
+ if (insn->op == BYTECODE_OP_CAST_DOUBLE_TO_S64) {
+ if (vstack_ax(stack)->type != REG_DOUBLE) {
+ ERR("Cast expects double\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ break;
+ }
+ case BYTECODE_OP_CAST_NOP:
+ {
+ break;
+ }
+
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate get context ref offset %u type dynamic\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate get context ref offset %u type string\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate get context ref offset %u type s64\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate get context ref offset %u type double\n",
+ ref->offset);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ {
+ dbg_printf("Validate get context root\n");
+ break;
+ }
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ {
+ dbg_printf("Validate get app context root\n");
+ break;
+ }
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ {
+ dbg_printf("Validate get payload root\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD:
+ {
+ /*
+		 * We tolerate an unknown field type at validation time,
+		 * because load specialization is performed in a later
+		 * phase, after validation.
+ */
+ dbg_printf("Validate load field\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ {
+ dbg_printf("Validate load field s8\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ {
+ dbg_printf("Validate load field s16\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ {
+ dbg_printf("Validate load field s32\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ {
+ dbg_printf("Validate load field s64\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ {
+ dbg_printf("Validate load field u8\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ {
+ dbg_printf("Validate load field u16\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ {
+ dbg_printf("Validate load field u32\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ {
+ dbg_printf("Validate load field u64\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ {
+ dbg_printf("Validate load field string\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ {
+ dbg_printf("Validate load field sequence\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ {
+ dbg_printf("Validate load field double\n");
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printf("Validate get symbol offset %u\n", sym->offset);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printf("Validate get symbol field offset %u\n", sym->offset);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("Validate get index u16 index %u\n", get_index->index);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("Validate get index u64 index %" PRIu64 "\n", get_index->index);
+ break;
+ }
+ }
+end:
+ return ret;
+}
+
+/*
+ * Return value:
+ * 0: success
+ * <0: error
+ */
+static
+int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
+ struct lttng_ust_lfht *merge_points,
+ struct vstack *stack,
+ char *start_pc,
+ char *pc)
+{
+ int ret;
+ unsigned long target_pc = pc - start_pc;
+ struct lttng_ust_lfht_iter iter;
+ struct lttng_ust_lfht_node *node;
+ struct lfht_mp_node *mp_node;
+ unsigned long hash;
+
+ /* Validate the context resulting from the previous instruction */
+ ret = validate_instruction_context(bytecode, stack, start_pc, pc);
+ if (ret < 0)
+ return ret;
+
+ /* Validate merge points */
+ hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc),
+ lttng_hash_seed);
+ lttng_ust_lfht_lookup(merge_points, hash, lttng_hash_match,
+ (const char *) target_pc, &iter);
+ node = lttng_ust_lfht_iter_get_node(&iter);
+ if (node) {
+ mp_node = caa_container_of(node, struct lfht_mp_node, node);
+
+ dbg_printf("Bytecode: validate merge point at offset %lu\n",
+ target_pc);
+ if (merge_points_compare(stack, &mp_node->stack)) {
+ ERR("Merge points differ for offset %lu\n",
+ target_pc);
+ return -EINVAL;
+ }
+ /* Once validated, we can remove the merge point */
+ dbg_printf("Bytecode: remove merge point at offset %lu\n",
+ target_pc);
+ ret = lttng_ust_lfht_del(merge_points, node);
+ assert(!ret);
+ }
+ return 0;
+}
+
+/*
+ * Return value:
+ * >0: going to next insn.
+ * 0: success, stop iteration.
+ * <0: error
+ */
+static
+int exec_insn(struct bytecode_runtime *bytecode __attribute__((unused)),
+ struct lttng_ust_lfht *merge_points,
+ struct vstack *stack,
+ char **_next_pc,
+ char *pc)
+{
+ int ret = 1;
+ char *next_pc = *_next_pc;
+
+ switch (*(bytecode_opcode_t *) pc) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ {
+ ERR("unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_RETURN:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_PTR:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = 0;
+ goto end;
+ }
+ case BYTECODE_OP_RETURN_S64:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ case REG_UNKNOWN:
+ ERR("Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = 0;
+ goto end;
+ }
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ {
+ ERR("unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_EQ:
+ case BYTECODE_OP_NE:
+ case BYTECODE_OP_GT:
+ case BYTECODE_OP_LT:
+ case BYTECODE_OP_GE:
+ case BYTECODE_OP_LE:
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
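+		/* Comparators produce a boolean result, represented as S64. */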
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_BIT_RSHIFT:
+ case BYTECODE_OP_BIT_LSHIFT:
+ case BYTECODE_OP_BIT_AND:
+ case BYTECODE_OP_BIT_OR:
+ case BYTECODE_OP_BIT_XOR:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
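+		/* Bitwise operators produce an unsigned 64-bit result. */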
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ case BYTECODE_OP_UNARY_MINUS:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_DOUBLE:
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_DOUBLE:
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_S64:
+ case REG_U64:
+ break;
+ case REG_DOUBLE:
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+ int merge_ret;
+
+ /* Add merge point to table */
+ merge_ret = merge_point_add_check(merge_points,
+ insn->skip_offset, stack);
+ if (merge_ret) {
+ ret = merge_ret;
+ goto end;
+ }
+
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+		/* There is always a cast-to-s64 operation before an and/or op. */
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Continue to next instruction */
+ /* Pop 1 when jump not taken */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct logical_op);
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ ERR("Unknown field ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
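+		/* Skip the immediate string, including its terminating NUL. */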
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_double);
+ break;
+ }
+
+ case BYTECODE_OP_CAST_TO_S64:
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Incorrect register type %d for cast\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+ case BYTECODE_OP_CAST_NOP:
+ {
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL:
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
+ }
+end:
+ *_next_pc = next_pc;
+ return ret;
+}
+
+/*
+ * Never called concurrently (hash seed is shared).
+ */
+int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
+{
+ struct lttng_ust_lfht *merge_points;
+ char *pc, *next_pc, *start_pc;
+ int ret = -EINVAL;
+ struct vstack stack;
+
+ vstack_init(&stack);
+
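+	/* Lazily initialize the shared hash seed on first use. */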
+ if (!lttng_hash_seed_ready) {
+ lttng_hash_seed = time(NULL);
+ lttng_hash_seed_ready = 1;
+ }
+ /*
+ * Note: merge_points hash table used by single thread, and
+ * never concurrently resized. Therefore, we can use it without
+ * holding RCU read-side lock and free nodes without using
+ * call_rcu.
+ */
+ merge_points = lttng_ust_lfht_new(DEFAULT_NR_MERGE_POINTS,
+ MIN_NR_BUCKETS, MAX_NR_BUCKETS,
+ 0, NULL);
+ if (!merge_points) {
+ ERR("Error allocating hash table for bytecode validation\n");
+ return -ENOMEM;
+ }
+ start_pc = &bytecode->code[0];
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+ pc = next_pc) {
+ ret = bytecode_validate_overflow(bytecode, start_pc, pc);
+ if (ret != 0) {
+ if (ret == -ERANGE)
+ ERR("Bytecode overflow\n");
+ goto end;
+ }
+ dbg_printf("Validating op %s (%u)\n",
+ lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc),
+ (unsigned int) *(bytecode_opcode_t *) pc);
+
+ /*
+ * For each instruction, validate the current context
+ * (traversal of entire execution flow), and validate
+ * all merge points targeting this instruction.
+ */
+ ret = validate_instruction_all_contexts(bytecode, merge_points,
+ &stack, start_pc, pc);
+ if (ret)
+ goto end;
+ ret = exec_insn(bytecode, merge_points, &stack, &next_pc, pc);
+ if (ret <= 0)
+ goto end;
+ }
+end:
+ if (delete_all_nodes(merge_points)) {
+ if (!ret) {
+ ERR("Unexpected merge points\n");
+ ret = -EINVAL;
+ }
+ }
+ if (lttng_ust_lfht_destroy(merge_points)) {
+ ERR("Error destroying hash table\n");
+ }
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST bytecode linking and relocation code.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+
+#include <urcu/rculist.h>
+
+#include "context-internal.h"
+#include "lttng-bytecode.h"
+#include "ust-events-internal.h"
+#include "common/macros.h"
+
+static const char *opnames[] = {
+ [ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",
+
+ [ BYTECODE_OP_RETURN ] = "RETURN",
+
+ /* binary */
+ [ BYTECODE_OP_MUL ] = "MUL",
+ [ BYTECODE_OP_DIV ] = "DIV",
+ [ BYTECODE_OP_MOD ] = "MOD",
+ [ BYTECODE_OP_PLUS ] = "PLUS",
+ [ BYTECODE_OP_MINUS ] = "MINUS",
+ [ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
+ [ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
+ [ BYTECODE_OP_BIT_AND ] = "BIT_AND",
+ [ BYTECODE_OP_BIT_OR ] = "BIT_OR",
+ [ BYTECODE_OP_BIT_XOR ] = "BIT_XOR",
+
+ /* binary comparators */
+ [ BYTECODE_OP_EQ ] = "EQ",
+ [ BYTECODE_OP_NE ] = "NE",
+ [ BYTECODE_OP_GT ] = "GT",
+ [ BYTECODE_OP_LT ] = "LT",
+ [ BYTECODE_OP_GE ] = "GE",
+ [ BYTECODE_OP_LE ] = "LE",
+
+ /* string binary comparators */
+ [ BYTECODE_OP_EQ_STRING ] = "EQ_STRING",
+ [ BYTECODE_OP_NE_STRING ] = "NE_STRING",
+ [ BYTECODE_OP_GT_STRING ] = "GT_STRING",
+ [ BYTECODE_OP_LT_STRING ] = "LT_STRING",
+ [ BYTECODE_OP_GE_STRING ] = "GE_STRING",
+ [ BYTECODE_OP_LE_STRING ] = "LE_STRING",
+
+ /* s64 binary comparators */
+ [ BYTECODE_OP_EQ_S64 ] = "EQ_S64",
+ [ BYTECODE_OP_NE_S64 ] = "NE_S64",
+ [ BYTECODE_OP_GT_S64 ] = "GT_S64",
+ [ BYTECODE_OP_LT_S64 ] = "LT_S64",
+ [ BYTECODE_OP_GE_S64 ] = "GE_S64",
+ [ BYTECODE_OP_LE_S64 ] = "LE_S64",
+
+ /* double binary comparators */
+ [ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
+ [ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE",
+ [ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE",
+ [ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE",
+ [ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE",
+ [ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE",
+
+ /* Mixed S64-double binary comparators */
+ [ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
+ [ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
+ [ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
+ [ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
+ [ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
+ [ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
+
+ [ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
+ [ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
+ [ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
+ [ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
+ [ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
+ [ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
+
+ /* unary */
+ [ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS",
+ [ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS",
+ [ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT",
+ [ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
+ [ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
+ [ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
+ [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
+ [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
+ [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
+
+ /* logical */
+ [ BYTECODE_OP_AND ] = "AND",
+ [ BYTECODE_OP_OR ] = "OR",
+
+ /* load field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
+ [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
+ [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
+ [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
+ [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
+
+ /* load from immediate operand */
+ [ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING",
+ [ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64",
+ [ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
+
+ /* cast */
+ [ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64",
+ [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
+ [ BYTECODE_OP_CAST_NOP ] = "CAST_NOP",
+
+ /* get context ref */
+ [ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
+ [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
+ [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
+ [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
+
+ /* load userspace field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
+ [ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
+
+ /*
+	 * Load star globbing pattern (literal string) from immediate
+	 * operand.
+ */
+ [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
+
+	/* globbing pattern binary operators: string vs. star-glob pattern */
+ [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
+ [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ [ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
+ [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
+ [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
+
+ [ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL",
+ [ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
+ [ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
+ [ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
+
+ [ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD",
+ [ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
+ [ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
+ [ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
+ [ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
+ [ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
+ [ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
+ [ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
+ [ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
+ [ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
+ [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
+ [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
+
+ [ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
+
+ [ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64",
+};
+
+const char *lttng_bytecode_print_op(enum bytecode_op op)
+{
+ if (op >= NR_BYTECODE_OPS)
+ return "UNKNOWN";
+ else
+ return opnames[op];
+}
+
+static
+int apply_field_reloc(const struct lttng_ust_event_desc *event_desc,
+ struct bytecode_runtime *runtime,
+ uint32_t runtime_len __attribute__((unused)),
+ uint32_t reloc_offset,
+ const char *field_name,
+ enum bytecode_op bytecode_op)
+{
+ const struct lttng_ust_event_field **fields, *field = NULL;
+ unsigned int nr_fields, i;
+ struct load_op *op;
+ uint32_t field_offset = 0;
+
+ dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name);
+
+ /* Lookup event by name */
+ if (!event_desc)
+ return -EINVAL;
+ fields = event_desc->fields;
+ if (!fields)
+ return -EINVAL;
+ nr_fields = event_desc->nr_fields;
+ for (i = 0; i < nr_fields; i++) {
+ if (fields[i]->nofilter) {
+ continue;
+ }
+ if (!strcmp(fields[i]->name, field_name)) {
+ field = fields[i];
+ break;
+ }
+ /* compute field offset */
+ switch (fields[i]->type->type) {
+ case lttng_ust_type_integer:
+ case lttng_ust_type_enum:
+ field_offset += sizeof(int64_t);
+ break;
+ case lttng_ust_type_array:
+ case lttng_ust_type_sequence:
+ field_offset += sizeof(unsigned long);
+ field_offset += sizeof(void *);
+ break;
+ case lttng_ust_type_string:
+ field_offset += sizeof(void *);
+ break;
+ case lttng_ust_type_float:
+ field_offset += sizeof(double);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ if (!field)
+ return -EINVAL;
+
+ /* Check if field offset is too large for 16-bit offset */
+ if (field_offset > LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN - 1)
+ return -EINVAL;
+
+ /* set type */
+ op = (struct load_op *) &runtime->code[reloc_offset];
+
+ switch (bytecode_op) {
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ struct field_ref *field_ref;
+
+ field_ref = (struct field_ref *) op->data;
+ switch (field->type->type) {
+ case lttng_ust_type_integer:
+ case lttng_ust_type_enum:
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
+ break;
+ case lttng_ust_type_array:
+ {
+ struct lttng_ust_type_array *array = (struct lttng_ust_type_array *) field->type;
+
+ if (array->encoding == lttng_ust_string_encoding_none)
+ return -EINVAL;
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
+ break;
+ }
+ case lttng_ust_type_sequence:
+ {
+ struct lttng_ust_type_sequence *sequence = (struct lttng_ust_type_sequence *) field->type;
+
+ if (sequence->encoding == lttng_ust_string_encoding_none)
+ return -EINVAL;
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
+ break;
+ }
+ case lttng_ust_type_string:
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
+ break;
+ case lttng_ust_type_float:
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_DOUBLE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* set offset */
+ field_ref->offset = (uint16_t) field_offset;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static
+int apply_context_reloc(struct bytecode_runtime *runtime,
+ uint32_t runtime_len __attribute__((unused)),
+ uint32_t reloc_offset,
+ const char *context_name,
+ enum bytecode_op bytecode_op)
+{
+ struct load_op *op;
+ const struct lttng_ust_ctx_field *ctx_field;
+ int idx;
+ struct lttng_ust_ctx **pctx = runtime->p.pctx;
+
+ dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name);
+
+ /* Get context index */
+ idx = lttng_get_context_index(*pctx, context_name);
+ if (idx < 0) {
+ if (lttng_context_is_app(context_name)) {
+ int ret;
+
+ ret = lttng_ust_add_app_context_to_ctx_rcu(context_name,
+ pctx);
+ if (ret)
+ return ret;
+ idx = lttng_get_context_index(*pctx, context_name);
+ if (idx < 0)
+ return -ENOENT;
+ } else {
+ return -ENOENT;
+ }
+ }
+ /* Check if idx is too large for 16-bit offset */
+ if (idx > LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN - 1)
+ return -EINVAL;
+
+ /* Get context return type */
+ ctx_field = &(*pctx)->fields[idx];
+ op = (struct load_op *) &runtime->code[reloc_offset];
+
+ switch (bytecode_op) {
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ struct field_ref *field_ref;
+
+ field_ref = (struct field_ref *) op->data;
+ switch (ctx_field->event_field->type->type) {
+ case lttng_ust_type_integer:
+ case lttng_ust_type_enum:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
+ break;
+ /* Sequence and array supported only as string */
+ case lttng_ust_type_array:
+ {
+ struct lttng_ust_type_array *array = (struct lttng_ust_type_array *) ctx_field->event_field->type;
+
+ if (array->encoding == lttng_ust_string_encoding_none)
+ return -EINVAL;
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+ break;
+ }
+ case lttng_ust_type_sequence:
+ {
+ struct lttng_ust_type_sequence *sequence = (struct lttng_ust_type_sequence *) ctx_field->event_field->type;
+
+ if (sequence->encoding == lttng_ust_string_encoding_none)
+ return -EINVAL;
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+ break;
+ }
+ case lttng_ust_type_string:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+ break;
+ case lttng_ust_type_float:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_DOUBLE;
+ break;
+ case lttng_ust_type_dynamic:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* set offset to context index within channel contexts */
+ field_ref->offset = (uint16_t) idx;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static
+int apply_reloc(const struct lttng_ust_event_desc *event_desc,
+ struct bytecode_runtime *runtime,
+ uint32_t runtime_len,
+ uint32_t reloc_offset,
+ const char *name)
+{
+ struct load_op *op;
+
+ dbg_printf("Apply reloc: %u %s\n", reloc_offset, name);
+
+ /* Ensure that the reloc is within the code */
+ if (runtime_len - reloc_offset < sizeof(uint16_t))
+ return -EINVAL;
+
+ op = (struct load_op *) &runtime->code[reloc_offset];
+ switch (op->op) {
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ return apply_field_reloc(event_desc, runtime, runtime_len,
+ reloc_offset, name, op->op);
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ return apply_context_reloc(runtime, runtime_len,
+ reloc_offset, name, op->op);
+ case BYTECODE_OP_GET_SYMBOL:
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ /*
+		 * Will be handled by the load specialization phase or
+		 * dynamically by the interpreter.
+ */
+ return 0;
+ default:
+ ERR("Unknown reloc op type %u\n", op->op);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static
+int bytecode_is_linked(struct lttng_ust_bytecode_node *bytecode,
+ struct cds_list_head *bytecode_runtime_head)
+{
+ struct lttng_ust_bytecode_runtime *bc_runtime;
+
+ cds_list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
+ if (bc_runtime->bc == bytecode)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Take a bytecode with reloc table and link it to an event to create a
+ * bytecode runtime.
+ */
+static
+int link_bytecode(const struct lttng_ust_event_desc *event_desc,
+ struct lttng_ust_ctx **ctx,
+ struct lttng_ust_bytecode_node *bytecode,
+ struct cds_list_head *bytecode_runtime_head,
+ struct cds_list_head *insert_loc)
+{
+ int ret, offset, next_offset;
+ struct bytecode_runtime *runtime = NULL;
+ size_t runtime_alloc_len;
+
+ if (!bytecode)
+ return 0;
+ /* Bytecode already linked */
+ if (bytecode_is_linked(bytecode, bytecode_runtime_head))
+ return 0;
+
+ dbg_printf("Linking...\n");
+
+ /* We don't need the reloc table in the runtime */
+ runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
+ runtime = zmalloc(runtime_alloc_len);
+ if (!runtime) {
+ ret = -ENOMEM;
+ goto alloc_error;
+ }
+ runtime->p.type = bytecode->type;
+ runtime->p.bc = bytecode;
+ runtime->p.pctx = ctx;
+ runtime->len = bytecode->bc.reloc_offset;
+ /* copy original bytecode */
+ memcpy(runtime->code, bytecode->bc.data, runtime->len);
+ /*
+	 * Apply relocs. Each reloc is a uint16_t (offset in bytecode)
+	 * followed by a string (field name).
+ */
+ for (offset = bytecode->bc.reloc_offset;
+ offset < bytecode->bc.len;
+ offset = next_offset) {
+ uint16_t reloc_offset =
+ *(uint16_t *) &bytecode->bc.data[offset];
+ const char *name =
+ (const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];
+
+ ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
+ if (ret) {
+ goto link_error;
+ }
+ next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
+ }
+ /* Validate bytecode */
+ ret = lttng_bytecode_validate(runtime);
+ if (ret) {
+ goto link_error;
+ }
+ /* Specialize bytecode */
+ ret = lttng_bytecode_specialize(event_desc, runtime);
+ if (ret) {
+ goto link_error;
+ }
+
+ runtime->p.interpreter_func = lttng_bytecode_interpret;
+ runtime->p.link_failed = 0;
+ cds_list_add_rcu(&runtime->p.node, insert_loc);
+ dbg_printf("Linking successful.\n");
+ return 0;
+
+link_error:
+ runtime->p.interpreter_func = lttng_bytecode_interpret_error;
+ runtime->p.link_failed = 1;
+ cds_list_add_rcu(&runtime->p.node, insert_loc);
+alloc_error:
+ dbg_printf("Linking failed.\n");
+ return ret;
+}
+
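+/*
+ * Select the interpreter function for a bytecode runtime: use the
+ * error interpreter when the owning enabler is disabled or linking
+ * failed, the regular interpreter otherwise.
+ */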
+void lttng_bytecode_sync_state(struct lttng_ust_bytecode_runtime *runtime)
+{
+ struct lttng_ust_bytecode_node *bc = runtime->bc;
+
+ if (!bc->enabler->enabled || runtime->link_failed)
+ runtime->interpreter_func = lttng_bytecode_interpret_error;
+ else
+ runtime->interpreter_func = lttng_bytecode_interpret;
+}
+
+/*
+ * Given the lists of bytecode programs of an instance (trigger or event) and
+ * of a matching enabler, try to link all the enabler's bytecode programs with
+ * the instance.
+ *
+ * This function is called after we have confirmed that the enabler and
+ * the instance names match (or glob-pattern match).
+ */
+void lttng_enabler_link_bytecode(const struct lttng_ust_event_desc *event_desc,
+ struct lttng_ust_ctx **ctx,
+ struct cds_list_head *instance_bytecode_head,
+ struct cds_list_head *enabler_bytecode_head)
+{
+ struct lttng_ust_bytecode_node *enabler_bc;
+ struct lttng_ust_bytecode_runtime *runtime;
+
+ assert(event_desc);
+
+ /* Go over all the bytecode programs of the enabler. */
+ cds_list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
+ int found = 0, ret;
+ struct cds_list_head *insert_loc;
+
+ /*
+ * Check if the current enabler bytecode program is already
+ * linked with the instance.
+ */
+ cds_list_for_each_entry(runtime, instance_bytecode_head, node) {
+ if (runtime->bc == enabler_bc) {
+ found = 1;
+ break;
+ }
+ }
+
+ /*
+ * Skip bytecode already linked, go to the next enabler
+ * bytecode program.
+ */
+ if (found)
+ continue;
+
+ /*
+ * Insert at specified priority (seqnum) in increasing
+ * order. If there already is a bytecode of the same priority,
+ * insert the new bytecode right after it.
+ */
+ cds_list_for_each_entry_reverse(runtime,
+ instance_bytecode_head, node) {
+ if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
+ /* insert here */
+ insert_loc = &runtime->node;
+ goto add_within;
+ }
+ }
+
+		/* Add to the head of the list */
+ insert_loc = instance_bytecode_head;
+ add_within:
+ dbg_printf("linking bytecode\n");
+ ret = link_bytecode(event_desc, ctx, enabler_bc, instance_bytecode_head, insert_loc);
+ if (ret) {
+ dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
+ }
+ }
+}
+
+static
+void free_filter_runtime(struct cds_list_head *bytecode_runtime_head)
+{
+ struct bytecode_runtime *runtime, *tmp;
+
+ cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head,
+ p.node) {
+ free(runtime->data);
+ free(runtime);
+ }
+}
+
+void lttng_free_event_filter_runtime(struct lttng_ust_event_common *event)
+{
+ free_filter_runtime(&event->priv->filter_bytecode_runtime_head);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST bytecode header.
+ */
+
+#ifndef _LTTNG_BYTECODE_H
+#define _LTTNG_BYTECODE_H
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include "common/macros.h"
+#include <lttng/ust-events.h>
+#include "common/ust-context-provider.h"
+#include <stdint.h>
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+#include <inttypes.h>
+#include <limits.h>
+#include "common/logging.h"
+#include "bytecode.h"
+#include "ust-events-internal.h"
+
+/* Interpreter stack length, in number of entries */
+#define INTERPRETER_STACK_LEN 10 /* includes 2 dummy */
+#define INTERPRETER_STACK_EMPTY 1
+
+#define BYTECODE_MAX_DATA_LEN 65536
+
+#ifndef min_t
+#define min_t(type, a, b) \
+ ((type) (a) < (type) (b) ? (type) (a) : (type) (b))
+#endif
+
+#ifndef likely
+#define likely(x) __builtin_expect(!!(x), 1)
+#endif
+
+#ifndef unlikely
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+
+#ifdef DEBUG
+#define dbg_printf(fmt, args...) \
+ printf("[debug bytecode in %s:%s@%u] " fmt, \
+ __FILE__, __func__, __LINE__, ## args)
+#else
+#define dbg_printf(fmt, args...) \
+do { \
+ /* do nothing but check printf format */ \
+ if (0) \
+ printf("[debug bytecode in %s:%s@%u] " fmt, \
+ __FILE__, __func__, __LINE__, ## args); \
+} while (0)
+#endif
+
+/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
+struct bytecode_runtime {
+ struct lttng_ust_bytecode_runtime p;
+ size_t data_len;
+ size_t data_alloc_len;
+ char *data;
+ uint16_t len;
+ char code[0];
+};
+
+enum entry_type {
+ REG_S64,
+ REG_U64,
+ REG_DOUBLE,
+ REG_STRING,
+ REG_STAR_GLOB_STRING,
+ REG_UNKNOWN,
+ REG_PTR,
+};
+
+enum load_type {
+ LOAD_ROOT_CONTEXT,
+ LOAD_ROOT_APP_CONTEXT,
+ LOAD_ROOT_PAYLOAD,
+ LOAD_OBJECT,
+};
+
+enum object_type {
+ OBJECT_TYPE_S8,
+ OBJECT_TYPE_S16,
+ OBJECT_TYPE_S32,
+ OBJECT_TYPE_S64,
+ OBJECT_TYPE_U8,
+ OBJECT_TYPE_U16,
+ OBJECT_TYPE_U32,
+ OBJECT_TYPE_U64,
+
+ OBJECT_TYPE_SIGNED_ENUM,
+ OBJECT_TYPE_UNSIGNED_ENUM,
+
+ OBJECT_TYPE_DOUBLE,
+ OBJECT_TYPE_STRING,
+ OBJECT_TYPE_STRING_SEQUENCE,
+
+ OBJECT_TYPE_SEQUENCE,
+ OBJECT_TYPE_ARRAY,
+ OBJECT_TYPE_STRUCT,
+ OBJECT_TYPE_VARIANT,
+
+ OBJECT_TYPE_DYNAMIC,
+};
+
+struct bytecode_get_index_data {
+ uint64_t offset; /* in bytes */
+ size_t ctx_index;
+ size_t array_len;
+ /*
+ * Field is only populated for LOAD_ROOT_CONTEXT, LOAD_ROOT_APP_CONTEXT
+ * and LOAD_ROOT_PAYLOAD. Left NULL for LOAD_OBJECT, considering that the
+ * interpreter needs to find it from the event fields and types to
+ * support variants.
+ */
+ const struct lttng_ust_event_field *field;
+ struct {
+ size_t len;
+ enum object_type type;
+ bool rev_bo; /* reverse byte order */
+ } elem;
+};
+
+/* Validation stack */
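+/*
+ * Note (descriptive, added for clarity): the validation pass simulates the
+ * effect of each bytecode instruction on this stack, so that bytecode can
+ * be type-checked before it is ever interpreted.
+ */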
+struct vstack_load {
+ enum load_type type;
+ enum object_type object_type;
+ const struct lttng_ust_event_field *field;
+ bool rev_bo; /* reverse byte order */
+};
+
+struct vstack_entry {
+ enum entry_type type;
+ struct vstack_load load;
+};
+
+struct vstack {
+ int top; /* top of stack */
+ struct vstack_entry e[INTERPRETER_STACK_LEN];
+};
+
+static inline
+void vstack_init(struct vstack *stack)
+{
+ stack->top = -1;
+}
+
+static inline
+struct vstack_entry *vstack_ax(struct vstack *stack)
+{
+ if (unlikely(stack->top < 0))
+ return NULL;
+ return &stack->e[stack->top];
+}
+
+static inline
+struct vstack_entry *vstack_bx(struct vstack *stack)
+{
+ if (unlikely(stack->top < 1))
+ return NULL;
+ return &stack->e[stack->top - 1];
+}
+
+static inline
+int vstack_push(struct vstack *stack)
+{
+ if (stack->top >= INTERPRETER_STACK_LEN - 1) {
+ ERR("Stack full\n");
+ return -EINVAL;
+ }
+ ++stack->top;
+ return 0;
+}
+
+static inline
+int vstack_pop(struct vstack *stack)
+{
+ if (unlikely(stack->top < 0)) {
+ ERR("Stack empty\n");
+ return -EINVAL;
+ }
+ stack->top--;
+ return 0;
+}
+
+/* Execution stack */
+enum estack_string_literal_type {
+ ESTACK_STRING_LITERAL_TYPE_NONE,
+ ESTACK_STRING_LITERAL_TYPE_PLAIN,
+ ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
+};
+
+struct load_ptr {
+ enum load_type type;
+ enum object_type object_type;
+ const void *ptr;
+ size_t nr_elem;
+ bool rev_bo;
+ /* Temporary place-holders for contexts. */
+ union {
+ int64_t s64;
+ uint64_t u64;
+ double d;
+ } u;
+ const struct lttng_ust_event_field *field;
+};
+
+struct estack_entry {
+ enum entry_type type; /* For dynamic typing. */
+ union {
+ int64_t v;
+ double d;
+
+ struct {
+ const char *str;
+ size_t seq_len;
+ enum estack_string_literal_type literal_type;
+ } s;
+ struct load_ptr ptr;
+ } u;
+};
+
+struct estack {
+ int top; /* top of stack */
+ struct estack_entry e[INTERPRETER_STACK_LEN];
+};
+
+/*
+ * Always use aliased type for ax/bx (top of stack).
+ * When ax/bx are S64, use aliased value.
+ */
+#define estack_ax_v ax
+#define estack_bx_v bx
+#define estack_ax_t ax_t
+#define estack_bx_t bx_t
+
+/*
+ * ax and bx registers can hold either integer, double or string.
+ */
+#define estack_ax(stack, top) \
+ ({ \
+ assert((top) > INTERPRETER_STACK_EMPTY); \
+ &(stack)->e[top]; \
+ })
+
+#define estack_bx(stack, top) \
+ ({ \
+ assert((top) > INTERPRETER_STACK_EMPTY + 1); \
+ &(stack)->e[(top) - 1]; \
+ })
+
+/*
+ * Currently, only integers (REG_S64) can be pushed onto the stack.
+ */
+#define estack_push(stack, top, ax, bx, ax_t, bx_t) \
+ do { \
+ assert((top) < INTERPRETER_STACK_LEN - 1); \
+ (stack)->e[(top) - 1].u.v = (bx); \
+ (stack)->e[(top) - 1].type = (bx_t); \
+ (bx) = (ax); \
+ (bx_t) = (ax_t); \
+ ++(top); \
+ } while (0)
+
+#define estack_pop(stack, top, ax, bx, ax_t, bx_t) \
+ do { \
+ assert((top) > INTERPRETER_STACK_EMPTY); \
+ (ax) = (bx); \
+ (ax_t) = (bx_t); \
+ (bx) = (stack)->e[(top) - 2].u.v; \
+ (bx_t) = (stack)->e[(top) - 2].type; \
+ (top)--; \
+ } while (0)
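+
+/*
+ * Usage sketch (illustrative, not part of the ABI): the interpreter keeps
+ * the two top-of-stack entries in the ax/bx local variables and writes the
+ * displaced entry back to the array on push:
+ *
+ *	estack_push(stack, top, ax, bx, ax_t, bx_t);
+ *	estack_ax_v = 42;	// new top-of-stack value lives in 'ax'
+ *	estack_ax_t = REG_S64;
+ *	estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ */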
+
+enum lttng_interpreter_type {
+ LTTNG_INTERPRETER_TYPE_S64,
+ LTTNG_INTERPRETER_TYPE_U64,
+ LTTNG_INTERPRETER_TYPE_SIGNED_ENUM,
+ LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM,
+ LTTNG_INTERPRETER_TYPE_DOUBLE,
+ LTTNG_INTERPRETER_TYPE_STRING,
+ LTTNG_INTERPRETER_TYPE_SEQUENCE,
+};
+
+/*
+ * Represents the output parameter of the lttng interpreter.
+ * Currently capturable field classes are integer, double, string and sequence
+ * of integer.
+ */
+struct lttng_interpreter_output {
+ enum lttng_interpreter_type type;
+ union {
+ int64_t s;
+ uint64_t u;
+ double d;
+
+ struct {
+ const char *str;
+ size_t len;
+ } str;
+ struct {
+ const void *ptr;
+ size_t nr_elem;
+
+ /* Inner type. */
+ const struct lttng_ust_type_common *nested_type;
+ } sequence;
+ } u;
+};
+
+const char *lttng_bytecode_print_op(enum bytecode_op op)
+ __attribute__((visibility("hidden")));
+
+void lttng_bytecode_sync_state(struct lttng_ust_bytecode_runtime *runtime)
+ __attribute__((visibility("hidden")));
+
+int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
+ __attribute__((visibility("hidden")));
+
+int lttng_bytecode_specialize(const struct lttng_ust_event_desc *event_desc,
+ struct bytecode_runtime *bytecode)
+ __attribute__((visibility("hidden")));
+
+int lttng_bytecode_interpret_error(struct lttng_ust_bytecode_runtime *bytecode_runtime,
+ const char *stack_data,
+ void *ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_bytecode_interpret(struct lttng_ust_bytecode_runtime *bytecode_runtime,
+ const char *stack_data,
+ void *ctx)
+ __attribute__((visibility("hidden")));
+
+#endif /* _LTTNG_BYTECODE_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <error.h>
+#include <dlfcn.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
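+#include <time.h>	/* clock_gettime() for the default trace clock read callback below */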
+
+#include <lttng/ust-clock.h>
+#include <urcu/system.h>
+#include <urcu/arch.h>
+
+#include "common/logging.h"
+
+#include "clock.h"
+#include "getenv.h"
+
+struct lttng_ust_trace_clock *lttng_ust_trace_clock;
+
+static
+struct lttng_ust_trace_clock user_tc;
+
+static
+void *clock_handle;
+
+static
+uint64_t trace_clock_freq_monotonic(void)
+{
+ return 1000000000ULL;
+}
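+
+/*
+ * Default read64 callback, used below when no clock override is installed:
+ * read the monotonic clock and convert it to nanoseconds. (Added here for
+ * completeness, since lttng_ust_trace_clock_get_read64_cb() references it.)
+ */
+static
+uint64_t trace_clock_read64_monotonic(void)
+{
+	struct timespec ts;
+
+	if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts))) {
+		ts.tv_sec = 0;
+		ts.tv_nsec = 0;
+	}
+	return ((uint64_t) ts.tv_sec * 1000000000ULL) + (uint64_t) ts.tv_nsec;
+}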
+
+static
+int trace_clock_uuid_monotonic(char *uuid)
+{
+ int ret = 0;
+ size_t len;
+ FILE *fp;
+
+ /*
+ * boot_id needs to be read once before being used concurrently
+ * to deal with a Linux kernel race. A fix is proposed for
+ * upstream, but the work-around is needed for older kernels.
+ */
+ fp = fopen("/proc/sys/kernel/random/boot_id", "r");
+ if (!fp) {
+ return -ENOENT;
+ }
+ len = fread(uuid, 1, LTTNG_UST_UUID_STR_LEN - 1, fp);
+ if (len < LTTNG_UST_UUID_STR_LEN - 1) {
+ ret = -EINVAL;
+ goto end;
+ }
+ uuid[LTTNG_UST_UUID_STR_LEN - 1] = '\0';
+end:
+ fclose(fp);
+ return ret;
+}
+
+static
+const char *trace_clock_name_monotonic(void)
+{
+ return "monotonic";
+}
+
+static
+const char *trace_clock_description_monotonic(void)
+{
+ return "Monotonic Clock";
+}
+
+int lttng_ust_trace_clock_set_read64_cb(lttng_ust_clock_read64_function read64_cb)
+{
+ if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
+ return -EBUSY;
+ user_tc.read64 = read64_cb;
+ return 0;
+}
+
+int lttng_ust_trace_clock_get_read64_cb(lttng_ust_clock_read64_function *read64_cb)
+{
+ struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
+
+ if (caa_likely(!ltc)) {
+ *read64_cb = &trace_clock_read64_monotonic;
+ } else {
+ cmm_read_barrier_depends(); /* load ltc before content */
+ *read64_cb = ltc->read64;
+ }
+ return 0;
+}
+
+int lttng_ust_trace_clock_set_freq_cb(lttng_ust_clock_freq_function freq_cb)
+{
+ if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
+ return -EBUSY;
+ user_tc.freq = freq_cb;
+ return 0;
+}
+
+int lttng_ust_trace_clock_get_freq_cb(lttng_ust_clock_freq_function *freq_cb)
+{
+ struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
+
+ if (caa_likely(!ltc)) {
+ *freq_cb = &trace_clock_freq_monotonic;
+ } else {
+ cmm_read_barrier_depends(); /* load ltc before content */
+ *freq_cb = ltc->freq;
+ }
+ return 0;
+}
+
+int lttng_ust_trace_clock_set_uuid_cb(lttng_ust_clock_uuid_function uuid_cb)
+{
+ if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
+ return -EBUSY;
+ user_tc.uuid = uuid_cb;
+ return 0;
+}
+
+int lttng_ust_trace_clock_get_uuid_cb(lttng_ust_clock_uuid_function *uuid_cb)
+{
+ struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
+
+ if (caa_likely(!ltc)) {
+ *uuid_cb = &trace_clock_uuid_monotonic;
+ } else {
+ cmm_read_barrier_depends(); /* load ltc before content */
+ *uuid_cb = ltc->uuid;
+ }
+ return 0;
+}
+
+int lttng_ust_trace_clock_set_name_cb(lttng_ust_clock_name_function name_cb)
+{
+ if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
+ return -EBUSY;
+ user_tc.name = name_cb;
+ return 0;
+}
+
+int lttng_ust_trace_clock_get_name_cb(lttng_ust_clock_name_function *name_cb)
+{
+ struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
+
+ if (caa_likely(!ltc)) {
+ *name_cb = &trace_clock_name_monotonic;
+ } else {
+ cmm_read_barrier_depends(); /* load ltc before content */
+ *name_cb = ltc->name;
+ }
+ return 0;
+}
+
+int lttng_ust_trace_clock_set_description_cb(lttng_ust_clock_description_function description_cb)
+{
+ if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
+ return -EBUSY;
+ user_tc.description = description_cb;
+ return 0;
+}
+
+int lttng_ust_trace_clock_get_description_cb(lttng_ust_clock_description_function *description_cb)
+{
+ struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
+
+ if (caa_likely(!ltc)) {
+ *description_cb = &trace_clock_description_monotonic;
+ } else {
+ cmm_read_barrier_depends(); /* load ltc before content */
+ *description_cb = ltc->description;
+ }
+ return 0;
+}
+
+int lttng_ust_enable_trace_clock_override(void)
+{
+ if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
+ return -EBUSY;
+ if (!user_tc.read64)
+ return -EINVAL;
+ if (!user_tc.freq)
+ return -EINVAL;
+ if (!user_tc.name)
+ return -EINVAL;
+ if (!user_tc.description)
+ return -EINVAL;
+ /* Use default uuid cb when NULL */
+ cmm_smp_mb(); /* Store callbacks before trace clock */
+ CMM_STORE_SHARED(lttng_ust_trace_clock, &user_tc);
+ return 0;
+}
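+
+/*
+ * Sketch of a clock override plugin (only the lttng_ust_trace_clock_set_*_cb()
+ * and lttng_ust_enable_trace_clock_override() calls are part of the public
+ * API; the my_* names are illustrative). The plugin is loaded through the
+ * LTTNG_UST_CLOCK_PLUGIN environment variable (see lttng_ust_clock_init()
+ * below) and installs its callbacks from lttng_ust_clock_plugin_init():
+ *
+ *	void lttng_ust_clock_plugin_init(void)
+ *	{
+ *		lttng_ust_trace_clock_set_read64_cb(my_read64);
+ *		lttng_ust_trace_clock_set_freq_cb(my_freq);
+ *		lttng_ust_trace_clock_set_name_cb(my_name);
+ *		lttng_ust_trace_clock_set_description_cb(my_description);
+ *		lttng_ust_enable_trace_clock_override();
+ *	}
+ */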
+
+void lttng_ust_clock_init(void)
+{
+ const char *libname;
+ void (*libinit)(void);
+
+ if (clock_handle)
+ return;
+ libname = lttng_ust_getenv("LTTNG_UST_CLOCK_PLUGIN");
+ if (!libname)
+ return;
+ clock_handle = dlopen(libname, RTLD_NOW);
+ if (!clock_handle) {
+ PERROR("Cannot load LTTng UST clock override library %s",
+ libname);
+ return;
+ }
+ dlerror();
+ libinit = (void (*)(void)) dlsym(clock_handle,
+ "lttng_ust_clock_plugin_init");
+ if (!libinit) {
+ PERROR("Cannot find LTTng UST clock override library %s initialization function lttng_ust_clock_plugin_init()",
+ libname);
+ return;
+ }
+ libinit();
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST cgroup namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include "common/compat/tid.h"
+#include <urcu/tls-compat.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+#include "ns.h"
+
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event.
+ */
+static DEFINE_URCU_TLS_INIT(ino_t, cached_cgroup_ns, NS_INO_UNINITIALIZED);
+
+static
+ino_t get_cgroup_ns(void)
+{
+ struct stat sb;
+ ino_t cgroup_ns;
+
+ cgroup_ns = CMM_LOAD_SHARED(URCU_TLS(cached_cgroup_ns));
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(cgroup_ns != NS_INO_UNINITIALIZED))
+ return cgroup_ns;
+
+ /*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+ */
+ cgroup_ns = NS_INO_UNAVAILABLE;
+
+ /*
+ * /proc/thread-self was introduced in kernel v3.17
+ */
+ if (stat("/proc/thread-self/ns/cgroup", &sb) == 0) {
+ cgroup_ns = sb.st_ino;
+ } else {
+ char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
+
+ if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
+ "/proc/self/task/%d/ns/cgroup",
+ lttng_gettid()) >= 0) {
+
+ if (stat(proc_ns_path, &sb) == 0) {
+ cgroup_ns = sb.st_ino;
+ }
+ }
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(URCU_TLS(cached_cgroup_ns), cgroup_ns);
+
+ return cgroup_ns;
+}
+
+/*
+ * The cgroup namespace can change for 3 reasons
+ * * clone(2) called with CLONE_NEWCGROUP
+ * * setns(2) called with the fd of a different cgroup ns
+ * * unshare(2) called with CLONE_NEWCGROUP
+ */
+void lttng_context_cgroup_ns_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_cgroup_ns), NS_INO_UNINITIALIZED);
+}
+
+static
+size_t cgroup_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void cgroup_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t cgroup_ns;
+
+ cgroup_ns = get_cgroup_ns();
+ chan->ops->event_write(ctx, &cgroup_ns, sizeof(cgroup_ns),
+ lttng_ust_rb_alignof(cgroup_ns));
+}
+
+static
+void cgroup_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_cgroup_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("cgroup_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ cgroup_ns_get_size,
+ cgroup_ns_record,
+ cgroup_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_cgroup_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_cgroup_ns_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_cgroup_ns)));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST CPU id context.
+ *
+ * Note: threads can be migrated at any point while executing the
+ * tracepoint probe. This means the CPU id field (and filter) is only
+ * statistical. For instance, even though a user might select a
+ * cpu_id==1 filter, a few events recorded into the channel may still
+ * come from other CPUs, due to migration.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <limits.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include "common/ringbuffer/getcpu.h"
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+
+static
+size_t cpu_id_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int));
+ size += sizeof(int);
+ return size;
+}
+
+static
+void cpu_id_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ int cpu;
+
+ cpu = lttng_ust_get_cpu();
+ chan->ops->event_write(ctx, &cpu, sizeof(cpu), lttng_ust_rb_alignof(cpu));
+}
+
+static
+void cpu_id_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = lttng_ust_get_cpu();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("cpu_id",
+ lttng_ust_static_type_integer(sizeof(int) * CHAR_BIT,
+ lttng_ust_rb_alignof(int) * CHAR_BIT,
+ lttng_ust_is_signed_type(int),
+ BYTE_ORDER, 10),
+ false, false),
+ cpu_id_get_size,
+ cpu_id_record,
+ cpu_id_get_value,
+ NULL, NULL);
+
+int lttng_add_cpu_id_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST Instruction Pointer Context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+
+static
+size_t ip_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(void *));
+ size += sizeof(void *);
+ return size;
+}
+
+static
+void ip_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ void *ip;
+
+ ip = ctx->ip;
+ chan->ops->event_write(ctx, &ip, sizeof(ip), lttng_ust_rb_alignof(ip));
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("ip",
+ lttng_ust_static_type_integer(sizeof(void *) * CHAR_BIT,
+ lttng_ust_rb_alignof(void *) * CHAR_BIT,
+ lttng_ust_is_signed_type(void *),
+ BYTE_ORDER, 10),
+ false, false),
+ ip_get_size,
+ ip_record,
+ NULL, NULL, NULL);
+
+int lttng_add_ip_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST ipc namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include "common/compat/tid.h"
+#include <urcu/tls-compat.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+#include "ns.h"
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event.
+ */
+static DEFINE_URCU_TLS_INIT(ino_t, cached_ipc_ns, NS_INO_UNINITIALIZED);
+
+static
+ino_t get_ipc_ns(void)
+{
+ struct stat sb;
+ ino_t ipc_ns;
+
+ ipc_ns = CMM_LOAD_SHARED(URCU_TLS(cached_ipc_ns));
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(ipc_ns != NS_INO_UNINITIALIZED))
+ return ipc_ns;
+
+ /*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+ */
+ ipc_ns = NS_INO_UNAVAILABLE;
+
+ /*
+ * /proc/thread-self was introduced in kernel v3.17
+ */
+ if (stat("/proc/thread-self/ns/ipc", &sb) == 0) {
+ ipc_ns = sb.st_ino;
+ } else {
+ char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
+
+ if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
+ "/proc/self/task/%d/ns/ipc",
+ lttng_gettid()) >= 0) {
+
+ if (stat(proc_ns_path, &sb) == 0) {
+ ipc_ns = sb.st_ino;
+ }
+ }
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(URCU_TLS(cached_ipc_ns), ipc_ns);
+
+ return ipc_ns;
+}
+
+/*
+ * The ipc namespace can change for 3 reasons
+ * * clone(2) called with CLONE_NEWIPC
+ * * setns(2) called with the fd of a different ipc ns
+ * * unshare(2) called with CLONE_NEWIPC
+ */
+void lttng_context_ipc_ns_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_ipc_ns), NS_INO_UNINITIALIZED);
+}
+
+static
+size_t ipc_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void ipc_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t ipc_ns;
+
+ ipc_ns = get_ipc_ns();
+ chan->ops->event_write(ctx, &ipc_ns, sizeof(ipc_ns), lttng_ust_rb_alignof(ipc_ns));
+}
+
+static
+void ipc_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_ipc_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("ipc_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ ipc_ns_get_size,
+ ipc_ns_record,
+ ipc_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_ipc_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_ipc_ns_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_ipc_ns)));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST mnt namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "ns.h"
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event. The mount namespace is global to the process.
+ */
+static ino_t cached_mnt_ns = NS_INO_UNINITIALIZED;
+
+static
+ino_t get_mnt_ns(void)
+{
+ struct stat sb;
+ ino_t mnt_ns;
+
+ mnt_ns = CMM_LOAD_SHARED(cached_mnt_ns);
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(mnt_ns != NS_INO_UNINITIALIZED))
+ return mnt_ns;
+
+ /*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+ */
+ mnt_ns = NS_INO_UNAVAILABLE;
+
+ if (stat("/proc/self/ns/mnt", &sb) == 0) {
+ mnt_ns = sb.st_ino;
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(cached_mnt_ns, mnt_ns);
+
+ return mnt_ns;
+}
+
+/*
+ * The mnt namespace can change for 3 reasons
+ * * clone(2) called with CLONE_NEWNS
+ * * setns(2) called with the fd of a different mnt ns
+ * * unshare(2) called with CLONE_NEWNS
+ */
+void lttng_context_mnt_ns_reset(void)
+{
+ CMM_STORE_SHARED(cached_mnt_ns, NS_INO_UNINITIALIZED);
+}
+
+static
+size_t mnt_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void mnt_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t mnt_ns;
+
+ mnt_ns = get_mnt_ns();
+ chan->ops->event_write(ctx, &mnt_ns, sizeof(mnt_ns), lttng_ust_rb_alignof(mnt_ns));
+}
+
+static
+void mnt_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_mnt_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("mnt_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ mnt_ns_get_size,
+ mnt_ns_record,
+ mnt_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_mnt_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST net namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include "common/compat/tid.h"
+#include <urcu/tls-compat.h>
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+#include "ns.h"
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event.
+ */
+static DEFINE_URCU_TLS_INIT(ino_t, cached_net_ns, NS_INO_UNINITIALIZED);
+
+static
+ino_t get_net_ns(void)
+{
+ struct stat sb;
+ ino_t net_ns;
+
+ net_ns = CMM_LOAD_SHARED(URCU_TLS(cached_net_ns));
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(net_ns != NS_INO_UNINITIALIZED))
+ return net_ns;
+
+ /*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+ */
+ net_ns = NS_INO_UNAVAILABLE;
+
+ /*
+ * /proc/thread-self was introduced in kernel v3.17
+ */
+ if (stat("/proc/thread-self/ns/net", &sb) == 0) {
+ net_ns = sb.st_ino;
+ } else {
+ char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
+
+ if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
+ "/proc/self/task/%d/ns/net",
+ lttng_gettid()) >= 0) {
+
+ if (stat(proc_ns_path, &sb) == 0) {
+ net_ns = sb.st_ino;
+ }
+ }
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(URCU_TLS(cached_net_ns), net_ns);
+
+ return net_ns;
+}
+
+/*
+ * The net namespace can change for 3 reasons
+ * * clone(2) called with CLONE_NEWNET
+ * * setns(2) called with the fd of a different net ns
+ * * unshare(2) called with CLONE_NEWNET
+ */
+void lttng_context_net_ns_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_net_ns), NS_INO_UNINITIALIZED);
+}
+
+static
+size_t net_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void net_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t net_ns;
+
+ net_ns = get_net_ns();
+ chan->ops->event_write(ctx, &net_ns, sizeof(net_ns), lttng_ust_rb_alignof(net_ns));
+}
+
+static
+void net_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_net_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("net_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ net_ns_get_size,
+ net_ns_record,
+ net_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_net_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_net_ns_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_net_ns)));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST performance monitoring counters (perf-counters) integration.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <lttng/ust-arch.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include <urcu/system.h>
+#include <urcu/arch.h>
+#include <urcu/rculist.h>
+#include "common/macros.h"
+#include <urcu/ref.h>
+#include "common/logging.h"
+#include <signal.h>
+#include <urcu/tls-compat.h>
+#include "perf_event.h"
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+#include "ust-events-internal.h"
+
+/*
+ * We use a global perf counter key and iterate on per-thread RCU lists
+ * of fields in the fast path, even though this is not strictly speaking
+ * what would provide the best fast-path complexity, to ensure teardown
+ * of sessions vs thread exit is handled racelessly.
+ *
+ * Updates and traversals of thread_list are protected by UST lock.
+ * Updates to rcu_field_list are protected by UST lock.
+ */
+
+struct lttng_perf_counter_thread_field {
+ struct lttng_perf_counter_field *field; /* Back reference */
+ struct perf_event_mmap_page *pc;
+ struct cds_list_head thread_field_node; /* Per-field list of thread fields (node) */
+ struct cds_list_head rcu_field_node; /* RCU per-thread list of fields (node) */
+ int fd; /* Perf FD */
+};
+
+struct lttng_perf_counter_thread {
+ struct cds_list_head rcu_field_list; /* RCU per-thread list of fields */
+};
+
+struct lttng_perf_counter_field {
+ struct perf_event_attr attr;
+ struct cds_list_head thread_field_list; /* Per-field list of thread fields */
+ char *name;
+ struct lttng_ust_event_field *event_field;
+};
+
+static pthread_key_t perf_counter_key;
+
+/*
+ * lttng_perf_lock - Protect lttng-ust perf counter data structures
+ *
+ * Nests within the ust_lock, and therefore within the libc dl lock.
+ * Therefore, we need to fixup the TLS before nesting into this lock.
+ * Nests inside RCU bp read-side lock. Protects against concurrent
+ * fork.
+ */
+static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
+ * restored on unlock. Protected by ust_perf_mutex.
+ */
+static int ust_perf_saved_cancelstate;
+
+/*
+ * Track whether we are tracing from a signal handler nested on an
+ * application thread.
+ */
+static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_ust_fixup_perf_counter_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
+}
+
+void lttng_perf_lock(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!URCU_TLS(ust_perf_mutex_nest)++) {
+ /*
+		 * Ensure the compiler doesn't move the store after the close()
+ * call in case close() would be marked as leaf.
+ */
+ cmm_barrier();
+ pthread_mutex_lock(&ust_perf_mutex);
+ ust_perf_saved_cancelstate = oldstate;
+ }
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+}
+
+void lttng_perf_unlock(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, newstate, oldstate;
+ bool restore_cancel = false;
+
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ /*
+	 * Ensure the compiler doesn't move the store before the close()
+ * call, in case close() would be marked as leaf.
+ */
+ cmm_barrier();
+ if (!--URCU_TLS(ust_perf_mutex_nest)) {
+ newstate = ust_perf_saved_cancelstate;
+ restore_cancel = true;
+ pthread_mutex_unlock(&ust_perf_mutex);
+ }
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (restore_cancel) {
+ ret = pthread_setcancelstate(newstate, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ }
+}
+
+static
+size_t perf_counter_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ size += sizeof(uint64_t);
+ return size;
+}
+
+static
+uint64_t read_perf_counter_syscall(
+ struct lttng_perf_counter_thread_field *thread_field)
+{
+ uint64_t count;
+
+ if (caa_unlikely(thread_field->fd < 0))
+ return 0;
+
+ if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
+ < sizeof(count)))
+ return 0;
+
+ return count;
+}
+
+#if defined(LTTNG_UST_ARCH_X86)
+
+static
+uint64_t rdpmc(unsigned int counter)
+{
+ unsigned int low, high;
+
+ asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
+
+ return low | ((uint64_t) high) << 32;
+}
+
+static
+bool has_rdpmc(struct perf_event_mmap_page *pc)
+{
+ if (caa_unlikely(!pc->cap_bit0_is_deprecated))
+ return false;
+ /* Since Linux kernel 3.12. */
+ return pc->cap_user_rdpmc;
+}
+
+static
+uint64_t arch_read_perf_counter(
+ struct lttng_perf_counter_thread_field *thread_field)
+{
+ uint32_t seq, idx;
+ uint64_t count;
+ struct perf_event_mmap_page *pc = thread_field->pc;
+
+ if (caa_unlikely(!pc))
+ return 0;
+
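+	/*
+	 * Note (descriptive, added for clarity): the mmap'd perf page exposes
+	 * a sequence counter in pc->lock; retry the read if the kernel updated
+	 * the page while we were sampling the counter with rdpmc.
+	 */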
+ do {
+ seq = CMM_LOAD_SHARED(pc->lock);
+ cmm_barrier();
+
+ idx = pc->index;
+ if (caa_likely(has_rdpmc(pc) && idx)) {
+ int64_t pmcval;
+
+ pmcval = rdpmc(idx - 1);
+ /* Sign-extend the pmc register result. */
+ pmcval <<= 64 - pc->pmc_width;
+ pmcval >>= 64 - pc->pmc_width;
+ count = pc->offset + pmcval;
+ } else {
+			/* Fall back on the system call if rdpmc cannot be used. */
+ return read_perf_counter_syscall(thread_field);
+ }
+ cmm_barrier();
+ } while (CMM_LOAD_SHARED(pc->lock) != seq);
+
+ return count;
+}
+
+static
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
+{
+ struct perf_event_mmap_page *pc = thread_field->pc;
+
+ if (!pc)
+ return 0;
+ return !has_rdpmc(pc);
+}
+
+#else
+
+/* Generic (slow) implementation using a read system call. */
+static
+uint64_t arch_read_perf_counter(
+ struct lttng_perf_counter_thread_field *thread_field)
+{
+ return read_perf_counter_syscall(thread_field);
+}
+
+static
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
+{
+ return 1;
+}
+
+#endif
+
+static
+int sys_perf_event_open(struct perf_event_attr *attr,
+ pid_t pid, int cpu, int group_fd,
+ unsigned long flags)
+{
+ return syscall(SYS_perf_event_open, attr, pid, cpu,
+ group_fd, flags);
+}
+
+static
+int open_perf_fd(struct perf_event_attr *attr)
+{
+ int fd;
+
+ fd = sys_perf_event_open(attr, 0, -1, -1, 0);
+ if (fd < 0)
+ return -1;
+
+ return fd;
+}
+
+static
+void close_perf_fd(int fd)
+{
+ int ret;
+
+ if (fd < 0)
+ return;
+
+ ret = close(fd);
+ if (ret) {
+ perror("Error closing LTTng-UST perf memory mapping FD");
+ }
+}
+
+static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
+{
+ void *perf_addr;
+
+ perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
+ PROT_READ, MAP_SHARED, thread_field->fd, 0);
+ if (perf_addr == MAP_FAILED)
+ perf_addr = NULL;
+ thread_field->pc = perf_addr;
+
+ if (!arch_perf_keep_fd(thread_field)) {
+ close_perf_fd(thread_field->fd);
+ thread_field->fd = -1;
+ }
+}
+
+static
+void unmap_perf_page(struct perf_event_mmap_page *pc)
+{
+ int ret;
+
+ if (!pc)
+ return;
+ ret = munmap(pc, sizeof(struct perf_event_mmap_page));
+ if (ret < 0) {
+ PERROR("Error in munmap");
+ abort();
+ }
+}
+
+static
+struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
+{
+ struct lttng_perf_counter_thread *perf_thread;
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ if (ret)
+ abort();
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ if (ret)
+ abort();
+ /* Check again with signals disabled */
+ perf_thread = pthread_getspecific(perf_counter_key);
+ if (perf_thread)
+ goto skip;
+ perf_thread = zmalloc(sizeof(*perf_thread));
+ if (!perf_thread)
+ abort();
+ CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
+ ret = pthread_setspecific(perf_counter_key, perf_thread);
+ if (ret)
+ abort();
+skip:
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ if (ret)
+ abort();
+ return perf_thread;
+}
+
+static
+struct lttng_perf_counter_thread_field *
+ add_thread_field(struct lttng_perf_counter_field *perf_field,
+ struct lttng_perf_counter_thread *perf_thread)
+{
+ struct lttng_perf_counter_thread_field *thread_field;
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ if (ret)
+ abort();
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ if (ret)
+ abort();
+ /* Check again with signals disabled */
+ cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
+ rcu_field_node) {
+ if (thread_field->field == perf_field)
+ goto skip;
+ }
+ thread_field = zmalloc(sizeof(*thread_field));
+ if (!thread_field)
+ abort();
+ thread_field->field = perf_field;
+ thread_field->fd = open_perf_fd(&perf_field->attr);
+ if (thread_field->fd >= 0)
+ setup_perf(thread_field);
+ /*
+ * Note: thread_field->pc can be NULL if setup_perf() fails.
+ * Also, thread_field->fd can be -1 if open_perf_fd() fails.
+ */
+ lttng_perf_lock();
+ cds_list_add_rcu(&thread_field->rcu_field_node,
+ &perf_thread->rcu_field_list);
+ cds_list_add(&thread_field->thread_field_node,
+ &perf_field->thread_field_list);
+ lttng_perf_unlock();
+skip:
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ if (ret)
+ abort();
+ return thread_field;
+}
+
+static
+struct lttng_perf_counter_thread_field *
+ get_thread_field(struct lttng_perf_counter_field *field)
+{
+ struct lttng_perf_counter_thread *perf_thread;
+ struct lttng_perf_counter_thread_field *thread_field;
+
+ perf_thread = pthread_getspecific(perf_counter_key);
+ if (!perf_thread)
+ perf_thread = alloc_perf_counter_thread();
+ cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
+ rcu_field_node) {
+ if (thread_field->field == field)
+ return thread_field;
+ }
+ /* perf_counter_thread_field not found, need to add one */
+ return add_thread_field(field, perf_thread);
+}
+
+static
+uint64_t wrapper_perf_counter_read(void *priv)
+{
+ struct lttng_perf_counter_field *perf_field;
+ struct lttng_perf_counter_thread_field *perf_thread_field;
+
+ perf_field = (struct lttng_perf_counter_field *) priv;
+ perf_thread_field = get_thread_field(perf_field);
+ return arch_read_perf_counter(perf_thread_field);
+}
+
+static
+void perf_counter_record(void *priv,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ uint64_t value;
+
+ value = wrapper_perf_counter_read(priv);
+ chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
+}
+
+static
+void perf_counter_get_value(void *priv,
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = wrapper_perf_counter_read(priv);
+}
+
+/* Called with perf lock held */
+static
+void lttng_destroy_perf_thread_field(
+ struct lttng_perf_counter_thread_field *thread_field)
+{
+ close_perf_fd(thread_field->fd);
+ unmap_perf_page(thread_field->pc);
+ cds_list_del_rcu(&thread_field->rcu_field_node);
+ cds_list_del(&thread_field->thread_field_node);
+ free(thread_field);
+}
+
+static
+void lttng_destroy_perf_thread_key(void *_key)
+{
+ struct lttng_perf_counter_thread *perf_thread = _key;
+ struct lttng_perf_counter_thread_field *pos, *p;
+
+ lttng_perf_lock();
+ cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
+ rcu_field_node)
+ lttng_destroy_perf_thread_field(pos);
+ lttng_perf_unlock();
+ free(perf_thread);
+}
+
+/* Called with UST lock held */
+static
+void lttng_destroy_perf_counter_ctx_field(void *priv)
+{
+ struct lttng_perf_counter_field *perf_field;
+ struct lttng_perf_counter_thread_field *pos, *p;
+
+ perf_field = (struct lttng_perf_counter_field *) priv;
+ free(perf_field->name);
+ /*
+	 * This put is performed when no thread can concurrently
+	 * perform a "get", thanks to the urcu-bp grace
+ * period. Holding the lttng perf lock protects against
+ * concurrent modification of the per-thread thread field
+ * list.
+ */
+ lttng_perf_lock();
+ cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
+ thread_field_node)
+ lttng_destroy_perf_thread_field(pos);
+ lttng_perf_unlock();
+ free(perf_field->event_field);
+ free(perf_field);
+}
+
+#ifdef LTTNG_UST_ARCH_ARMV7
+
+static
+int perf_get_exclude_kernel(void)
+{
+ return 0;
+}
+
+#else /* LTTNG_UST_ARCH_ARMV7 */
+
+static
+int perf_get_exclude_kernel(void)
+{
+ return 1;
+}
+
+#endif /* LTTNG_UST_ARCH_ARMV7 */
+
+static const struct lttng_ust_type_common *ust_type =
+ lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(uint64_t),
+ BYTE_ORDER, 10);
+
+/* Called with UST lock held */
+int lttng_add_perf_counter_to_ctx(uint32_t type,
+ uint64_t config,
+ const char *name,
+ struct lttng_ust_ctx **ctx)
+{
+ struct lttng_ust_ctx_field ctx_field;
+ struct lttng_ust_event_field *event_field;
+ struct lttng_perf_counter_field *perf_field;
+ char *name_alloc;
+ int ret;
+
+ if (lttng_find_context(*ctx, name)) {
+ ret = -EEXIST;
+ goto find_error;
+ }
+ name_alloc = strdup(name);
+ if (!name_alloc) {
+ ret = -ENOMEM;
+ goto name_alloc_error;
+ }
+ event_field = zmalloc(sizeof(*event_field));
+ if (!event_field) {
+ ret = -ENOMEM;
+ goto event_field_alloc_error;
+ }
+ event_field->name = name_alloc;
+ event_field->type = ust_type;
+
+ perf_field = zmalloc(sizeof(*perf_field));
+ if (!perf_field) {
+ ret = -ENOMEM;
+ goto perf_field_alloc_error;
+ }
+ perf_field->attr.type = type;
+ perf_field->attr.config = config;
+ perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
+ CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
+ perf_field->name = name_alloc;
+ perf_field->event_field = event_field;
+
+ /* Ensure that this perf counter can be used in this process. */
+ ret = open_perf_fd(&perf_field->attr);
+ if (ret < 0) {
+ ret = -ENODEV;
+ goto setup_error;
+ }
+ close_perf_fd(ret);
+
+ ctx_field.event_field = event_field;
+ ctx_field.get_size = perf_counter_get_size;
+ ctx_field.record = perf_counter_record;
+ ctx_field.get_value = perf_counter_get_value;
+ ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
+ ctx_field.priv = perf_field;
+
+ ret = lttng_ust_context_append(ctx, &ctx_field);
+ if (ret) {
+ ret = -ENOMEM;
+ goto append_context_error;
+ }
+ return 0;
+
+append_context_error:
+setup_error:
+ free(perf_field);
+perf_field_alloc_error:
+ free(event_field);
+event_field_alloc_error:
+ free(name_alloc);
+name_alloc_error:
+find_error:
+ return ret;
+}
+
+int lttng_perf_counter_init(void)
+{
+ int ret;
+
+ ret = pthread_key_create(&perf_counter_key,
+ lttng_destroy_perf_thread_key);
+ if (ret)
+ ret = -ret;
+ return ret;
+}
+
+void lttng_perf_counter_exit(void)
+{
+ int ret;
+
+ ret = pthread_key_delete(perf_counter_key);
+ if (ret) {
+ errno = ret;
+ PERROR("Error in pthread_key_delete");
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST pid namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "ns.h"
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event. The PID namespace is global to the process.
+ */
+static ino_t cached_pid_ns = NS_INO_UNINITIALIZED;
+
+static
+ino_t get_pid_ns(void)
+{
+ struct stat sb;
+ ino_t pid_ns;
+
+ pid_ns = CMM_LOAD_SHARED(cached_pid_ns);
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(pid_ns != NS_INO_UNINITIALIZED))
+ return pid_ns;
+
+ /*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+ */
+ pid_ns = NS_INO_UNAVAILABLE;
+
+ if (stat("/proc/self/ns/pid", &sb) == 0) {
+ pid_ns = sb.st_ino;
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(cached_pid_ns, pid_ns);
+
+ return pid_ns;
+}
+
+/*
+ * A process's PID namespace membership is determined when the process is
+ * created and cannot be changed thereafter.
+ *
+ * The pid namespace can change only on clone(2) / fork(2) :
+ * - clone(2) with the CLONE_NEWPID flag
+ * - clone(2) / fork(2) after a call to unshare(2) with the CLONE_NEWPID flag
+ * - clone(2) / fork(2) after a call to setns(2) with a PID namespace fd
+ */
+void lttng_context_pid_ns_reset(void)
+{
+ CMM_STORE_SHARED(cached_pid_ns, NS_INO_UNINITIALIZED);
+}
+
+static
+size_t pid_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void pid_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t pid_ns;
+
+ pid_ns = get_pid_ns();
+ chan->ops->event_write(ctx, &pid_ns, sizeof(pid_ns), lttng_ust_rb_alignof(pid_ns));
+}
+
+static
+void pid_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_pid_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("pid_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ pid_ns_get_size,
+ pid_ns_record,
+ pid_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_pid_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST procname context.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include <urcu/tls-compat.h>
+#include <assert.h>
+#include "common/compat/pthread.h"
+#include "lttng-tracer-core.h"
+
+#include "context-internal.h"
+
+/* Maximum number of nesting levels for the procname cache. */
+#define PROCNAME_NESTING_MAX 2
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event.
+ * Upon exec, procname changes, but exec takes care of throwing away
+ * this cached version.
+ * The procname can also change by calling prctl(). The procname should
+ * be set for a thread before the first event is logged within this
+ * thread.
+ */
+typedef char procname_array[PROCNAME_NESTING_MAX][17];
+
+static DEFINE_URCU_TLS(procname_array, cached_procname);
+
+static DEFINE_URCU_TLS(int, procname_nesting);
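+
+/*
+ * Note (descriptive, added for clarity): the per-thread cache has one entry
+ * per nesting level, so that an event recorded from a nested context (e.g. a
+ * signal handler) while level 0 is being refreshed does not observe a
+ * partially updated procname.
+ */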
+
+static inline
+const char *wrapper_getprocname(void)
+{
+ int nesting = CMM_LOAD_SHARED(URCU_TLS(procname_nesting));
+
+ if (caa_unlikely(nesting >= PROCNAME_NESTING_MAX))
+ return "<unknown>";
+ if (caa_unlikely(!URCU_TLS(cached_procname)[nesting][0])) {
+ CMM_STORE_SHARED(URCU_TLS(procname_nesting), nesting + 1);
+ /* Increment nesting before updating cache. */
+ cmm_barrier();
+ lttng_pthread_getname_np(URCU_TLS(cached_procname)[nesting], LTTNG_UST_ABI_PROCNAME_LEN);
+ URCU_TLS(cached_procname)[nesting][LTTNG_UST_ABI_PROCNAME_LEN - 1] = '\0';
+ /* Decrement nesting after updating cache. */
+ cmm_barrier();
+ CMM_STORE_SHARED(URCU_TLS(procname_nesting), nesting);
+ }
+ return URCU_TLS(cached_procname)[nesting];
+}
+
+/* Reset should not be called from a signal handler. */
+void lttng_ust_context_procname_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_procname)[1][0], '\0');
+ CMM_STORE_SHARED(URCU_TLS(procname_nesting), 1);
+ CMM_STORE_SHARED(URCU_TLS(cached_procname)[0][0], '\0');
+ CMM_STORE_SHARED(URCU_TLS(procname_nesting), 0);
+}
+
+static
+size_t procname_get_size(void *priv __attribute__((unused)),
+ size_t offset __attribute__((unused)))
+{
+ return LTTNG_UST_ABI_PROCNAME_LEN;
+}
+
+static
+void procname_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ const char *procname;
+
+ procname = wrapper_getprocname();
+ chan->ops->event_write(ctx, procname, LTTNG_UST_ABI_PROCNAME_LEN, 1);
+}
+
+static
+void procname_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.str = wrapper_getprocname();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("procname",
+ lttng_ust_static_type_array_text(LTTNG_UST_ABI_PROCNAME_LEN),
+ false, false),
+ procname_get_size,
+ procname_record,
+ procname_get_value,
+ NULL, NULL);
+
+int lttng_add_procname_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_procname_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_procname)[0]));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST application context provider.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <common/ust-context-provider.h>
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+#include "jhash.h"
+#include "context-provider-internal.h"
+#include "common/macros.h"
+
+struct lttng_ust_registered_context_provider {
+ const struct lttng_ust_context_provider *provider;
+
+ struct cds_hlist_node node;
+};
+
+struct lttng_ust_app_ctx {
+ char *name;
+ struct lttng_ust_event_field *event_field;
+ struct lttng_ust_type_common *type;
+};
+
+#define CONTEXT_PROVIDER_HT_BITS 12
+#define CONTEXT_PROVIDER_HT_SIZE (1U << CONTEXT_PROVIDER_HT_BITS)
+struct context_provider_ht {
+ struct cds_hlist_head table[CONTEXT_PROVIDER_HT_SIZE];
+};
+
+static struct context_provider_ht context_provider_ht;
+
+static const struct lttng_ust_context_provider *
+ lookup_provider_by_name(const char *name)
+{
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ struct lttng_ust_registered_context_provider *reg_provider;
+ uint32_t hash;
+ const char *end;
+ size_t len;
+
+ /* Lookup using everything before first ':' as key. */
+ end = strchr(name, ':');
+ if (end)
+ len = end - name;
+ else
+ len = strlen(name);
+ hash = jhash(name, len, 0);
+ head = &context_provider_ht.table[hash & (CONTEXT_PROVIDER_HT_SIZE - 1)];
+ cds_hlist_for_each_entry(reg_provider, node, head, node) {
+ if (!strncmp(reg_provider->provider->name, name, len))
+ return reg_provider->provider;
+ }
+ return NULL;
+}
+
+struct lttng_ust_registered_context_provider *lttng_ust_context_provider_register(struct lttng_ust_context_provider *provider)
+{
+ struct lttng_ust_registered_context_provider *reg_provider = NULL;
+ struct cds_hlist_head *head;
+ size_t name_len = strlen(provider->name);
+ uint32_t hash;
+
+ lttng_ust_fixup_tls();
+
+ /* Provider name starts with "$app.". */
+ if (strncmp("$app.", provider->name, strlen("$app.")) != 0)
+ return NULL;
+ /* Provider name cannot contain a colon character. */
+ if (strchr(provider->name, ':'))
+ return NULL;
+ if (ust_lock())
+ goto end;
+ if (lookup_provider_by_name(provider->name))
+ goto end;
+ reg_provider = zmalloc(sizeof(struct lttng_ust_registered_context_provider));
+ if (!reg_provider)
+ goto end;
+ reg_provider->provider = provider;
+ hash = jhash(provider->name, name_len, 0);
+ head = &context_provider_ht.table[hash & (CONTEXT_PROVIDER_HT_SIZE - 1)];
+	cds_hlist_add_head(&reg_provider->node, head);
+
+ lttng_ust_context_set_session_provider(provider->name,
+ provider->get_size, provider->record,
+ provider->get_value, provider->priv);
+
+ lttng_ust_context_set_event_notifier_group_provider(provider->name,
+ provider->get_size, provider->record,
+ provider->get_value, provider->priv);
+end:
+ ust_unlock();
+ return reg_provider;
+}
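+
+/*
+ * Sketch of expected usage from an application (the my_* names are
+ * illustrative): fill a struct lttng_ust_context_provider with a
+ * "$app."-prefixed name and callbacks, then register it:
+ *
+ *	static struct lttng_ust_context_provider my_provider = {
+ *		.name = "$app.myapp",
+ *		.get_size = my_get_size,
+ *		.record = my_record,
+ *		.get_value = my_get_value,
+ *		.priv = NULL,
+ *	};
+ *
+ *	reg = lttng_ust_context_provider_register(&my_provider);
+ */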
+
+void lttng_ust_context_provider_unregister(struct lttng_ust_registered_context_provider *reg_provider)
+{
+ lttng_ust_fixup_tls();
+
+ if (ust_lock())
+ goto end;
+ lttng_ust_context_set_session_provider(reg_provider->provider->name,
+ lttng_ust_dummy_get_size, lttng_ust_dummy_record,
+ lttng_ust_dummy_get_value, NULL);
+
+ lttng_ust_context_set_event_notifier_group_provider(reg_provider->provider->name,
+ lttng_ust_dummy_get_size, lttng_ust_dummy_record,
+ lttng_ust_dummy_get_value, NULL);
+
+	cds_hlist_del(&reg_provider->node);
+end:
+ ust_unlock();
+ free(reg_provider);
+}
+
+static void destroy_app_ctx(void *priv)
+{
+ struct lttng_ust_app_ctx *app_ctx = (struct lttng_ust_app_ctx *) priv;
+
+ free(app_ctx->name);
+ free(app_ctx->event_field);
+ free(app_ctx->type);
+ free(app_ctx);
+}
+
+/*
+ * Called with ust mutex held.
+ * Add application context to array of context, even if the application
+ * context is not currently loaded by application. It will then use the
+ * dummy callbacks in that case.
+ * Always performed before tracing is started, since it modifies
+ * metadata describing the context.
+ */
+int lttng_ust_add_app_context_to_ctx_rcu(const char *name,
+ struct lttng_ust_ctx **ctx)
+{
+ const struct lttng_ust_context_provider *provider;
+ struct lttng_ust_ctx_field new_field = { 0 };
+ struct lttng_ust_event_field *event_field = NULL;
+ struct lttng_ust_type_common *type = NULL;
+ struct lttng_ust_app_ctx *app_ctx = NULL;
+ char *ctx_name;
+ int ret;
+
+ if (*ctx && lttng_find_context(*ctx, name))
+ return -EEXIST;
+ event_field = zmalloc(sizeof(struct lttng_ust_event_field));
+ if (!event_field) {
+ ret = -ENOMEM;
+ goto error_event_field_alloc;
+ }
+ ctx_name = strdup(name);
+ if (!ctx_name) {
+ ret = -ENOMEM;
+ goto error_field_name_alloc;
+ }
+ type = zmalloc(sizeof(struct lttng_ust_type_common));
+ if (!type) {
+ ret = -ENOMEM;
+ goto error_field_type_alloc;
+ }
+ app_ctx = zmalloc(sizeof(struct lttng_ust_app_ctx));
+ if (!app_ctx) {
+ ret = -ENOMEM;
+ goto error_app_ctx_alloc;
+ }
+ event_field->name = ctx_name;
+ type->type = lttng_ust_type_dynamic;
+ event_field->type = type;
+ new_field.event_field = event_field;
+ /*
+ * If provider is not found, we add the context anyway, but
+ * it will provide a dummy context.
+ */
+ provider = lookup_provider_by_name(name);
+ if (provider) {
+ new_field.get_size = provider->get_size;
+ new_field.record = provider->record;
+ new_field.get_value = provider->get_value;
+ } else {
+ new_field.get_size = lttng_ust_dummy_get_size;
+ new_field.record = lttng_ust_dummy_record;
+ new_field.get_value = lttng_ust_dummy_get_value;
+ }
+ new_field.destroy = destroy_app_ctx;
+ new_field.priv = app_ctx;
+ /*
+ * For application context, add it by expanding
+ * ctx array.
+ */
+ ret = lttng_ust_context_append_rcu(ctx, &new_field);
+ if (ret) {
+ destroy_app_ctx(app_ctx);
+ return ret;
+ }
+ return 0;
+
+error_app_ctx_alloc:
+ free(type);
+error_field_type_alloc:
+ free(ctx_name);
+error_field_name_alloc:
+ free(event_field);
+error_event_field_alloc:
+ return ret;
+}
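
For context, this is how an application-side provider is expected to plug into the registration path above: the provider name must start with "$app." and contain no ':', and the callbacks use the same signatures as the per-field handlers in this file. A minimal, hypothetical sketch follows; the header path, the exact provider struct layout, and the dynamic-type handling in get_value are assumptions, not confirmed by this patch.

    #include <stddef.h>
    #include <stdint.h>
    #include <lttng/ringbuffer-context.h>
    #include <common/ust-context-provider.h>	/* assumed header location */

    static size_t myapp_get_size(void *priv __attribute__((unused)), size_t offset)
    {
    	size_t size = 0;

    	size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int64_t));
    	size += sizeof(int64_t);
    	return size;
    }

    static void myapp_record(void *priv __attribute__((unused)),
    		struct lttng_ust_lib_ring_buffer_ctx *ctx,
    		struct lttng_ust_channel_buffer *chan)
    {
    	int64_t v = 42;	/* hypothetical application-defined value */

    	chan->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
    }

    static void myapp_get_value(void *priv __attribute__((unused)),
    		struct lttng_ust_ctx_value *value)
    {
    	/* A complete dynamic app context would also set the dynamic-type selector. */
    	value->u.s64 = 42;
    }

    static struct lttng_ust_context_provider myapp_provider;
    static struct lttng_ust_registered_context_provider *myapp_reg;

    static void myapp_register_provider(void)
    {
    	/* Only the members dereferenced by the registration code above are set here. */
    	myapp_provider.name = "$app.myapp";	/* must start with "$app.", no ':' allowed */
    	myapp_provider.get_size = myapp_get_size;
    	myapp_provider.record = myapp_record;
    	myapp_provider.get_value = myapp_get_value;
    	myapp_provider.priv = NULL;
    	myapp_reg = lttng_ust_context_provider_register(&myapp_provider);
    }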
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST pthread_id context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <pthread.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+
+static
+size_t pthread_id_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(unsigned long));
+ size += sizeof(unsigned long);
+ return size;
+}
+
+static
+void pthread_id_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ unsigned long pthread_id;
+
+ pthread_id = (unsigned long) pthread_self();
+ chan->ops->event_write(ctx, &pthread_id, sizeof(pthread_id), lttng_ust_rb_alignof(pthread_id));
+}
+
+static
+void pthread_id_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = (unsigned long) pthread_self();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("pthread_id",
+ lttng_ust_static_type_integer(sizeof(unsigned long) * CHAR_BIT,
+ lttng_ust_rb_alignof(unsigned long) * CHAR_BIT,
+ lttng_ust_is_signed_type(unsigned long),
+ BYTE_ORDER, 10),
+ false, false),
+ pthread_id_get_size,
+ pthread_id_record,
+ pthread_id_get_value,
+ NULL, NULL);
+
+int lttng_add_pthread_id_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
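
The callbacks above record pthread_self() widened to an unsigned long. A trivial standalone illustration of that representation (note that POSIX does not guarantee pthread_t is an arithmetic type; the cast matches what this file does on the supported platforms):

    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
    	/* Same representation the pthread_id context field records. */
    	unsigned long pthread_id = (unsigned long) pthread_self();

    	printf("pthread_id: %lu\n", pthread_id);
    	return 0;
    }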
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2020 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST time namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include "common/compat/tid.h"
+#include <urcu/tls-compat.h>
+#include "lttng-tracer-core.h"
+#include "ns.h"
+#include "context-internal.h"
+
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event.
+ */
+static DEFINE_URCU_TLS_INIT(ino_t, cached_time_ns, NS_INO_UNINITIALIZED);
+
+static
+ino_t get_time_ns(void)
+{
+ struct stat sb;
+ ino_t time_ns;
+
+ time_ns = CMM_LOAD_SHARED(URCU_TLS(cached_time_ns));
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(time_ns != NS_INO_UNINITIALIZED))
+ return time_ns;
+
+ /*
+ * At this point we have to populate the cache. Set the initial
+ * value to NS_INO_UNAVAILABLE (0); if we fail to get the inode
+ * number from the proc filesystem, this is the value we will
+ * cache.
+ */
+ time_ns = NS_INO_UNAVAILABLE;
+
+ /*
+ * /proc/thread-self was introduced in kernel v3.17
+ */
+ if (stat("/proc/thread-self/ns/time", &sb) == 0) {
+ time_ns = sb.st_ino;
+ } else {
+ char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
+
+ if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
+ "/proc/self/task/%d/ns/time",
+ lttng_gettid()) >= 0) {
+
+ if (stat(proc_ns_path, &sb) == 0) {
+ time_ns = sb.st_ino;
+ }
+ }
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(URCU_TLS(cached_time_ns), time_ns);
+
+ return time_ns;
+}
+
+/*
+ * The time namespace can change for 2 reasons
+ * * setns(2) called with the fd of a different time ns
+ * * clone(2) / fork(2) after a call to unshare(2) with the CLONE_NEWTIME flag
+ */
+void lttng_context_time_ns_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_time_ns), NS_INO_UNINITIALIZED);
+}
+
+static
+size_t time_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void time_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t time_ns;
+
+ time_ns = get_time_ns();
+ chan->ops->event_write(ctx, &time_ns, sizeof(time_ns), lttng_ust_rb_alignof(time_ns));
+}
+
+static
+void time_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_time_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("time_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ time_ns_get_size,
+ time_ns_record,
+ time_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_time_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_time_ns_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_time_ns)));
+}
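
get_time_ns() above prefers /proc/thread-self (Linux >= 3.17) and falls back to the per-task path built from the thread ID. The same fallback can be written as a standalone sketch, with syscall(SYS_gettid) standing in for the internal lttng_gettid() helper (an assumption for illustration only; /proc/*/ns/time itself requires a kernel with time namespaces, Linux >= 5.6):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    static ino_t read_time_ns_inode(void)
    {
    	struct stat sb;
    	char path[64];

    	/* Preferred: per-thread symlink, available since Linux 3.17. */
    	if (stat("/proc/thread-self/ns/time", &sb) == 0)
    		return sb.st_ino;
    	/* Fallback: build the per-task path from the thread ID. */
    	if (snprintf(path, sizeof(path), "/proc/self/task/%ld/ns/time",
    			(long) syscall(SYS_gettid)) >= 0
    			&& stat(path, &sb) == 0)
    		return sb.st_ino;
    	return 0;	/* mirrors NS_INO_UNAVAILABLE */
    }

    int main(void)
    {
    	printf("time ns inode: %lu\n", (unsigned long) read_time_ns_inode());
    	return 0;
    }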
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST user namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "ns.h"
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event. The user namespace is global to the process.
+ */
+static ino_t cached_user_ns = NS_INO_UNINITIALIZED;
+
+static
+ino_t get_user_ns(void)
+{
+ struct stat sb;
+ ino_t user_ns;
+
+ user_ns = CMM_LOAD_SHARED(cached_user_ns);
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(user_ns != NS_INO_UNINITIALIZED))
+ return user_ns;
+
+ /*
+ * At this point we have to populate the cache. Set the initial
+ * value to NS_INO_UNAVAILABLE (0); if we fail to get the inode
+ * number from the proc filesystem, this is the value we will
+ * cache.
+ */
+ user_ns = NS_INO_UNAVAILABLE;
+
+ if (stat("/proc/self/ns/user", &sb) == 0) {
+ user_ns = sb.st_ino;
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(cached_user_ns, user_ns);
+
+ return user_ns;
+}
+
+/*
+ * The user namespace can change for 3 reasons
+ * * clone(2) called with CLONE_NEWUSER
+ * * setns(2) called with the fd of a different user ns
+ * * unshare(2) called with CLONE_NEWUSER
+ */
+void lttng_context_user_ns_reset(void)
+{
+ CMM_STORE_SHARED(cached_user_ns, NS_INO_UNINITIALIZED);
+}
+
+static
+size_t user_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void user_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t user_ns;
+
+ user_ns = get_user_ns();
+ chan->ops->event_write(ctx, &user_ns, sizeof(user_ns), lttng_ust_rb_alignof(user_ns));
+}
+
+static
+void user_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_user_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("user_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ user_ns_get_size,
+ user_ns_record,
+ user_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_user_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
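
Because the user namespace is process-global, the cache above is a plain static rather than a TLS variable. The inode number obtained this way identifies the namespace; namespaces(7) documents comparing both st_dev and st_ino to decide whether two processes share one. A small illustrative helper, not part of this patch:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    static bool same_user_ns(pid_t a, pid_t b)
    {
    	char path_a[64], path_b[64];
    	struct stat sa, sb;

    	snprintf(path_a, sizeof(path_a), "/proc/%ld/ns/user", (long) a);
    	snprintf(path_b, sizeof(path_b), "/proc/%ld/ns/user", (long) b);
    	if (stat(path_a, &sa) != 0 || stat(path_b, &sb) != 0)
    		return false;
    	/* Same namespace iff both device and inode numbers match. */
    	return sa.st_dev == sb.st_dev && sa.st_ino == sb.st_ino;
    }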
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST uts namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include "common/compat/tid.h"
+#include <urcu/tls-compat.h>
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+#include "ns.h"
+
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event.
+ */
+static DEFINE_URCU_TLS_INIT(ino_t, cached_uts_ns, NS_INO_UNINITIALIZED);
+
+static
+ino_t get_uts_ns(void)
+{
+ struct stat sb;
+ ino_t uts_ns;
+
+ uts_ns = CMM_LOAD_SHARED(URCU_TLS(cached_uts_ns));
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(uts_ns != NS_INO_UNINITIALIZED))
+ return uts_ns;
+
+ /*
+ * At this point we have to populate the cache. Set the initial
+ * value to NS_INO_UNAVAILABLE (0); if we fail to get the inode
+ * number from the proc filesystem, this is the value we will
+ * cache.
+ */
+ uts_ns = NS_INO_UNAVAILABLE;
+
+ /*
+ * /proc/thread-self was introduced in kernel v3.17
+ */
+ if (stat("/proc/thread-self/ns/uts", &sb) == 0) {
+ uts_ns = sb.st_ino;
+ } else {
+ char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
+
+ if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
+ "/proc/self/task/%d/ns/uts",
+ lttng_gettid()) >= 0) {
+
+ if (stat(proc_ns_path, &sb) == 0) {
+ uts_ns = sb.st_ino;
+ }
+ }
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(URCU_TLS(cached_uts_ns), uts_ns);
+
+ return uts_ns;
+}
+
+/*
+ * The uts namespace can change for 3 reasons
+ * * clone(2) called with CLONE_NEWUTS
+ * * setns(2) called with the fd of a different uts ns
+ * * unshare(2) called with CLONE_NEWUTS
+ */
+void lttng_context_uts_ns_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_uts_ns), NS_INO_UNINITIALIZED);
+}
+
+static
+size_t uts_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void uts_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t uts_ns;
+
+ uts_ns = get_uts_ns();
+ chan->ops->event_write(ctx, &uts_ns, sizeof(uts_ns), lttng_ust_rb_alignof(uts_ns));
+}
+
+static
+void uts_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_uts_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("uts_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ uts_ns_get_size,
+ uts_ns_record,
+ uts_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_uts_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_uts_ns_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_uts_ns)));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST namespaced effective group ID context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "creds.h"
+
+
+/*
+ * At the kernel level, user IDs and group IDs are a per-thread attribute.
+ * However, POSIX requires that all threads in a process share the same
+ * credentials. The NPTL threading implementation handles the POSIX
+ * requirements by providing wrapper functions for the various system calls
+ * that change process UIDs and GIDs. These wrapper functions (including those
+ * for setreuid() and setregid()) employ a signal-based technique to ensure
+ * that when one thread changes credentials, all of the other threads in the
+ * process also change their credentials.
+ */
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event. User / group IDs are global to the process.
+ */
+static gid_t cached_vegid = INVALID_GID;
+
+static
+gid_t get_vegid(void)
+{
+ gid_t vegid;
+
+ vegid = CMM_LOAD_SHARED(cached_vegid);
+
+ if (caa_unlikely(vegid == INVALID_GID)) {
+ vegid = getegid();
+ CMM_STORE_SHARED(cached_vegid, vegid);
+ }
+
+ return vegid;
+}
+
+/*
+ * The vegid can change on setgid, setregid, setresgid and setegid.
+ */
+void lttng_context_vegid_reset(void)
+{
+ CMM_STORE_SHARED(cached_vegid, INVALID_GID);
+}
+
+static
+size_t vegid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
+ size += sizeof(gid_t);
+ return size;
+}
+
+static
+void vegid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ gid_t vegid;
+
+ vegid = get_vegid();
+ chan->ops->event_write(ctx, &vegid, sizeof(vegid), lttng_ust_rb_alignof(vegid));
+}
+
+static
+void vegid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_vegid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vegid",
+ lttng_ust_static_type_integer(sizeof(gid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(gid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(gid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vegid_get_size,
+ vegid_record,
+ vegid_get_value,
+ NULL, NULL);
+
+int lttng_add_vegid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
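
All of the credential contexts below repeat the pattern used here: read the cache with CMM_LOAD_SHARED, fall back to the system call, publish with CMM_STORE_SHARED, and let a reset hook invalidate the cache when credentials change. For readers unfamiliar with those liburcu macros, the idea is roughly equivalent to relaxed C11 atomics; the following is only an analogy, not what the patch uses:

    #include <stdatomic.h>
    #include <sys/types.h>
    #include <unistd.h>

    #define EXAMPLE_INVALID_GID ((gid_t) -1)	/* stand-in for INVALID_GID from creds.h */

    static _Atomic gid_t example_cached_vegid = EXAMPLE_INVALID_GID;

    static gid_t example_get_vegid(void)
    {
    	gid_t vegid = atomic_load_explicit(&example_cached_vegid, memory_order_relaxed);

    	if (vegid == EXAMPLE_INVALID_GID) {
    		vegid = getegid();
    		atomic_store_explicit(&example_cached_vegid, vegid, memory_order_relaxed);
    	}
    	return vegid;
    }

    /* Invoked when the process changes its credentials (cf. lttng_context_vegid_reset). */
    static void example_reset_vegid(void)
    {
    	atomic_store_explicit(&example_cached_vegid, EXAMPLE_INVALID_GID, memory_order_relaxed);
    }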
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST namespaced effective user ID context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "creds.h"
+
+
+/*
+ * At the kernel level, user IDs and group IDs are a per-thread attribute.
+ * However, POSIX requires that all threads in a process share the same
+ * credentials. The NPTL threading implementation handles the POSIX
+ * requirements by providing wrapper functions for the various system calls
+ * that change process UIDs and GIDs. These wrapper functions (including those
+ * for setreuid() and setregid()) employ a signal-based technique to ensure
+ * that when one thread changes credentials, all of the other threads in the
+ * process also change their credentials.
+ */
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event. User / group IDs are global to the process.
+ */
+static uid_t cached_veuid = INVALID_UID;
+
+static
+uid_t get_veuid(void)
+{
+ uid_t veuid;
+
+ veuid = CMM_LOAD_SHARED(cached_veuid);
+
+ if (caa_unlikely(veuid == INVALID_UID)) {
+ veuid = geteuid();
+ CMM_STORE_SHARED(cached_veuid, veuid);
+ }
+
+ return veuid;
+}
+
+/*
+ * The veuid can change on setuid, setreuid, setresuid and seteuid.
+ */
+void lttng_context_veuid_reset(void)
+{
+ CMM_STORE_SHARED(cached_veuid, INVALID_UID);
+}
+
+static
+size_t veuid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
+ size += sizeof(uid_t);
+ return size;
+}
+
+static
+void veuid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ uid_t veuid;
+
+ veuid = get_veuid();
+ chan->ops->event_write(ctx, &veuid, sizeof(veuid), lttng_ust_rb_alignof(veuid));
+}
+
+static
+void veuid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_veuid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("veuid",
+ lttng_ust_static_type_integer(sizeof(uid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(uid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ veuid_get_size,
+ veuid_record,
+ veuid_get_value,
+ NULL, NULL);
+
+int lttng_add_veuid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST namespaced real group ID context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "creds.h"
+
+
+/*
+ * At the kernel level, user IDs and group IDs are a per-thread attribute.
+ * However, POSIX requires that all threads in a process share the same
+ * credentials. The NPTL threading implementation handles the POSIX
+ * requirements by providing wrapper functions for the various system calls
+ * that change process UIDs and GIDs. These wrapper functions (including those
+ * for setreuid() and setregid()) employ a signal-based technique to ensure
+ * that when one thread changes credentials, all of the other threads in the
+ * process also change their credentials.
+ */
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event. User / group IDs are global to the process.
+ */
+static gid_t cached_vgid = INVALID_GID;
+
+static
+gid_t get_vgid(void)
+{
+ gid_t vgid;
+
+ vgid = CMM_LOAD_SHARED(cached_vgid);
+
+ if (caa_unlikely(vgid == INVALID_GID)) {
+ vgid = getgid();
+ CMM_STORE_SHARED(cached_vgid, vgid);
+ }
+
+ return vgid;
+}
+
+/*
+ * The vgid can change on setgid, setregid and setresgid.
+ */
+void lttng_context_vgid_reset(void)
+{
+ CMM_STORE_SHARED(cached_vgid, INVALID_GID);
+}
+
+static
+size_t vgid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
+ size += sizeof(gid_t);
+ return size;
+}
+
+static
+void vgid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ gid_t vgid;
+
+ vgid = get_vgid();
+ chan->ops->event_write(ctx, &vgid, sizeof(vgid), lttng_ust_rb_alignof(vgid));
+}
+
+static
+void vgid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_vgid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vgid",
+ lttng_ust_static_type_integer(sizeof(gid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(gid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(gid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vgid_get_size,
+ vgid_record,
+ vgid_get_value,
+ NULL, NULL);
+
+int lttng_add_vgid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST vpid context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event.
+ */
+static pid_t cached_vpid;
+
+static inline
+pid_t wrapper_getvpid(void)
+{
+ pid_t vpid;
+
+ vpid = CMM_LOAD_SHARED(cached_vpid);
+ if (caa_unlikely(!vpid)) {
+ vpid = getpid();
+ CMM_STORE_SHARED(cached_vpid, vpid);
+ }
+ return vpid;
+}
+
+/*
+ * Upon fork or clone, the PID assigned to our thread is not the same as
+ * we kept in cache.
+ */
+void lttng_context_vpid_reset(void)
+{
+ CMM_STORE_SHARED(cached_vpid, 0);
+}
+
+static
+size_t vpid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(pid_t));
+ size += sizeof(pid_t);
+ return size;
+}
+
+static
+void vpid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ pid_t vpid = wrapper_getvpid();
+
+ chan->ops->event_write(ctx, &vpid, sizeof(vpid), lttng_ust_rb_alignof(vpid));
+}
+
+static
+void vpid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = wrapper_getvpid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vpid",
+ lttng_ust_static_type_integer(sizeof(pid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(pid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(pid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vpid_get_size,
+ vpid_record,
+ vpid_get_value,
+ NULL, NULL);
+
+int lttng_add_vpid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST namespaced saved set-group ID context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "creds.h"
+
+
+/*
+ * At the kernel level, user IDs and group IDs are a per-thread attribute.
+ * However, POSIX requires that all threads in a process share the same
+ * credentials. The NPTL threading implementation handles the POSIX
+ * requirements by providing wrapper functions for the various system calls
+ * that change process UIDs and GIDs. These wrapper functions (including those
+ * for setreuid() and setregid()) employ a signal-based technique to ensure
+ * that when one thread changes credentials, all of the other threads in the
+ * process also change their credentials.
+ */
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event. User / group IDs are global to the process.
+ */
+static gid_t cached_vsgid = INVALID_GID;
+
+static
+gid_t get_vsgid(void)
+{
+ gid_t vsgid;
+
+ vsgid = CMM_LOAD_SHARED(cached_vsgid);
+
+ if (caa_unlikely(vsgid == INVALID_GID)) {
+ gid_t gid, egid, sgid;
+
+ if (getresgid(&gid, &egid, &sgid) == 0) {
+ vsgid = sgid;
+ CMM_STORE_SHARED(cached_vsgid, vsgid);
+ }
+ }
+
+ return vsgid;
+}
+
+/*
+ * The vsgid can change on setgid, setregid and setresgid.
+ */
+void lttng_context_vsgid_reset(void)
+{
+ CMM_STORE_SHARED(cached_vsgid, INVALID_GID);
+}
+
+static
+size_t vsgid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
+ size += sizeof(gid_t);
+ return size;
+}
+
+static
+void vsgid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ gid_t vsgid;
+
+ vsgid = get_vsgid();
+ chan->ops->event_write(ctx, &vsgid, sizeof(vsgid), lttng_ust_rb_alignof(vsgid));
+}
+
+static
+void vsgid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_vsgid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vsgid",
+ lttng_ust_static_type_integer(sizeof(gid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(gid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(gid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vsgid_get_size,
+ vsgid_record,
+ vsgid_get_value,
+ NULL, NULL);
+
+int lttng_add_vsgid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST namespaced saved set-user ID context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "creds.h"
+
+
+/*
+ * At the kernel level, user IDs and group IDs are a per-thread attribute.
+ * However, POSIX requires that all threads in a process share the same
+ * credentials. The NPTL threading implementation handles the POSIX
+ * requirements by providing wrapper functions for the various system calls
+ * that change process UIDs and GIDs. These wrapper functions (including those
+ * for setreuid() and setregid()) employ a signal-based technique to ensure
+ * that when one thread changes credentials, all of the other threads in the
+ * process also change their credentials.
+ */
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event. User / group IDs are global to the process.
+ */
+static uid_t cached_vsuid = INVALID_UID;
+
+static
+uid_t get_vsuid(void)
+{
+ uid_t vsuid;
+
+ vsuid = CMM_LOAD_SHARED(cached_vsuid);
+
+ if (caa_unlikely(vsuid == INVALID_UID)) {
+ uid_t uid, euid, suid;
+
+ if (getresuid(&uid, &euid, &suid) == 0) {
+ vsuid = suid;
+ CMM_STORE_SHARED(cached_vsuid, vsuid);
+ }
+ }
+
+ return vsuid;
+}
+
+/*
+ * The vsuid can change on setuid, setreuid and setresuid.
+ */
+void lttng_context_vsuid_reset(void)
+{
+ CMM_STORE_SHARED(cached_vsuid, INVALID_UID);
+}
+
+static
+size_t vsuid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
+ size += sizeof(uid_t);
+ return size;
+}
+
+static
+void vsuid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ uid_t vsuid;
+
+ vsuid = get_vsuid();
+ chan->ops->event_write(ctx, &vsuid, sizeof(vsuid), lttng_ust_rb_alignof(vsuid));
+}
+
+static
+void vsuid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_vsuid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vsuid",
+ lttng_ust_static_type_integer(sizeof(uid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(uid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vsuid_get_size,
+ vsuid_record,
+ vsuid_get_value,
+ NULL, NULL);
+
+int lttng_add_vsuid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
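
The saved set-IDs have no dedicated getter the way geteuid()/getegid() serve the effective IDs, which is why get_vsuid()/get_vsgid() go through getresuid(2)/getresgid(2) and only cache on success. A standalone illustration (Linux-specific; needs _GNU_SOURCE with glibc):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    int main(void)
    {
    	uid_t ruid, euid, suid;

    	/* Fills the real, effective and saved set-user-IDs in one call. */
    	if (getresuid(&ruid, &euid, &suid) != 0) {
    		perror("getresuid");
    		return 1;
    	}
    	printf("real=%ld effective=%ld saved=%ld\n",
    		(long) ruid, (long) euid, (long) suid);
    	return 0;
    }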
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST vtid context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include "common/compat/tid.h"
+#include <urcu/tls-compat.h>
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event.
+ */
+static DEFINE_URCU_TLS(pid_t, cached_vtid);
+
+/*
+ * Upon fork or clone, the TID assigned to our thread is not the same
+ * as the one we kept in cache. Luckily, we are the only thread
+ * surviving in the child process, so we can simply clear our cached
+ * version.
+ */
+void lttng_context_vtid_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_vtid), 0);
+}
+
+static
+size_t vtid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(pid_t));
+ size += sizeof(pid_t);
+ return size;
+}
+
+static inline
+pid_t wrapper_getvtid(void)
+{
+ pid_t vtid;
+
+ vtid = CMM_LOAD_SHARED(URCU_TLS(cached_vtid));
+ if (caa_unlikely(!vtid)) {
+ vtid = lttng_gettid();
+ CMM_STORE_SHARED(URCU_TLS(cached_vtid), vtid);
+ }
+ return vtid;
+}
+
+static
+void vtid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ pid_t vtid = wrapper_getvtid();
+
+ chan->ops->event_write(ctx, &vtid, sizeof(vtid), lttng_ust_rb_alignof(vtid));
+}
+
+static
+void vtid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = wrapper_getvtid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vtid",
+ lttng_ust_static_type_integer(sizeof(pid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(pid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(pid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vtid_get_size,
+ vtid_record,
+ vtid_get_value,
+ NULL, NULL);
+
+int lttng_add_vtid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_vtid_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_vtid)));
+}
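
Unlike the process-wide credential caches, the TID cache above is thread-local (DEFINE_URCU_TLS), since each thread has its own TID. The same idea expressed with compiler TLS and a raw gettid system call, as a rough standalone analogy to the urcu-tls based code (not what the patch uses):

    #define _GNU_SOURCE
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    static __thread pid_t example_cached_vtid;	/* one cache slot per thread; 0 means "unset" */

    static pid_t example_getvtid(void)
    {
    	if (!example_cached_vtid)
    		example_cached_vtid = (pid_t) syscall(SYS_gettid);
    	return example_cached_vtid;
    }

    /* Called in the child after fork()/clone(), mirroring lttng_context_vtid_reset(). */
    static void example_vtid_reset(void)
    {
    	example_cached_vtid = 0;
    }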
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST namespaced real user ID context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "creds.h"
+
+
+/*
+ * At the kernel level, user IDs and group IDs are a per-thread attribute.
+ * However, POSIX requires that all threads in a process share the same
+ * credentials. The NPTL threading implementation handles the POSIX
+ * requirements by providing wrapper functions for the various system calls
+ * that change process UIDs and GIDs. These wrapper functions (including those
+ * for setreuid() and setregid()) employ a signal-based technique to ensure
+ * that when one thread changes credentials, all of the other threads in the
+ * process also change their credentials.
+ */
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event. User / group IDs are global to the process.
+ */
+static uid_t cached_vuid = INVALID_UID;
+
+static
+uid_t get_vuid(void)
+{
+ uid_t vuid;
+
+ vuid = CMM_LOAD_SHARED(cached_vuid);
+
+ if (caa_unlikely(vuid == INVALID_UID)) {
+ vuid = getuid();
+ CMM_STORE_SHARED(cached_vuid, vuid);
+ }
+
+ return vuid;
+}
+
+/*
+ * The vuid can change on setuid, setreuid and setresuid.
+ */
+void lttng_context_vuid_reset(void)
+{
+ CMM_STORE_SHARED(cached_vuid, INVALID_UID);
+}
+
+static
+size_t vuid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
+ size += sizeof(uid_t);
+ return size;
+}
+
+static
+void vuid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ uid_t vuid;
+
+ vuid = get_vuid();
+ chan->ops->event_write(ctx, &vuid, sizeof(vuid), lttng_ust_rb_alignof(vuid));
+}
+
+static
+void vuid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_vuid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vuid",
+ lttng_ust_static_type_integer(sizeof(uid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(uid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vuid_get_size,
+ vuid_record,
+ vuid_get_value,
+ NULL, NULL);
+
+int lttng_add_vuid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST trace/channel/event context management.
+ */
+
+#define _LGPL_SOURCE
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <common/ust-context-provider.h>
+#include <lttng/urcu/pointer.h>
+#include <lttng/urcu/urcu-ust.h>
+#include "common/logging.h"
+#include "common/macros.h"
+#include <stddef.h>
+#include <string.h>
+#include <assert.h>
+#include <limits.h>
+#include "tracepoint-internal.h"
+
+#include "context-internal.h"
+
+/*
+ * The filter implementation requires that two consecutive "get" for the
+ * same context performed by the same thread return the same result.
+ */
+
+int lttng_find_context(struct lttng_ust_ctx *ctx, const char *name)
+{
+ unsigned int i;
+ const char *subname;
+
+ if (!ctx)
+ return 0;
+ if (strncmp(name, "$ctx.", strlen("$ctx.")) == 0) {
+ subname = name + strlen("$ctx.");
+ } else {
+ subname = name;
+ }
+ for (i = 0; i < ctx->nr_fields; i++) {
+ /* Skip allocated (but non-initialized) contexts */
+ if (!ctx->fields[i].event_field->name)
+ continue;
+ if (!strcmp(ctx->fields[i].event_field->name, subname))
+ return 1;
+ }
+ return 0;
+}
+
+int lttng_get_context_index(struct lttng_ust_ctx *ctx, const char *name)
+{
+ unsigned int i;
+ const char *subname;
+
+ if (!ctx)
+ return -1;
+ if (strncmp(name, "$ctx.", strlen("$ctx.")) == 0) {
+ subname = name + strlen("$ctx.");
+ } else {
+ subname = name;
+ }
+ for (i = 0; i < ctx->nr_fields; i++) {
+ /* Skip allocated (but non-initialized) contexts */
+ if (!ctx->fields[i].event_field->name)
+ continue;
+ if (!strcmp(ctx->fields[i].event_field->name, subname))
+ return i;
+ }
+ return -1;
+}
+
+static int lttng_find_context_provider(struct lttng_ust_ctx *ctx, const char *name)
+{
+ unsigned int i;
+
+ for (i = 0; i < ctx->nr_fields; i++) {
+ /* Skip allocated (but non-initialized) contexts */
+ if (!ctx->fields[i].event_field->name)
+ continue;
+ if (!strncmp(ctx->fields[i].event_field->name, name,
+ strlen(name)))
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Note: as we append context information, the pointer location may change.
+ * lttng_ust_context_add_field leaves the newly appended last field zero-initialized.
+ */
+static
+int lttng_ust_context_add_field(struct lttng_ust_ctx **ctx_p)
+{
+ struct lttng_ust_ctx *ctx;
+
+ if (!*ctx_p) {
+ *ctx_p = zmalloc(sizeof(struct lttng_ust_ctx));
+ if (!*ctx_p)
+ return -ENOMEM;
+ (*ctx_p)->largest_align = 1;
+ }
+ ctx = *ctx_p;
+ if (ctx->nr_fields + 1 > ctx->allocated_fields) {
+ struct lttng_ust_ctx_field *new_fields;
+
+ ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
+ new_fields = zmalloc(ctx->allocated_fields * sizeof(*new_fields));
+ if (!new_fields)
+ return -ENOMEM;
+ /* Copy elements */
+ if (ctx->fields)
+ memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
+ free(ctx->fields);
+ ctx->fields = new_fields;
+ }
+ ctx->nr_fields++;
+ return 0;
+}
+
+static size_t get_type_max_align(const struct lttng_ust_type_common *type)
+{
+ switch (type->type) {
+ case lttng_ust_type_integer:
+ return lttng_ust_get_type_integer(type)->alignment;
+ case lttng_ust_type_string:
+ return CHAR_BIT;
+ case lttng_ust_type_dynamic:
+ return 0;
+ case lttng_ust_type_enum:
+ return get_type_max_align(lttng_ust_get_type_enum(type)->container_type);
+ case lttng_ust_type_array:
+ return max_t(size_t, get_type_max_align(lttng_ust_get_type_array(type)->elem_type),
+ lttng_ust_get_type_array(type)->alignment);
+ case lttng_ust_type_sequence:
+ return max_t(size_t, get_type_max_align(lttng_ust_get_type_sequence(type)->elem_type),
+ lttng_ust_get_type_sequence(type)->alignment);
+ case lttng_ust_type_struct:
+ {
+ unsigned int i;
+ size_t field_align = 0;
+ const struct lttng_ust_type_struct *struct_type = lttng_ust_get_type_struct(type);
+
+ for (i = 0; i < struct_type->nr_fields; i++) {
+ field_align = max_t(size_t,
+ get_type_max_align(struct_type->fields[i]->type),
+ field_align);
+ }
+ return field_align;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+/*
+ * lttng_context_update() should be called at least once between context
+ * modification and trace start.
+ */
+static
+void lttng_context_update(struct lttng_ust_ctx *ctx)
+{
+ int i;
+ size_t largest_align = 8; /* in bits */
+
+ for (i = 0; i < ctx->nr_fields; i++) {
+ size_t field_align = 8;
+
+ field_align = get_type_max_align(ctx->fields[i].event_field->type);
+ largest_align = max_t(size_t, largest_align, field_align);
+ }
+ ctx->largest_align = largest_align >> 3; /* bits to bytes */
+}
+
+int lttng_ust_context_append_rcu(struct lttng_ust_ctx **ctx_p,
+ const struct lttng_ust_ctx_field *f)
+{
+ struct lttng_ust_ctx *old_ctx = *ctx_p, *new_ctx = NULL;
+ struct lttng_ust_ctx_field *new_fields = NULL;
+ int ret;
+
+ if (old_ctx) {
+ new_ctx = zmalloc(sizeof(struct lttng_ust_ctx));
+ if (!new_ctx)
+ return -ENOMEM;
+ *new_ctx = *old_ctx;
+ new_fields = zmalloc(new_ctx->allocated_fields * sizeof(*new_fields));
+ if (!new_fields) {
+ free(new_ctx);
+ return -ENOMEM;
+ }
+ /* Copy elements */
+ memcpy(new_fields, old_ctx->fields,
+ sizeof(*old_ctx->fields) * old_ctx->nr_fields);
+ new_ctx->fields = new_fields;
+ }
+ ret = lttng_ust_context_add_field(&new_ctx);
+ if (ret) {
+ free(new_fields);
+ free(new_ctx);
+ return ret;
+ }
+ new_ctx->fields[new_ctx->nr_fields - 1] = *f;
+ lttng_context_update(new_ctx);
+ lttng_ust_rcu_assign_pointer(*ctx_p, new_ctx);
+ lttng_ust_urcu_synchronize_rcu();
+ if (old_ctx) {
+ free(old_ctx->fields);
+ free(old_ctx);
+ }
+ return 0;
+}
+
+int lttng_ust_context_append(struct lttng_ust_ctx **ctx_p,
+ const struct lttng_ust_ctx_field *f)
+{
+ int ret;
+
+ ret = lttng_ust_context_add_field(ctx_p);
+ if (ret)
+ return ret;
+ (*ctx_p)->fields[(*ctx_p)->nr_fields - 1] = *f;
+ lttng_context_update(*ctx_p);
+ return 0;
+}
+
+void lttng_destroy_context(struct lttng_ust_ctx *ctx)
+{
+ int i;
+
+ if (!ctx)
+ return;
+ for (i = 0; i < ctx->nr_fields; i++) {
+ if (ctx->fields[i].destroy)
+ ctx->fields[i].destroy(ctx->fields[i].priv);
+ }
+ free(ctx->fields);
+ free(ctx);
+}
+
+/*
+ * Can be safely performed concurrently with tracing using the struct
+ * lttng_ctx. Uses RCU update. Needs to match RCU read-side handling of
+ * contexts.
+ *
+ * This does not allow adding, removing, or changing the type of the
+ * contexts, since this needs to stay invariant for metadata. However,
+ * it allows updating the handlers associated with all contexts matching
+ * a provider (by name) while tracing is using it, in a way that ensures
+ * a single RCU read-side critical section sees either all old, or all
+ * new handlers.
+ */
+int lttng_ust_context_set_provider_rcu(struct lttng_ust_ctx **_ctx,
+ const char *name,
+ size_t (*get_size)(void *priv, size_t offset),
+ void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan),
+ void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
+ void *priv)
+{
+ int i, ret;
+ struct lttng_ust_ctx *ctx = *_ctx, *new_ctx;
+ struct lttng_ust_ctx_field *new_fields;
+
+ if (!ctx || !lttng_find_context_provider(ctx, name))
+ return 0;
+ /*
+ * We have at least one instance of context for the provider.
+ */
+ new_ctx = zmalloc(sizeof(*new_ctx));
+ if (!new_ctx)
+ return -ENOMEM;
+ *new_ctx = *ctx;
+ new_fields = zmalloc(sizeof(*new_fields) * ctx->allocated_fields);
+ if (!new_fields) {
+ ret = -ENOMEM;
+ goto field_error;
+ }
+ /* Copy elements */
+ memcpy(new_fields, ctx->fields,
+ sizeof(*new_fields) * ctx->allocated_fields);
+ for (i = 0; i < ctx->nr_fields; i++) {
+ if (strncmp(new_fields[i].event_field->name,
+ name, strlen(name)) != 0)
+ continue;
+ new_fields[i].get_size = get_size;
+ new_fields[i].record = record;
+ new_fields[i].get_value = get_value;
+ new_fields[i].priv = priv;
+ }
+ new_ctx->fields = new_fields;
+ lttng_ust_rcu_assign_pointer(*_ctx, new_ctx);
+ lttng_ust_urcu_synchronize_rcu();
+ free(ctx->fields);
+ free(ctx);
+ return 0;
+
+field_error:
+ free(new_ctx);
+ return ret;
+}
+
+int lttng_context_init_all(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ ret = lttng_add_pthread_id_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_pthread_id_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vtid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vtid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vpid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vpid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_procname_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_procname_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_cpu_id_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_cpu_id_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_cgroup_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_cgroup_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_ipc_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_ipc_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_mnt_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_mnt_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_net_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_net_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_pid_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_pid_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_time_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_time_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_user_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_user_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_uts_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_uts_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vuid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vuid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_veuid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_veuid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vsuid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vsuid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vgid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vgid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vegid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vegid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vsgid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vsgid_to_ctx");
+ goto error;
+ }
+ lttng_context_update(*ctx);
+ return 0;
+
+error:
+ lttng_destroy_context(*ctx);
+ return ret;
+}
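
lttng_ust_context_append_rcu() and lttng_ust_context_set_provider_rcu() both follow the classic RCU copy/update/publish sequence: duplicate the array, modify the copy, publish the new pointer, wait for a grace period, then free the old version. A reduced sketch of that sequence using stock liburcu; the patch itself uses lttng-ust's embedded UST RCU flavor (lttng_ust_rcu_assign_pointer, lttng_ust_urcu_synchronize_rcu), so the names differ, and the single writer here is serialized by ust_lock in the real code:

    #include <stdlib.h>
    #include <string.h>
    #include <urcu.h>	/* reader threads must have called rcu_register_thread() */

    struct int_array {
    	size_t len;
    	int *vals;
    };

    static struct int_array *current_array;	/* published pointer, read under rcu_read_lock() */

    /* Writer side (single writer): replace one element without disturbing readers. */
    static int array_update_rcu(size_t idx, int new_val)
    {
    	struct int_array *old_arr = current_array, *new_arr;

    	new_arr = malloc(sizeof(*new_arr));
    	if (!new_arr)
    		return -1;
    	*new_arr = *old_arr;
    	new_arr->vals = malloc(old_arr->len * sizeof(*new_arr->vals));
    	if (!new_arr->vals) {
    		free(new_arr);
    		return -1;
    	}
    	memcpy(new_arr->vals, old_arr->vals, old_arr->len * sizeof(*new_arr->vals));
    	new_arr->vals[idx] = new_val;

    	rcu_assign_pointer(current_array, new_arr);	/* publish the copy */
    	synchronize_rcu();	/* wait until no reader can still hold 'old_arr' */
    	free(old_arr->vals);
    	free(old_arr);
    	return 0;
    }

    /* Reader side: dereference the published pointer inside a read-side critical section. */
    static int array_read(size_t idx)
    {
    	struct int_array *a;
    	int v;

    	rcu_read_lock();
    	a = rcu_dereference(current_array);
    	v = a->vals[idx];
    	rcu_read_unlock();
    	return v;
    }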
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-counter-client-percpu-32-modular.c
+ *
+ * LTTng lib counter client. Per-cpu 32-bit counters in modular
+ * arithmetic.
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include "ust-events-internal.h"
+#include "common/counter/counter.h"
+#include "common/counter/counter-api.h"
+#include "lttng-tracer-core.h"
+#include "lttng-counter-client.h"
+
+static const struct lib_counter_config client_config = {
+ .alloc = COUNTER_ALLOC_PER_CPU,
+ .sync = COUNTER_SYNC_PER_CPU,
+ .arithmetic = COUNTER_ARITHMETIC_MODULAR,
+ .counter_size = COUNTER_SIZE_32_BIT,
+};
+
+static struct lib_counter *counter_create(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+{
+ size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
+
+ if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
+ return NULL;
+ for (i = 0; i < nr_dimensions; i++) {
+ if (dimensions[i].has_underflow || dimensions[i].has_overflow)
+ return NULL;
+ max_nr_elem[i] = dimensions[i].size;
+ }
+ return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+ global_sum_step, global_counter_fd, nr_counter_cpu_fds,
+ counter_cpu_fds, is_daemon);
+}
+
+static void counter_destroy(struct lib_counter *counter)
+{
+ lttng_counter_destroy(counter);
+}
+
+static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+{
+ return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+}
+
+static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+ overflow, underflow);
+}
+
+static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+ overflow, underflow);
+}
+
+static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+{
+ return lttng_counter_clear(&client_config, counter, dimension_indexes);
+}
+
+static struct lttng_counter_transport lttng_counter_transport = {
+ .name = "counter-per-cpu-32-modular",
+ .ops = {
+ .counter_create = counter_create,
+ .counter_destroy = counter_destroy,
+ .counter_add = counter_add,
+ .counter_read = counter_read,
+ .counter_aggregate = counter_aggregate,
+ .counter_clear = counter_clear,
+ },
+ .client_config = &client_config,
+};
+
+void lttng_counter_client_percpu_32_modular_init(void)
+{
+ lttng_counter_transport_register(&lttng_counter_transport);
+}
+
+void lttng_counter_client_percpu_32_modular_exit(void)
+{
+ lttng_counter_transport_unregister(&lttng_counter_transport);
+}
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-counter-client-percpu-64-modular.c
+ *
+ * LTTng lib counter client. Per-cpu 64-bit counters in modular
+ * arithmetic.
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include "ust-events-internal.h"
+#include "common/counter/counter.h"
+#include "common/counter/counter-api.h"
+#include "lttng-tracer-core.h"
+#include "lttng-counter-client.h"
+
+static const struct lib_counter_config client_config = {
+ .alloc = COUNTER_ALLOC_PER_CPU,
+ .sync = COUNTER_SYNC_PER_CPU,
+ .arithmetic = COUNTER_ARITHMETIC_MODULAR,
+ .counter_size = COUNTER_SIZE_64_BIT,
+};
+
+static struct lib_counter *counter_create(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+{
+ size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
+
+ if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
+ return NULL;
+ for (i = 0; i < nr_dimensions; i++) {
+ if (dimensions[i].has_underflow || dimensions[i].has_overflow)
+ return NULL;
+ max_nr_elem[i] = dimensions[i].size;
+ }
+ return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+ global_sum_step, global_counter_fd, nr_counter_cpu_fds,
+ counter_cpu_fds, is_daemon);
+}
+
+static void counter_destroy(struct lib_counter *counter)
+{
+ lttng_counter_destroy(counter);
+}
+
+static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+{
+ return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+}
+
+static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+ overflow, underflow);
+}
+
+static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+ overflow, underflow);
+}
+
+static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+{
+ return lttng_counter_clear(&client_config, counter, dimension_indexes);
+}
+
+static struct lttng_counter_transport lttng_counter_transport = {
+ .name = "counter-per-cpu-64-modular",
+ .ops = {
+ .counter_create = counter_create,
+ .counter_destroy = counter_destroy,
+ .counter_add = counter_add,
+ .counter_read = counter_read,
+ .counter_aggregate = counter_aggregate,
+ .counter_clear = counter_clear,
+ },
+ .client_config = &client_config,
+};
+
+void lttng_counter_client_percpu_64_modular_init(void)
+{
+ lttng_counter_transport_register(&lttng_counter_transport);
+}
+
+void lttng_counter_client_percpu_64_modular_exit(void)
+{
+ lttng_counter_transport_unregister(&lttng_counter_transport);
+}
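
Both counter clients have the same shape: a static lib_counter_config, thin wrappers around the generic counter API, and a named lttng_counter_transport registered at init time so that lttng_ust_counter_create() (further down in this patch) can look it up by its transport name. Stripped of the LTTng types, the name-keyed vtable pattern looks like the following hypothetical sketch (illustrative types and names only):

    #include <stddef.h>
    #include <string.h>

    struct demo_ops {
    	int (*add)(void *counter, long v);
    };

    struct demo_transport {
    	const char *name;	/* lookup key, like "counter-per-cpu-64-modular" */
    	struct demo_ops ops;
    	struct demo_transport *next;
    };

    static struct demo_transport *demo_transports;	/* simple registry; lock-protected in real code */

    static void demo_transport_register(struct demo_transport *t)
    {
    	t->next = demo_transports;
    	demo_transports = t;
    }

    static struct demo_transport *demo_transport_find(const char *name)
    {
    	struct demo_transport *t;

    	for (t = demo_transports; t; t = t->next) {
    		if (strcmp(t->name, name) == 0)
    			return t;
    	}
    	return NULL;
    }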
--- /dev/null
+/*
+ * SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib counter client.
+ */
+
+#ifndef _LTTNG_UST_COUNTER_CLIENT_H
+#define _LTTNG_UST_COUNTER_CLIENT_H
+
+/*
+ * The counter clients init/exit symbols are private ABI for
+ * liblttng-ust-ctl, which is why they are not hidden.
+ */
+
+void lttng_ust_counter_clients_init(void);
+void lttng_ust_counter_clients_exit(void);
+
+void lttng_counter_client_percpu_32_modular_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_client_percpu_32_modular_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_client_percpu_64_modular_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_client_percpu_64_modular_exit(void)
+ __attribute__((visibility("hidden")));
+
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Holds LTTng per-session event registry.
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>
+#include <pthread.h>
+#include <sys/shm.h>
+#include <sys/ipc.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <inttypes.h>
+#include <time.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <dlfcn.h>
+#include <lttng/ust-endian.h>
+
+#include <urcu/arch.h>
+#include <urcu/compiler.h>
+#include <urcu/hlist.h>
+#include <urcu/list.h>
+#include <urcu/uatomic.h>
+
+#include <lttng/tracepoint.h>
+#include <lttng/ust-events.h>
+
+#include "common/logging.h"
+#include "common/macros.h"
+#include <lttng/ust-ctl.h>
+#include "common/ustcomm.h"
+#include "common/ust-fd.h"
+#include "common/dynamic-type.h"
+#include "common/ust-context-provider.h"
+#include "error.h"
+#include "lttng-ust-uuid.h"
+
+#include "tracepoint-internal.h"
+#include "string-utils.h"
+#include "lttng-bytecode.h"
+#include "lttng-tracer.h"
+#include "lttng-tracer-core.h"
+#include "lttng-ust-statedump.h"
+#include "context-internal.h"
+#include "ust-events-internal.h"
+#include "wait.h"
+#include "common/ringbuffer/shm.h"
+#include "common/ringbuffer/frontend_types.h"
+#include "common/ringbuffer/frontend.h"
+#include "common/counter/counter.h"
+#include "jhash.h"
+#include <lttng/ust-abi.h>
+#include "context-provider-internal.h"
+
+/*
+ * All operations within this file are called by the communication
+ * thread, under ust_lock protection.
+ */
+
+static CDS_LIST_HEAD(sessions);
+static CDS_LIST_HEAD(event_notifier_groups);
+
+struct cds_list_head *lttng_get_sessions(void)
+{
+ return &sessions;
+}
+
+static void _lttng_event_destroy(struct lttng_ust_event_common *event);
+static void _lttng_enum_destroy(struct lttng_enum *_enum);
+
+static
+void lttng_session_lazy_sync_event_enablers(struct lttng_ust_session *session);
+static
+void lttng_session_sync_event_enablers(struct lttng_ust_session *session);
+static
+void lttng_event_notifier_group_sync_enablers(
+ struct lttng_event_notifier_group *event_notifier_group);
+static
+void lttng_enabler_destroy(struct lttng_enabler *enabler);
+
+bool lttng_ust_validate_event_name(const struct lttng_ust_event_desc *desc)
+{
+ if (strlen(desc->probe_desc->provider_name) + 1 +
+ strlen(desc->event_name) >= LTTNG_UST_ABI_SYM_NAME_LEN)
+ return false;
+ return true;
+}
+
+void lttng_ust_format_event_name(const struct lttng_ust_event_desc *desc,
+ char *name)
+{
+ strcpy(name, desc->probe_desc->provider_name);
+ strcat(name, ":");
+ strcat(name, desc->event_name);
+}
+
+/*
+ * Called with ust lock held.
+ */
+int lttng_session_active(void)
+{
+ struct lttng_ust_session_private *iter;
+
+ cds_list_for_each_entry(iter, &sessions, node) {
+ if (iter->pub->active)
+ return 1;
+ }
+ return 0;
+}
+
+static
+int lttng_loglevel_match(int loglevel,
+ unsigned int has_loglevel,
+ enum lttng_ust_abi_loglevel_type req_type,
+ int req_loglevel)
+{
+ if (!has_loglevel)
+ loglevel = TRACE_DEFAULT;
+ switch (req_type) {
+ case LTTNG_UST_ABI_LOGLEVEL_RANGE:
+ if (loglevel <= req_loglevel
+ || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
+ return 1;
+ else
+ return 0;
+ case LTTNG_UST_ABI_LOGLEVEL_SINGLE:
+ if (loglevel == req_loglevel
+ || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
+ return 1;
+ else
+ return 0;
+ case LTTNG_UST_ABI_LOGLEVEL_ALL:
+ default:
+ if (loglevel <= TRACE_DEBUG)
+ return 1;
+ else
+ return 0;
+ }
+}
+
+struct lttng_ust_session *lttng_session_create(void)
+{
+ struct lttng_ust_session *session;
+ struct lttng_ust_session_private *session_priv;
+ int i;
+
+ session = zmalloc(sizeof(struct lttng_ust_session));
+ if (!session)
+ return NULL;
+ session->struct_size = sizeof(struct lttng_ust_session);
+ session_priv = zmalloc(sizeof(struct lttng_ust_session_private));
+ if (!session_priv) {
+ free(session);
+ return NULL;
+ }
+ session->priv = session_priv;
+ session_priv->pub = session;
+ if (lttng_context_init_all(&session->priv->ctx)) {
+ free(session_priv);
+ free(session);
+ return NULL;
+ }
+ CDS_INIT_LIST_HEAD(&session->priv->chan_head);
+ CDS_INIT_LIST_HEAD(&session->priv->events_head);
+ CDS_INIT_LIST_HEAD(&session->priv->enums_head);
+ CDS_INIT_LIST_HEAD(&session->priv->enablers_head);
+ for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
+ CDS_INIT_HLIST_HEAD(&session->priv->events_ht.table[i]);
+ for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
+ CDS_INIT_HLIST_HEAD(&session->priv->enums_ht.table[i]);
+ cds_list_add(&session->priv->node, &sessions);
+ return session;
+}
+
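+/*
+ * Instantiate a counter object backed by the named transport (e.g. one of
+ * the per-cpu modular counter clients). Returns NULL when the transport is
+ * unknown or allocation fails.
+ */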
+struct lttng_counter *lttng_ust_counter_create(
+ const char *counter_transport_name,
+ size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
+{
+ struct lttng_counter_transport *counter_transport = NULL;
+ struct lttng_counter *counter = NULL;
+
+ counter_transport = lttng_counter_transport_find(counter_transport_name);
+ if (!counter_transport)
+ goto notransport;
+ counter = zmalloc(sizeof(struct lttng_counter));
+ if (!counter)
+ goto nomem;
+
+ counter->ops = &counter_transport->ops;
+ counter->transport = counter_transport;
+
+ counter->counter = counter->ops->counter_create(
+ number_dimensions, dimensions, 0,
+ -1, 0, NULL, false);
+ if (!counter->counter) {
+ goto create_error;
+ }
+
+ return counter;
+
+create_error:
+ free(counter);
+nomem:
+notransport:
+ return NULL;
+}
+
+static
+void lttng_ust_counter_destroy(struct lttng_counter *counter)
+{
+ counter->ops->counter_destroy(counter->counter);
+ free(counter);
+}
+
+struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
+{
+ struct lttng_event_notifier_group *event_notifier_group;
+ int i;
+
+ event_notifier_group = zmalloc(sizeof(struct lttng_event_notifier_group));
+ if (!event_notifier_group)
+ return NULL;
+
+ /* Add all contexts. */
+ if (lttng_context_init_all(&event_notifier_group->ctx)) {
+ free(event_notifier_group);
+ return NULL;
+ }
+
+ CDS_INIT_LIST_HEAD(&event_notifier_group->enablers_head);
+ CDS_INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
+ for (i = 0; i < LTTNG_UST_EVENT_NOTIFIER_HT_SIZE; i++)
+ CDS_INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);
+
+ cds_list_add(&event_notifier_group->node, &event_notifier_groups);
+
+ return event_notifier_group;
+}
+
+/*
+ * Only used internally at session destruction.
+ */
+static
+void _lttng_channel_unmap(struct lttng_ust_channel_buffer *lttng_chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_shm_handle *handle;
+
+	cds_list_del(&lttng_chan->priv->node);
+ lttng_destroy_context(lttng_chan->priv->ctx);
+ chan = lttng_chan->priv->rb_chan;
+ handle = chan->handle;
+ channel_destroy(chan, handle, 0);
+ free(lttng_chan->parent);
+ free(lttng_chan->priv);
+ free(lttng_chan);
+}
+
+static
+void register_event(struct lttng_ust_event_common *event)
+{
+ int ret;
+ const struct lttng_ust_event_desc *desc;
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+
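+	/* Tracepoint probes are registered under the "provider:event" name. */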
+ assert(event->priv->registered == 0);
+ desc = event->priv->desc;
+ lttng_ust_format_event_name(desc, name);
+ ret = lttng_ust_tp_probe_register_queue_release(name,
+ desc->probe_callback,
+ event, desc->signature);
+ WARN_ON_ONCE(ret);
+ if (!ret)
+ event->priv->registered = 1;
+}
+
+static
+void unregister_event(struct lttng_ust_event_common *event)
+{
+ int ret;
+ const struct lttng_ust_event_desc *desc;
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+
+ assert(event->priv->registered == 1);
+ desc = event->priv->desc;
+ lttng_ust_format_event_name(desc, name);
+ ret = lttng_ust_tp_probe_unregister_queue_release(name,
+ desc->probe_callback,
+ event);
+ WARN_ON_ONCE(ret);
+ if (!ret)
+ event->priv->registered = 0;
+}
+
+static
+void _lttng_event_unregister(struct lttng_ust_event_common *event)
+{
+ if (event->priv->registered)
+ unregister_event(event);
+}
+
+void lttng_session_destroy(struct lttng_ust_session *session)
+{
+ struct lttng_ust_channel_buffer_private *chan, *tmpchan;
+ struct lttng_ust_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
+ struct lttng_enum *_enum, *tmp_enum;
+ struct lttng_event_enabler *event_enabler, *event_tmpenabler;
+
+ CMM_ACCESS_ONCE(session->active) = 0;
+ cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
+ _lttng_event_unregister(event_recorder_priv->parent.pub);
+ }
+ lttng_ust_urcu_synchronize_rcu(); /* Wait for in-flight events to complete */
+ lttng_ust_tp_probe_prune_release_queue();
+ cds_list_for_each_entry_safe(event_enabler, event_tmpenabler,
+ &session->priv->enablers_head, node)
+ lttng_event_enabler_destroy(event_enabler);
+ cds_list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv,
+ &session->priv->events_head, node)
+ _lttng_event_destroy(event_recorder_priv->parent.pub);
+ cds_list_for_each_entry_safe(_enum, tmp_enum,
+ &session->priv->enums_head, node)
+ _lttng_enum_destroy(_enum);
+ cds_list_for_each_entry_safe(chan, tmpchan, &session->priv->chan_head, node)
+ _lttng_channel_unmap(chan->pub);
+ cds_list_del(&session->priv->node);
+ lttng_destroy_context(session->priv->ctx);
+ free(session->priv);
+ free(session);
+}
+
+void lttng_event_notifier_group_destroy(
+ struct lttng_event_notifier_group *event_notifier_group)
+{
+ int close_ret;
+ struct lttng_event_notifier_enabler *notifier_enabler, *tmpnotifier_enabler;
+ struct lttng_ust_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
+
+ if (!event_notifier_group) {
+ return;
+ }
+
+ cds_list_for_each_entry(event_notifier_priv,
+ &event_notifier_group->event_notifiers_head, node)
+ _lttng_event_unregister(event_notifier_priv->parent.pub);
+
+ lttng_ust_urcu_synchronize_rcu();
+
+ cds_list_for_each_entry_safe(notifier_enabler, tmpnotifier_enabler,
+ &event_notifier_group->enablers_head, node)
+ lttng_event_notifier_enabler_destroy(notifier_enabler);
+
+ cds_list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
+ &event_notifier_group->event_notifiers_head, node)
+ _lttng_event_destroy(event_notifier_priv->parent.pub);
+
+ if (event_notifier_group->error_counter)
+ lttng_ust_counter_destroy(event_notifier_group->error_counter);
+
+ /* Close the notification fd to the listener of event_notifiers. */
+
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(event_notifier_group->notification_fd);
+ if (!close_ret) {
+ lttng_ust_delete_fd_from_tracker(
+ event_notifier_group->notification_fd);
+ } else {
+ PERROR("close");
+ abort();
+ }
+ lttng_ust_unlock_fd_tracker();
+
+ cds_list_del(&event_notifier_group->node);
+ lttng_destroy_context(event_notifier_group->ctx);
+ free(event_notifier_group);
+}
+
+static
+void lttng_enabler_destroy(struct lttng_enabler *enabler)
+{
+ struct lttng_ust_bytecode_node *filter_node, *tmp_filter_node;
+ struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
+
+ if (!enabler) {
+ return;
+ }
+
+ /* Destroy filter bytecode */
+ cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
+ &enabler->filter_bytecode_head, node) {
+ free(filter_node);
+ }
+
+ /* Destroy excluders */
+ cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
+ &enabler->excluder_head, node) {
+ free(excluder_node);
+ }
+}
+
+void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ if (!event_notifier_enabler) {
+ return;
+ }
+
+ cds_list_del(&event_notifier_enabler->node);
+
+ lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
+
+ free(event_notifier_enabler);
+}
+
+static
+int lttng_enum_create(const struct lttng_ust_enum_desc *desc,
+ struct lttng_ust_session *session)
+{
+ const char *enum_name = desc->name;
+ struct lttng_enum *_enum;
+ struct cds_hlist_head *head;
+ int ret = 0;
+ size_t name_len = strlen(enum_name);
+ uint32_t hash;
+ int notify_socket;
+
+ /* Check if this enum is already registered for this session. */
+ hash = jhash(enum_name, name_len, 0);
+ head = &session->priv->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
+
+ _enum = lttng_ust_enum_get_from_desc(session, desc);
+ if (_enum) {
+ ret = -EEXIST;
+ goto exist;
+ }
+
+ notify_socket = lttng_get_notify_socket(session->priv->owner);
+ if (notify_socket < 0) {
+ ret = notify_socket;
+ goto socket_error;
+ }
+
+ _enum = zmalloc(sizeof(*_enum));
+ if (!_enum) {
+ ret = -ENOMEM;
+ goto cache_error;
+ }
+ _enum->session = session;
+ _enum->desc = desc;
+
+ ret = ustcomm_register_enum(notify_socket,
+ session->priv->objd,
+ enum_name,
+ desc->nr_entries,
+ desc->entries,
+ &_enum->id);
+ if (ret < 0) {
+ DBG("Error (%d) registering enumeration to sessiond", ret);
+ goto sessiond_register_error;
+ }
+ cds_list_add(&_enum->node, &session->priv->enums_head);
+ cds_hlist_add_head(&_enum->hlist, head);
+ return 0;
+
+sessiond_register_error:
+ free(_enum);
+cache_error:
+socket_error:
+exist:
+ return ret;
+}
+
+static
+int lttng_create_enum_check(const struct lttng_ust_type_common *type,
+ struct lttng_ust_session *session)
+{
+ switch (type->type) {
+ case lttng_ust_type_enum:
+ {
+ const struct lttng_ust_enum_desc *enum_desc;
+ int ret;
+
+ enum_desc = lttng_ust_get_type_enum(type)->desc;
+ ret = lttng_enum_create(enum_desc, session);
+ if (ret && ret != -EEXIST) {
+ DBG("Unable to create enum error: (%d)", ret);
+ return ret;
+ }
+ break;
+ }
+ case lttng_ust_type_dynamic:
+ {
+ const struct lttng_ust_event_field *tag_field_generic;
+ const struct lttng_ust_enum_desc *enum_desc;
+ int ret;
+
+ tag_field_generic = lttng_ust_dynamic_type_tag_field();
+ enum_desc = lttng_ust_get_type_enum(tag_field_generic->type)->desc;
+ ret = lttng_enum_create(enum_desc, session);
+ if (ret && ret != -EEXIST) {
+ DBG("Unable to create enum error: (%d)", ret);
+ return ret;
+ }
+ break;
+ }
+ default:
+ /* TODO: nested types when they become supported. */
+ break;
+ }
+ return 0;
+}
+
+static
+int lttng_create_all_event_enums(size_t nr_fields,
+ const struct lttng_ust_event_field **event_fields,
+ struct lttng_ust_session *session)
+{
+ size_t i;
+ int ret;
+
+ /* For each field, ensure enum is part of the session. */
+ for (i = 0; i < nr_fields; i++) {
+ const struct lttng_ust_type_common *type = event_fields[i]->type;
+
+ ret = lttng_create_enum_check(type, session);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static
+int lttng_create_all_ctx_enums(size_t nr_fields,
+ struct lttng_ust_ctx_field *ctx_fields,
+ struct lttng_ust_session *session)
+{
+ size_t i;
+ int ret;
+
+ /* For each field, ensure enum is part of the session. */
+ for (i = 0; i < nr_fields; i++) {
+ const struct lttng_ust_type_common *type = ctx_fields[i].event_field->type;
+
+ ret = lttng_create_enum_check(type, session);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Ensure that a state-dump will be performed for this session at the end
+ * of the current handle_message().
+ */
+int lttng_session_statedump(struct lttng_ust_session *session)
+{
+ session->priv->statedump_pending = 1;
+ lttng_ust_sockinfo_session_enabled(session->priv->owner);
+ return 0;
+}
+
+int lttng_session_enable(struct lttng_ust_session *session)
+{
+ int ret = 0;
+ struct lttng_ust_channel_buffer_private *chan;
+ int notify_socket;
+
+ if (session->active) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ notify_socket = lttng_get_notify_socket(session->priv->owner);
+ if (notify_socket < 0)
+ return notify_socket;
+
+ /* Set transient enabler state to "enabled" */
+ session->priv->tstate = 1;
+
+ /* We need to sync enablers with session before activation. */
+ lttng_session_sync_event_enablers(session);
+
+ /*
+ * Snapshot the number of events per channel to know the type of header
+ * we need to use.
+ */
+ cds_list_for_each_entry(chan, &session->priv->chan_head, node) {
+ struct lttng_ust_ctx *ctx;
+ struct lttng_ust_ctx_field *fields = NULL;
+ size_t nr_fields = 0;
+ uint32_t chan_id;
+
+ /* don't change it if session stop/restart */
+ if (chan->header_type)
+ continue;
+ ctx = chan->ctx;
+ if (ctx) {
+ nr_fields = ctx->nr_fields;
+ fields = ctx->fields;
+ ret = lttng_create_all_ctx_enums(nr_fields, fields,
+ session);
+ if (ret < 0) {
+ DBG("Error (%d) adding enum to session", ret);
+ return ret;
+ }
+ }
+ ret = ustcomm_register_channel(notify_socket,
+ session,
+ session->priv->objd,
+ chan->parent.objd,
+ nr_fields,
+ fields,
+ &chan_id,
+ &chan->header_type);
+ if (ret) {
+ DBG("Error (%d) registering channel to sessiond", ret);
+ return ret;
+ }
+ if (chan_id != chan->id) {
+ DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
+ chan_id, chan->id);
+ return -EINVAL;
+ }
+ }
+
+ /* Set atomically the state to "active" */
+ CMM_ACCESS_ONCE(session->active) = 1;
+ CMM_ACCESS_ONCE(session->priv->been_active) = 1;
+
+ ret = lttng_session_statedump(session);
+ if (ret)
+ return ret;
+end:
+ return ret;
+}
+
+int lttng_session_disable(struct lttng_ust_session *session)
+{
+ int ret = 0;
+
+ if (!session->active) {
+ ret = -EBUSY;
+ goto end;
+ }
+ /* Set atomically the state to "inactive" */
+ CMM_ACCESS_ONCE(session->active) = 0;
+
+ /* Set transient enabler state to "disabled" */
+ session->priv->tstate = 0;
+ lttng_session_sync_event_enablers(session);
+end:
+ return ret;
+}
+
+int lttng_channel_enable(struct lttng_ust_channel_common *lttng_channel)
+{
+ int ret = 0;
+
+ if (lttng_channel->enabled) {
+ ret = -EBUSY;
+ goto end;
+ }
+ /* Set transient enabler state to "enabled" */
+ lttng_channel->priv->tstate = 1;
+ lttng_session_sync_event_enablers(lttng_channel->session);
+ /* Set atomically the state to "enabled" */
+ CMM_ACCESS_ONCE(lttng_channel->enabled) = 1;
+end:
+ return ret;
+}
+
+int lttng_channel_disable(struct lttng_ust_channel_common *lttng_channel)
+{
+ int ret = 0;
+
+ if (!lttng_channel->enabled) {
+ ret = -EBUSY;
+ goto end;
+ }
+ /* Set atomically the state to "disabled" */
+ CMM_ACCESS_ONCE(lttng_channel->enabled) = 0;
+ /* Set transient enabler state to "enabled" */
+ lttng_channel->priv->tstate = 0;
+ lttng_session_sync_event_enablers(lttng_channel->session);
+end:
+ return ret;
+}
+
+static inline
+struct cds_hlist_head *borrow_hash_table_bucket(
+ struct cds_hlist_head *hash_table,
+ unsigned int hash_table_size,
+ const struct lttng_ust_event_desc *desc)
+{
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+ size_t name_len;
+ uint32_t hash;
+
+ lttng_ust_format_event_name(desc, name);
+ name_len = strlen(name);
+
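+	/* Hash the full "provider:event" name; table sizes are powers of two. */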
+ hash = jhash(name, name_len, 0);
+ return &hash_table[hash & (hash_table_size - 1)];
+}
+
+/*
+ * Supports event creation while tracing session is active.
+ */
+static
+int lttng_event_recorder_create(const struct lttng_ust_event_desc *desc,
+ struct lttng_ust_channel_buffer *chan)
+{
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+ struct lttng_ust_event_recorder *event_recorder;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+ struct lttng_ust_session *session = chan->parent->session;
+ struct cds_hlist_head *head;
+ int ret = 0;
+ int notify_socket, loglevel;
+ const char *uri;
+
+ head = borrow_hash_table_bucket(chan->parent->session->priv->events_ht.table,
+ LTTNG_UST_EVENT_HT_SIZE, desc);
+
+ notify_socket = lttng_get_notify_socket(session->priv->owner);
+ if (notify_socket < 0) {
+ ret = notify_socket;
+ goto socket_error;
+ }
+
+ ret = lttng_create_all_event_enums(desc->nr_fields, desc->fields,
+ session);
+ if (ret < 0) {
+ DBG("Error (%d) adding enum to session", ret);
+ goto create_enum_error;
+ }
+
+ /*
+	 * Check if the loglevel matches. Refuse to connect the event if not.
+ */
+ event_recorder = zmalloc(sizeof(struct lttng_ust_event_recorder));
+ if (!event_recorder) {
+ ret = -ENOMEM;
+ goto cache_error;
+ }
+ event_recorder->struct_size = sizeof(struct lttng_ust_event_recorder);
+
+ event_recorder->parent = zmalloc(sizeof(struct lttng_ust_event_common));
+ if (!event_recorder->parent) {
+ ret = -ENOMEM;
+ goto parent_error;
+ }
+ event_recorder->parent->struct_size = sizeof(struct lttng_ust_event_common);
+ event_recorder->parent->type = LTTNG_UST_EVENT_TYPE_RECORDER;
+ event_recorder->parent->child = event_recorder;
+
+ event_recorder_priv = zmalloc(sizeof(struct lttng_ust_event_recorder_private));
+ if (!event_recorder_priv) {
+ ret = -ENOMEM;
+ goto priv_error;
+ }
+ event_recorder->priv = event_recorder_priv;
+ event_recorder_priv->pub = event_recorder;
+ event_recorder->parent->priv = &event_recorder_priv->parent;
+ event_recorder_priv->parent.pub = event_recorder->parent;
+
+ event_recorder->chan = chan;
+
+ /* Event will be enabled by enabler sync. */
+ event_recorder->parent->run_filter = lttng_ust_interpret_event_filter;
+ event_recorder->parent->enabled = 0;
+ event_recorder->parent->priv->registered = 0;
+ CDS_INIT_LIST_HEAD(&event_recorder->parent->priv->filter_bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&event_recorder->parent->priv->enablers_ref_head);
+ event_recorder->parent->priv->desc = desc;
+
+ if (desc->loglevel)
+ loglevel = *(*event_recorder->parent->priv->desc->loglevel);
+ else
+ loglevel = TRACE_DEFAULT;
+ if (desc->model_emf_uri)
+ uri = *(desc->model_emf_uri);
+ else
+ uri = NULL;
+
+ lttng_ust_format_event_name(desc, name);
+
+ /* Fetch event ID from sessiond */
+ ret = ustcomm_register_event(notify_socket,
+ session,
+ session->priv->objd,
+ chan->priv->parent.objd,
+ name,
+ loglevel,
+ desc->signature,
+ desc->nr_fields,
+ desc->fields,
+ uri,
+ &event_recorder->priv->id);
+ if (ret < 0) {
+ DBG("Error (%d) registering event to sessiond", ret);
+ goto sessiond_register_error;
+ }
+
+ cds_list_add(&event_recorder_priv->node, &chan->parent->session->priv->events_head);
+ cds_hlist_add_head(&event_recorder_priv->hlist, head);
+ return 0;
+
+sessiond_register_error:
+ free(event_recorder_priv);
+priv_error:
+ free(event_recorder->parent);
+parent_error:
+ free(event_recorder);
+cache_error:
+create_enum_error:
+socket_error:
+ return ret;
+}
+
+static
+int lttng_event_notifier_create(const struct lttng_ust_event_desc *desc,
+ uint64_t token, uint64_t error_counter_index,
+ struct lttng_event_notifier_group *event_notifier_group)
+{
+ struct lttng_ust_event_notifier *event_notifier;
+ struct lttng_ust_event_notifier_private *event_notifier_priv;
+ struct cds_hlist_head *head;
+ int ret = 0;
+
+ /*
+	 * Get the hash table bucket into which the created lttng_event_notifier
+	 * object should be inserted.
+ */
+ head = borrow_hash_table_bucket(
+ event_notifier_group->event_notifiers_ht.table,
+ LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, desc);
+
+ event_notifier = zmalloc(sizeof(struct lttng_ust_event_notifier));
+ if (!event_notifier) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ event_notifier->struct_size = sizeof(struct lttng_ust_event_notifier);
+
+ event_notifier->parent = zmalloc(sizeof(struct lttng_ust_event_common));
+ if (!event_notifier->parent) {
+ ret = -ENOMEM;
+ goto parent_error;
+ }
+ event_notifier->parent->struct_size = sizeof(struct lttng_ust_event_common);
+ event_notifier->parent->type = LTTNG_UST_EVENT_TYPE_NOTIFIER;
+ event_notifier->parent->child = event_notifier;
+
+ event_notifier_priv = zmalloc(sizeof(struct lttng_ust_event_notifier_private));
+ if (!event_notifier_priv) {
+ ret = -ENOMEM;
+ goto priv_error;
+ }
+ event_notifier->priv = event_notifier_priv;
+ event_notifier_priv->pub = event_notifier;
+ event_notifier->parent->priv = &event_notifier_priv->parent;
+ event_notifier_priv->parent.pub = event_notifier->parent;
+
+ event_notifier_priv->group = event_notifier_group;
+ event_notifier_priv->parent.user_token = token;
+ event_notifier_priv->error_counter_index = error_counter_index;
+
+ /* Event notifier will be enabled by enabler sync. */
+ event_notifier->parent->run_filter = lttng_ust_interpret_event_filter;
+ event_notifier->parent->enabled = 0;
+ event_notifier_priv->parent.registered = 0;
+
+ CDS_INIT_LIST_HEAD(&event_notifier->parent->priv->filter_bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&event_notifier_priv->parent.enablers_ref_head);
+ event_notifier_priv->parent.desc = desc;
+ event_notifier->notification_send = lttng_event_notifier_notification_send;
+
+ cds_list_add(&event_notifier_priv->node,
+ &event_notifier_group->event_notifiers_head);
+ cds_hlist_add_head(&event_notifier_priv->hlist, head);
+
+ return 0;
+
+priv_error:
+ free(event_notifier->parent);
+parent_error:
+ free(event_notifier);
+error:
+ return ret;
+}
+
+static
+int lttng_desc_match_star_glob_enabler(const struct lttng_ust_event_desc *desc,
+ struct lttng_enabler *enabler)
+{
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+ int loglevel = 0;
+ unsigned int has_loglevel = 0;
+
+ lttng_ust_format_event_name(desc, name);
+ assert(enabler->format_type == LTTNG_ENABLER_FORMAT_STAR_GLOB);
+ if (!strutils_star_glob_match(enabler->event_param.name, SIZE_MAX,
+ name, SIZE_MAX))
+ return 0;
+ if (desc->loglevel) {
+ loglevel = *(*desc->loglevel);
+ has_loglevel = 1;
+ }
+ if (!lttng_loglevel_match(loglevel,
+ has_loglevel,
+ enabler->event_param.loglevel_type,
+ enabler->event_param.loglevel))
+ return 0;
+ return 1;
+}
+
+static
+int lttng_desc_match_event_enabler(const struct lttng_ust_event_desc *desc,
+ struct lttng_enabler *enabler)
+{
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+ int loglevel = 0;
+ unsigned int has_loglevel = 0;
+
+ lttng_ust_format_event_name(desc, name);
+ assert(enabler->format_type == LTTNG_ENABLER_FORMAT_EVENT);
+ if (strcmp(name, enabler->event_param.name))
+ return 0;
+ if (desc->loglevel) {
+ loglevel = *(*desc->loglevel);
+ has_loglevel = 1;
+ }
+ if (!lttng_loglevel_match(loglevel,
+ has_loglevel,
+ enabler->event_param.loglevel_type,
+ enabler->event_param.loglevel))
+ return 0;
+ return 1;
+}
+
+static
+int lttng_desc_match_enabler(const struct lttng_ust_event_desc *desc,
+ struct lttng_enabler *enabler)
+{
+ switch (enabler->format_type) {
+ case LTTNG_ENABLER_FORMAT_STAR_GLOB:
+ {
+ struct lttng_ust_excluder_node *excluder;
+
+ if (!lttng_desc_match_star_glob_enabler(desc, enabler)) {
+ return 0;
+ }
+
+ /*
+ * If the matching event matches with an excluder,
+ * return 'does not match'
+ */
+ cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
+ int count;
+
+ for (count = 0; count < excluder->excluder.count; count++) {
+ int len;
+ char *excluder_name;
+
+ excluder_name = (char *) (excluder->excluder.names)
+ + count * LTTNG_UST_ABI_SYM_NAME_LEN;
+ len = strnlen(excluder_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ if (len > 0) {
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+
+ lttng_ust_format_event_name(desc, name);
+ if (strutils_star_glob_match(excluder_name, len, name, SIZE_MAX)) {
+ return 0;
+ }
+ }
+ }
+ }
+ return 1;
+ }
+ case LTTNG_ENABLER_FORMAT_EVENT:
+ return lttng_desc_match_event_enabler(desc, enabler);
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
+ struct lttng_ust_event_recorder *event_recorder)
+{
+ if (lttng_desc_match_enabler(event_recorder->parent->priv->desc,
+ lttng_event_enabler_as_enabler(event_enabler))
+ && event_recorder->chan == event_enabler->chan)
+ return 1;
+ else
+ return 0;
+}
+
+static
+int lttng_event_notifier_enabler_match_event_notifier(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_event_notifier *event_notifier)
+{
+ int desc_matches = lttng_desc_match_enabler(event_notifier->priv->parent.desc,
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
+
+ if (desc_matches && event_notifier->priv->group == event_notifier_enabler->group &&
+ event_notifier->priv->parent.user_token == event_notifier_enabler->user_token)
+ return 1;
+ else
+ return 0;
+}
+
+static
+struct lttng_enabler_ref *lttng_enabler_ref(
+ struct cds_list_head *enabler_ref_list,
+ struct lttng_enabler *enabler)
+{
+ struct lttng_enabler_ref *enabler_ref;
+
+ cds_list_for_each_entry(enabler_ref, enabler_ref_list, node) {
+ if (enabler_ref->ref == enabler)
+ return enabler_ref;
+ }
+ return NULL;
+}
+
+/*
+ * Create a struct lttng_event if it is missing and the corresponding
+ * tracepoint probe is present in the probe list.
+ */
+static
+void lttng_create_event_recorder_if_missing(struct lttng_event_enabler *event_enabler)
+{
+ struct lttng_ust_session *session = event_enabler->chan->parent->session;
+ struct lttng_ust_registered_probe *reg_probe;
+ const struct lttng_ust_event_desc *desc;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+ int i;
+ struct cds_list_head *probe_list;
+
+ probe_list = lttng_get_probe_list_head();
+ /*
+ * For each probe event, if we find that a probe event matches
+ * our enabler, create an associated lttng_event if not
+ * already present.
+ */
+ cds_list_for_each_entry(reg_probe, probe_list, head) {
+ const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
+
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ int ret;
+ bool found = false;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+
+ desc = probe_desc->event_desc[i];
+ if (!lttng_desc_match_enabler(desc,
+ lttng_event_enabler_as_enabler(event_enabler)))
+ continue;
+
+ head = borrow_hash_table_bucket(
+ session->priv->events_ht.table,
+ LTTNG_UST_EVENT_HT_SIZE, desc);
+
+ cds_hlist_for_each_entry(event_recorder_priv, node, head, hlist) {
+ if (event_recorder_priv->parent.desc == desc
+ && event_recorder_priv->pub->chan == event_enabler->chan) {
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ continue;
+
+ /*
+ * We need to create an event for this
+ * event probe.
+ */
+ ret = lttng_event_recorder_create(probe_desc->event_desc[i],
+ event_enabler->chan);
+ if (ret) {
+ DBG("Unable to create event \"%s:%s\", error %d\n",
+ probe_desc->provider_name,
+ probe_desc->event_desc[i]->event_name, ret);
+ }
+ }
+ }
+}
+
+static
+void probe_provider_event_for_each(const struct lttng_ust_probe_desc *provider_desc,
+ void (*event_func)(struct lttng_ust_event_common *event))
+{
+ struct cds_hlist_node *node, *tmp_node;
+ struct cds_list_head *sessionsp;
+ unsigned int i;
+
+ /* Get handle on list of sessions. */
+ sessionsp = lttng_get_sessions();
+
+ /*
+ * Iterate over all events in the probe provider descriptions and
+ * sessions to queue the unregistration of the events.
+ */
+ for (i = 0; i < provider_desc->nr_events; i++) {
+ const struct lttng_ust_event_desc *event_desc;
+ struct lttng_event_notifier_group *event_notifier_group;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+ struct lttng_ust_event_notifier_private *event_notifier_priv;
+ struct lttng_ust_session_private *session_priv;
+ struct cds_hlist_head *head;
+
+ event_desc = provider_desc->event_desc[i];
+
+ /*
+		 * Iterate over all sessions to find the current event
+ * description.
+ */
+ cds_list_for_each_entry(session_priv, sessionsp, node) {
+ /*
+ * Get the list of events in the hashtable bucket and
+ * iterate to find the event matching this descriptor.
+ */
+ head = borrow_hash_table_bucket(
+ session_priv->events_ht.table,
+ LTTNG_UST_EVENT_HT_SIZE, event_desc);
+
+ cds_hlist_for_each_entry_safe(event_recorder_priv, node, tmp_node, head, hlist) {
+ if (event_desc == event_recorder_priv->parent.desc) {
+ event_func(event_recorder_priv->parent.pub);
+ break;
+ }
+ }
+ }
+
+ /*
+ * Iterate over all event_notifier groups to find the current event
+ * description.
+ */
+ cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
+ /*
+ * Get the list of event_notifiers in the hashtable bucket and
+ * iterate to find the event_notifier matching this
+ * descriptor.
+ */
+ head = borrow_hash_table_bucket(
+ event_notifier_group->event_notifiers_ht.table,
+ LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, event_desc);
+
+ cds_hlist_for_each_entry_safe(event_notifier_priv, node, tmp_node, head, hlist) {
+ if (event_desc == event_notifier_priv->parent.desc) {
+ event_func(event_notifier_priv->parent.pub);
+ break;
+ }
+ }
+ }
+ }
+}
+
+static
+void _event_enum_destroy(struct lttng_ust_event_common *event)
+{
+ switch (event->type) {
+ case LTTNG_UST_EVENT_TYPE_RECORDER:
+ {
+ struct lttng_ust_event_recorder *event_recorder = event->child;
+ struct lttng_ust_session *session = event_recorder->chan->parent->session;
+ unsigned int i;
+
+ /* Destroy enums of the current event. */
+ for (i = 0; i < event_recorder->parent->priv->desc->nr_fields; i++) {
+ const struct lttng_ust_enum_desc *enum_desc;
+ const struct lttng_ust_event_field *field;
+ struct lttng_enum *curr_enum;
+
+ field = event_recorder->parent->priv->desc->fields[i];
+ switch (field->type->type) {
+ case lttng_ust_type_enum:
+ enum_desc = lttng_ust_get_type_enum(field->type)->desc;
+ break;
+ default:
+ continue;
+ }
+
+ curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc);
+ if (curr_enum) {
+ _lttng_enum_destroy(curr_enum);
+ }
+ }
+ break;
+ }
+ case LTTNG_UST_EVENT_TYPE_NOTIFIER:
+ break;
+ default:
+ abort();
+ }
+ /* Destroy event. */
+ _lttng_event_destroy(event);
+}
+
+/*
+ * Iterate over all the UST sessions to unregister and destroy all probes from
+ * the probe provider descriptor received as argument. Must be called with the
+ * ust_lock held.
+ */
+void lttng_probe_provider_unregister_events(
+ const struct lttng_ust_probe_desc *provider_desc)
+{
+ /*
+ * Iterate over all events in the probe provider descriptions and sessions
+ * to queue the unregistration of the events.
+ */
+ probe_provider_event_for_each(provider_desc, _lttng_event_unregister);
+
+ /* Wait for grace period. */
+ lttng_ust_urcu_synchronize_rcu();
+ /* Prune the unregistration queue. */
+ lttng_ust_tp_probe_prune_release_queue();
+
+ /*
+ * It is now safe to destroy the events and remove them from the event list
+ * and hashtables.
+ */
+ probe_provider_event_for_each(provider_desc, _event_enum_destroy);
+}
+
+/*
+ * Create events associated with an event enabler (if not already present),
+ * and add backward reference from the event to the enabler.
+ */
+static
+int lttng_event_enabler_ref_event_recorders(struct lttng_event_enabler *event_enabler)
+{
+ struct lttng_ust_session *session = event_enabler->chan->parent->session;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+
+ if (!lttng_event_enabler_as_enabler(event_enabler)->enabled)
+ goto end;
+
+ /* First ensure that probe events are created for this enabler. */
+ lttng_create_event_recorder_if_missing(event_enabler);
+
+ /* For each event matching enabler in session event list. */
+ cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
+ struct lttng_enabler_ref *enabler_ref;
+
+ if (!lttng_event_enabler_match_event(event_enabler, event_recorder_priv->pub))
+ continue;
+
+ enabler_ref = lttng_enabler_ref(&event_recorder_priv->parent.enablers_ref_head,
+ lttng_event_enabler_as_enabler(event_enabler));
+ if (!enabler_ref) {
+ /*
+ * If no backward ref, create it.
+ * Add backward ref from event to enabler.
+ */
+ enabler_ref = zmalloc(sizeof(*enabler_ref));
+ if (!enabler_ref)
+ return -ENOMEM;
+ enabler_ref->ref = lttng_event_enabler_as_enabler(
+ event_enabler);
+ cds_list_add(&enabler_ref->node,
+ &event_recorder_priv->parent.enablers_ref_head);
+ }
+
+ /*
+ * Link filter bytecodes if not linked yet.
+ */
+ lttng_enabler_link_bytecode(event_recorder_priv->parent.desc,
+ &session->priv->ctx,
+ &event_recorder_priv->parent.filter_bytecode_runtime_head,
+				&lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
+
+ /* TODO: merge event context. */
+ }
+end:
+ return 0;
+}
+
+/*
+ * Called at library load: connect the probe on all enablers matching
+ * this event.
+ * Called with session mutex held.
+ */
+int lttng_fix_pending_events(void)
+{
+ struct lttng_ust_session_private *session_priv;
+
+ cds_list_for_each_entry(session_priv, &sessions, node) {
+ lttng_session_lazy_sync_event_enablers(session_priv->pub);
+ }
+ return 0;
+}
+
+int lttng_fix_pending_event_notifiers(void)
+{
+ struct lttng_event_notifier_group *event_notifier_group;
+
+ cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
+ lttng_event_notifier_group_sync_enablers(event_notifier_group);
+ }
+ return 0;
+}
+
+/*
+ * For each session of the owner thread, execute pending statedump.
+ * Only dump state for the sessions owned by the caller thread, because
+ * we don't keep ust_lock across the entire iteration.
+ */
+void lttng_handle_pending_statedump(void *owner)
+{
+ struct lttng_ust_session_private *session_priv;
+
+ /* Execute state dump */
+ do_lttng_ust_statedump(owner);
+
+ /* Clear pending state dump */
+ if (ust_lock()) {
+ goto end;
+ }
+ cds_list_for_each_entry(session_priv, &sessions, node) {
+ if (session_priv->owner != owner)
+ continue;
+ if (!session_priv->statedump_pending)
+ continue;
+ session_priv->statedump_pending = 0;
+ }
+end:
+ ust_unlock();
+ return;
+}
+
+static
+void _lttng_event_destroy(struct lttng_ust_event_common *event)
+{
+ struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
+
+ lttng_free_event_filter_runtime(event);
+ /* Free event enabler refs */
+ cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
+ &event->priv->enablers_ref_head, node)
+ free(enabler_ref);
+
+ switch (event->type) {
+ case LTTNG_UST_EVENT_TYPE_RECORDER:
+ {
+ struct lttng_ust_event_recorder *event_recorder = event->child;
+
+ /* Remove from event list. */
+ cds_list_del(&event_recorder->priv->node);
+ /* Remove from event hash table. */
+ cds_hlist_del(&event_recorder->priv->hlist);
+
+ lttng_destroy_context(event_recorder->priv->ctx);
+ free(event_recorder->parent);
+ free(event_recorder->priv);
+ free(event_recorder);
+ break;
+ }
+ case LTTNG_UST_EVENT_TYPE_NOTIFIER:
+ {
+ struct lttng_ust_event_notifier *event_notifier = event->child;
+
+ /* Remove from event list. */
+ cds_list_del(&event_notifier->priv->node);
+ /* Remove from event hash table. */
+ cds_hlist_del(&event_notifier->priv->hlist);
+
+ free(event_notifier->priv);
+ free(event_notifier->parent);
+ free(event_notifier);
+ break;
+ }
+ default:
+ abort();
+ }
+}
+
+static
+void _lttng_enum_destroy(struct lttng_enum *_enum)
+{
+ cds_list_del(&_enum->node);
+ cds_hlist_del(&_enum->hlist);
+ free(_enum);
+}
+
+void lttng_ust_abi_events_exit(void)
+{
+ struct lttng_ust_session_private *session_priv, *tmpsession_priv;
+
+ cds_list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, node)
+ lttng_session_destroy(session_priv->pub);
+}
+
+/*
+ * Enabler management.
+ */
+struct lttng_event_enabler *lttng_event_enabler_create(
+ enum lttng_enabler_format_type format_type,
+ struct lttng_ust_abi_event *event_param,
+ struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_event_enabler *event_enabler;
+
+ event_enabler = zmalloc(sizeof(*event_enabler));
+ if (!event_enabler)
+ return NULL;
+ event_enabler->base.format_type = format_type;
+ CDS_INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
+ CDS_INIT_LIST_HEAD(&event_enabler->base.excluder_head);
+ memcpy(&event_enabler->base.event_param, event_param,
+ sizeof(event_enabler->base.event_param));
+ event_enabler->chan = chan;
+ /* ctx left NULL */
+ event_enabler->base.enabled = 0;
+ cds_list_add(&event_enabler->node, &event_enabler->chan->parent->session->priv->enablers_head);
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+
+ return event_enabler;
+}
+
+struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
+ struct lttng_event_notifier_group *event_notifier_group,
+ enum lttng_enabler_format_type format_type,
+ struct lttng_ust_abi_event_notifier *event_notifier_param)
+{
+ struct lttng_event_notifier_enabler *event_notifier_enabler;
+
+ event_notifier_enabler = zmalloc(sizeof(*event_notifier_enabler));
+ if (!event_notifier_enabler)
+ return NULL;
+ event_notifier_enabler->base.format_type = format_type;
+ CDS_INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
+ CDS_INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
+ CDS_INIT_LIST_HEAD(&event_notifier_enabler->base.excluder_head);
+
+ event_notifier_enabler->user_token = event_notifier_param->event.token;
+ event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
+ event_notifier_enabler->num_captures = 0;
+
+ memcpy(&event_notifier_enabler->base.event_param.name,
+ event_notifier_param->event.name,
+ sizeof(event_notifier_enabler->base.event_param.name));
+ event_notifier_enabler->base.event_param.instrumentation =
+ event_notifier_param->event.instrumentation;
+ event_notifier_enabler->base.event_param.loglevel =
+ event_notifier_param->event.loglevel;
+ event_notifier_enabler->base.event_param.loglevel_type =
+ event_notifier_param->event.loglevel_type;
+
+ event_notifier_enabler->base.enabled = 0;
+ event_notifier_enabler->group = event_notifier_group;
+
+ cds_list_add(&event_notifier_enabler->node,
+ &event_notifier_group->enablers_head);
+
+ lttng_event_notifier_group_sync_enablers(event_notifier_group);
+
+ return event_notifier_enabler;
+}
+
+int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
+{
+ lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+
+ return 0;
+}
+
+int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
+{
+ lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+
+ return 0;
+}
+
+static
+void _lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+{
+ (*bytecode)->enabler = enabler;
+ cds_list_add_tail(&(*bytecode)->node, &enabler->filter_bytecode_head);
+ /* Take ownership of bytecode */
+ *bytecode = NULL;
+}
+
+int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+{
+ _lttng_enabler_attach_filter_bytecode(
+ lttng_event_enabler_as_enabler(event_enabler), bytecode);
+
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+ return 0;
+}
+
+static
+void _lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
+ struct lttng_ust_excluder_node **excluder)
+{
+ (*excluder)->enabler = enabler;
+ cds_list_add_tail(&(*excluder)->node, &enabler->excluder_head);
+ /* Take ownership of excluder */
+ *excluder = NULL;
+}
+
+int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *event_enabler,
+ struct lttng_ust_excluder_node **excluder)
+{
+ _lttng_enabler_attach_exclusion(
+ lttng_event_enabler_as_enabler(event_enabler), excluder);
+
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+ return 0;
+}
+
+int lttng_event_notifier_enabler_enable(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+
+ return 0;
+}
+
+int lttng_event_notifier_enabler_disable(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+
+ return 0;
+}
+
+int lttng_event_notifier_enabler_attach_filter_bytecode(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+{
+ _lttng_enabler_attach_filter_bytecode(
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
+ bytecode);
+
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ return 0;
+}
+
+int lttng_event_notifier_enabler_attach_capture_bytecode(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+{
+ (*bytecode)->enabler = lttng_event_notifier_enabler_as_enabler(
+ event_notifier_enabler);
+ cds_list_add_tail(&(*bytecode)->node,
+ &event_notifier_enabler->capture_bytecode_head);
+ /* Take ownership of bytecode */
+ *bytecode = NULL;
+ event_notifier_enabler->num_captures++;
+
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ return 0;
+}
+
+int lttng_event_notifier_enabler_attach_exclusion(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_excluder_node **excluder)
+{
+ _lttng_enabler_attach_exclusion(
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
+ excluder);
+
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ return 0;
+}
+
+int lttng_attach_context(struct lttng_ust_abi_context *context_param,
+ union lttng_ust_abi_args *uargs,
+ struct lttng_ust_ctx **ctx, struct lttng_ust_session *session)
+{
+ /*
+ * We cannot attach a context after trace has been started for a
+ * session because the metadata does not allow expressing this
+ * information outside of the original channel scope.
+ */
+ if (session->priv->been_active)
+ return -EPERM;
+
+ switch (context_param->ctx) {
+ case LTTNG_UST_ABI_CONTEXT_PTHREAD_ID:
+ return lttng_add_pthread_id_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
+ {
+ struct lttng_ust_abi_perf_counter_ctx *perf_ctx_param;
+
+ perf_ctx_param = &context_param->u.perf_counter;
+ return lttng_add_perf_counter_to_ctx(
+ perf_ctx_param->type,
+ perf_ctx_param->config,
+ perf_ctx_param->name,
+ ctx);
+ }
+ case LTTNG_UST_ABI_CONTEXT_VTID:
+ return lttng_add_vtid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VPID:
+ return lttng_add_vpid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_PROCNAME:
+ return lttng_add_procname_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_IP:
+ return lttng_add_ip_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_CPU_ID:
+ return lttng_add_cpu_id_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
+ return lttng_ust_add_app_context_to_ctx_rcu(uargs->app_context.ctxname,
+ ctx);
+ case LTTNG_UST_ABI_CONTEXT_CGROUP_NS:
+ return lttng_add_cgroup_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_IPC_NS:
+ return lttng_add_ipc_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_MNT_NS:
+ return lttng_add_mnt_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_NET_NS:
+ return lttng_add_net_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_PID_NS:
+ return lttng_add_pid_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_TIME_NS:
+ return lttng_add_time_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_USER_NS:
+ return lttng_add_user_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_UTS_NS:
+ return lttng_add_uts_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VUID:
+ return lttng_add_vuid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VEUID:
+ return lttng_add_veuid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VSUID:
+ return lttng_add_vsuid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VGID:
+ return lttng_add_vgid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VEGID:
+ return lttng_add_vegid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VSGID:
+ return lttng_add_vsgid_to_ctx(ctx);
+ default:
+ return -EINVAL;
+ }
+}
+
+int lttng_event_enabler_attach_context(
+ struct lttng_event_enabler *enabler __attribute__((unused)),
+ struct lttng_ust_abi_context *context_param __attribute__((unused)))
+{
+ return -ENOSYS;
+}
+
+void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
+{
+ if (!event_enabler) {
+ return;
+ }
+ cds_list_del(&event_enabler->node);
+
+ lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
+
+ lttng_destroy_context(event_enabler->ctx);
+ free(event_enabler);
+}
+
+/*
+ * lttng_session_sync_event_enablers should be called just before starting a
+ * session.
+ */
+static
+void lttng_session_sync_event_enablers(struct lttng_ust_session *session)
+{
+ struct lttng_event_enabler *event_enabler;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+
+ cds_list_for_each_entry(event_enabler, &session->priv->enablers_head, node)
+ lttng_event_enabler_ref_event_recorders(event_enabler);
+ /*
+ * For each event, if at least one of its enablers is enabled,
+ * and its channel and session transient states are enabled, we
+ * enable the event, else we disable it.
+ */
+ cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
+ struct lttng_enabler_ref *enabler_ref;
+ struct lttng_ust_bytecode_runtime *runtime;
+ int enabled = 0, has_enablers_without_filter_bytecode = 0;
+ int nr_filters = 0;
+
+ /* Enable events */
+ cds_list_for_each_entry(enabler_ref,
+ &event_recorder_priv->parent.enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled) {
+ enabled = 1;
+ break;
+ }
+ }
+ /*
+ * Enabled state is based on union of enablers, with
+		 * intersection of session and channel transient enable
+ * states.
+ */
+ enabled = enabled && session->priv->tstate && event_recorder_priv->pub->chan->priv->parent.tstate;
+
+ CMM_STORE_SHARED(event_recorder_priv->pub->parent->enabled, enabled);
+ /*
+ * Sync tracepoint registration with event enabled
+ * state.
+ */
+ if (enabled) {
+ if (!event_recorder_priv->parent.registered)
+ register_event(event_recorder_priv->parent.pub);
+ } else {
+ if (event_recorder_priv->parent.registered)
+ unregister_event(event_recorder_priv->parent.pub);
+ }
+
+ /* Check if has enablers without bytecode enabled */
+ cds_list_for_each_entry(enabler_ref,
+ &event_recorder_priv->parent.enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled
+ && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+ has_enablers_without_filter_bytecode = 1;
+ break;
+ }
+ }
+ event_recorder_priv->parent.has_enablers_without_filter_bytecode =
+ has_enablers_without_filter_bytecode;
+
+ /* Enable filters */
+ cds_list_for_each_entry(runtime,
+ &event_recorder_priv->parent.filter_bytecode_runtime_head, node) {
+ lttng_bytecode_sync_state(runtime);
+ nr_filters++;
+ }
+ CMM_STORE_SHARED(event_recorder_priv->parent.pub->eval_filter,
+ !(has_enablers_without_filter_bytecode || !nr_filters));
+ }
+ lttng_ust_tp_probe_prune_release_queue();
+}
+
+/* Support for event notifiers was introduced in probe provider major version 2. */
+static
+bool lttng_ust_probe_supports_event_notifier(const struct lttng_ust_probe_desc *probe_desc)
+{
+ return probe_desc->major >= 2;
+}
+
+static
+void lttng_create_event_notifier_if_missing(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
+ struct lttng_ust_registered_probe *reg_probe;
+ struct cds_list_head *probe_list;
+ int i;
+
+ probe_list = lttng_get_probe_list_head();
+
+ cds_list_for_each_entry(reg_probe, probe_list, head) {
+ const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
+
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ int ret;
+ bool found = false;
+ const struct lttng_ust_event_desc *desc;
+ struct lttng_ust_event_notifier_private *event_notifier_priv;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+
+ desc = probe_desc->event_desc[i];
+
+ if (!lttng_desc_match_enabler(desc,
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
+ continue;
+
+ /*
+ * Given the current event_notifier group, get the bucket that
+ * the target event_notifier would be if it was already
+ * created.
+ */
+ head = borrow_hash_table_bucket(
+ event_notifier_group->event_notifiers_ht.table,
+ LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, desc);
+
+ cds_hlist_for_each_entry(event_notifier_priv, node, head, hlist) {
+ /*
+ * Check if event_notifier already exists by checking
+ * if the event_notifier and enabler share the same
+ * description and id.
+ */
+ if (event_notifier_priv->parent.desc == desc &&
+ event_notifier_priv->parent.user_token == event_notifier_enabler->user_token) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ /* Check that the probe supports event notifiers, else report the error. */
+ if (!lttng_ust_probe_supports_event_notifier(probe_desc)) {
+ ERR("Probe \"%s\" contains event \"%s:%s\" which matches an enabled event notifier, "
+ "but its version (%u.%u) is too old and does not implement event notifiers. "
+ "It needs to be recompiled against a newer version of LTTng-UST, otherwise "
+ "this event will not generate any notification.",
+ probe_desc->provider_name,
+ probe_desc->provider_name, desc->event_name,
+ probe_desc->major,
+ probe_desc->minor);
+ continue;
+ }
+ /*
+			 * We need to create an event_notifier for this event probe.
+ */
+ ret = lttng_event_notifier_create(desc,
+ event_notifier_enabler->user_token,
+ event_notifier_enabler->error_counter_index,
+ event_notifier_group);
+ if (ret) {
+ DBG("Unable to create event_notifier \"%s:%s\", error %d\n",
+ probe_desc->provider_name,
+ probe_desc->event_desc[i]->event_name, ret);
+ }
+ }
+ }
+}
+
+/*
+ * Create event_notifiers associated with an event_notifier enabler (if not already present).
+ */
+static
+int lttng_event_notifier_enabler_ref_event_notifiers(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
+ struct lttng_ust_event_notifier_private *event_notifier_priv;
+
+ /*
+	 * Only try to create event_notifiers for enablers that are enabled; the user
+	 * might still be attaching filters or exclusions to the
+ * event_notifier_enabler.
+ */
+ if (!lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled)
+ goto end;
+
+ /* First, ensure that probe event_notifiers are created for this enabler. */
+ lttng_create_event_notifier_if_missing(event_notifier_enabler);
+
+ /* Link the created event_notifier with its associated enabler. */
+ cds_list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
+ struct lttng_enabler_ref *enabler_ref;
+
+ if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier_priv->pub))
+ continue;
+
+ enabler_ref = lttng_enabler_ref(&event_notifier_priv->parent.enablers_ref_head,
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
+ if (!enabler_ref) {
+ /*
+ * If no backward ref, create it.
+ * Add backward ref from event_notifier to enabler.
+ */
+ enabler_ref = zmalloc(sizeof(*enabler_ref));
+ if (!enabler_ref)
+ return -ENOMEM;
+
+ enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
+ event_notifier_enabler);
+ cds_list_add(&enabler_ref->node,
+ &event_notifier_priv->parent.enablers_ref_head);
+ }
+
+ /*
+ * Link filter bytecodes if not linked yet.
+ */
+ lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
+ &event_notifier_group->ctx,
+ &event_notifier_priv->parent.filter_bytecode_runtime_head,
+			&lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
+
+ /*
+ * Link capture bytecodes if not linked yet.
+ */
+ lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
+ &event_notifier_group->ctx, &event_notifier_priv->capture_bytecode_runtime_head,
+ &event_notifier_enabler->capture_bytecode_head);
+
+ event_notifier_priv->num_captures = event_notifier_enabler->num_captures;
+ }
+end:
+ return 0;
+}
+
+static
+void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
+{
+ struct lttng_event_notifier_enabler *event_notifier_enabler;
+ struct lttng_ust_event_notifier_private *event_notifier_priv;
+
+ cds_list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
+ lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);
+
+ /*
+ * For each event_notifier, if at least one of its enablers is enabled,
+ * we enable the event_notifier, else we disable it.
+ */
+ cds_list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
+ struct lttng_enabler_ref *enabler_ref;
+ struct lttng_ust_bytecode_runtime *runtime;
+ int enabled = 0, has_enablers_without_filter_bytecode = 0;
+ int nr_filters = 0, nr_captures = 0;
+
+ /* Enable event_notifiers */
+ cds_list_for_each_entry(enabler_ref,
+ &event_notifier_priv->parent.enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled) {
+ enabled = 1;
+ break;
+ }
+ }
+
+ CMM_STORE_SHARED(event_notifier_priv->pub->parent->enabled, enabled);
+ /*
+ * Sync tracepoint registration with event_notifier enabled
+ * state.
+ */
+ if (enabled) {
+ if (!event_notifier_priv->parent.registered)
+ register_event(event_notifier_priv->parent.pub);
+ } else {
+ if (event_notifier_priv->parent.registered)
+ unregister_event(event_notifier_priv->parent.pub);
+ }
+
+ /* Check if has enablers without bytecode enabled */
+ cds_list_for_each_entry(enabler_ref,
+ &event_notifier_priv->parent.enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled
+ && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+ has_enablers_without_filter_bytecode = 1;
+ break;
+ }
+ }
+ event_notifier_priv->parent.has_enablers_without_filter_bytecode =
+ has_enablers_without_filter_bytecode;
+
+ /* Enable filters */
+ cds_list_for_each_entry(runtime,
+ &event_notifier_priv->parent.filter_bytecode_runtime_head, node) {
+ lttng_bytecode_sync_state(runtime);
+ nr_filters++;
+ }
+ CMM_STORE_SHARED(event_notifier_priv->parent.pub->eval_filter,
+ !(has_enablers_without_filter_bytecode || !nr_filters));
+
+ /* Enable captures. */
+ cds_list_for_each_entry(runtime,
+ &event_notifier_priv->capture_bytecode_runtime_head, node) {
+ lttng_bytecode_sync_state(runtime);
+ nr_captures++;
+ }
+ CMM_STORE_SHARED(event_notifier_priv->pub->eval_capture,
+ !!nr_captures);
+ }
+ lttng_ust_tp_probe_prune_release_queue();
+}
+
+/*
+ * Apply enablers to session events, adding events to session if need
+ * be. It is required after each modification applied to an active
+ * session, and right before session "start".
+ * "lazy" sync means we only sync if required.
+ */
+static
+void lttng_session_lazy_sync_event_enablers(struct lttng_ust_session *session)
+{
+ /* We can skip if session is not active */
+ if (!session->active)
+ return;
+ lttng_session_sync_event_enablers(session);
+}
+
+/*
+ * Update all sessions with the given app context.
+ * Called with ust lock held.
+ * This is invoked when an application context gets loaded/unloaded. It
+ * ensures the context callbacks are in sync with the application
+ * context (either app context callbacks, or dummy callbacks).
+ */
+void lttng_ust_context_set_session_provider(const char *name,
+ size_t (*get_size)(void *priv, size_t offset),
+ void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan),
+ void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
+ void *priv)
+{
+ struct lttng_ust_session_private *session_priv;
+
+ cds_list_for_each_entry(session_priv, &sessions, node) {
+ struct lttng_ust_channel_buffer_private *chan;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+ int ret;
+
+ ret = lttng_ust_context_set_provider_rcu(&session_priv->ctx,
+ name, get_size, record, get_value, priv);
+ if (ret)
+ abort();
+ cds_list_for_each_entry(chan, &session_priv->chan_head, node) {
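+/*
+ * Return the existing back-reference from the enabler_ref_list to the given
+ * enabler, or NULL if none has been created yet.
+ */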
+ ret = lttng_ust_context_set_provider_rcu(&chan->ctx,
+ name, get_size, record, get_value, priv);
+ if (ret)
+ abort();
+ }
+ cds_list_for_each_entry(event_recorder_priv, &session_priv->events_head, node) {
+ ret = lttng_ust_context_set_provider_rcu(&event_recorder_priv->ctx,
+ name, get_size, record, get_value, priv);
+ if (ret)
+ abort();
+ }
+ }
+}
+
+/*
+ * Update all event_notifier groups with the given app context.
+ * Called with ust lock held.
+ * This is invoked when an application context gets loaded/unloaded. It
+ * ensures the context callbacks are in sync with the application
+ * context (either app context callbacks, or dummy callbacks).
+ */
+void lttng_ust_context_set_event_notifier_group_provider(const char *name,
+ size_t (*get_size)(void *priv, size_t offset),
+ void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan),
+ void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
+ void *priv)
+{
+ struct lttng_event_notifier_group *event_notifier_group;
+
+ cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
+ int ret;
+
+ ret = lttng_ust_context_set_provider_rcu(
+ &event_notifier_group->ctx,
+ name, get_size, record, get_value, priv);
+ if (ret)
+ abort();
+ }
+}
+
+int lttng_ust_session_uuid_validate(struct lttng_ust_session *session,
+ unsigned char *uuid)
+{
+ if (!session)
+ return 0;
+ /* Compare UUID with session. */
+ if (session->priv->uuid_set) {
+ if (memcmp(session->priv->uuid, uuid, LTTNG_UST_UUID_LEN)) {
+ return -1;
+ }
+ } else {
+ memcpy(session->priv->uuid, uuid, LTTNG_UST_UUID_LEN);
+ session->priv->uuid_set = true;
+ }
+ return 0;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <error.h>
+#include <dlfcn.h>
+#include <stdlib.h>
+#include "common/logging.h"
+#include <lttng/ust-getcpu.h>
+#include <urcu/system.h>
+#include <urcu/arch.h>
+
+#include "getenv.h"
+#include "common/ringbuffer/getcpu.h"
+
+int (*lttng_get_cpu)(void);
+
+static
+void *getcpu_handle;
+
+int lttng_ust_getcpu_override(int (*getcpu)(void))
+{
+ CMM_STORE_SHARED(lttng_get_cpu, getcpu);
+ return 0;
+}
+
+void lttng_ust_getcpu_init(void)
+{
+ const char *libname;
+ void (*libinit)(void);
+
+ if (getcpu_handle)
+ return;
+ libname = lttng_ust_getenv("LTTNG_UST_GETCPU_PLUGIN");
+ if (!libname)
+ return;
+ getcpu_handle = dlopen(libname, RTLD_NOW);
+ if (!getcpu_handle) {
+ PERROR("Cannot load LTTng UST getcpu override library %s",
+ libname);
+ return;
+ }
+ dlerror();
+ libinit = (void (*)(void)) dlsym(getcpu_handle,
+ "lttng_ust_getcpu_plugin_init");
+ if (!libinit) {
+ PERROR("Cannot find LTTng UST getcpu override library %s initialization function lttng_ust_getcpu_plugin_init()",
+ libname);
+ return;
+ }
+ libinit();
+}
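+
+/*
+ * Usage sketch (illustrative): a getcpu plugin loaded through the
+ * LTTNG_UST_GETCPU_PLUGIN environment variable exposes
+ * lttng_ust_getcpu_plugin_init() and installs its callback with
+ * lttng_ust_getcpu_override().  plugin_getcpu() is a placeholder name;
+ * sched_getcpu(3) requires _GNU_SOURCE.
+ *
+ *	#define _GNU_SOURCE
+ *	#include <sched.h>
+ *	#include <lttng/ust-getcpu.h>
+ *
+ *	static int plugin_getcpu(void)
+ *	{
+ *		return sched_getcpu();
+ *	}
+ *
+ *	void lttng_ust_getcpu_plugin_init(void)
+ *	{
+ *		(void) lttng_ust_getcpu_override(plugin_getcpu);
+ *	}
+ */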
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng hash table helpers.
+ */
+
+#ifndef _LTTNG_HASH_HELPER_H
+#define _LTTNG_HASH_HELPER_H
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <urcu/compiler.h>
+
+/*
+ * Hash function
+ * Source: http://burtleburtle.net/bob/c/lookup3.c
+ * Originally Public Domain
+ */
+
+#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
+
+#define mix(a, b, c) \
+do { \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c, 16); c += b; \
+ b -= a; b ^= rot(a, 19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+} while (0)
+
+#define final(a, b, c) \
+{ \
+ c ^= b; c -= rot(b, 14); \
+ a ^= c; a -= rot(c, 11); \
+ b ^= a; b -= rot(a, 25); \
+ c ^= b; c -= rot(b, 16); \
+ a ^= c; a -= rot(c, 4);\
+ b ^= a; b -= rot(a, 14); \
+ c ^= b; c -= rot(b, 24); \
+}
+
+static inline
+uint32_t lttng_hash_u32(const uint32_t *k, size_t length, uint32_t initval)
+ __attribute__((unused));
+static inline
+uint32_t lttng_hash_u32(
+ const uint32_t *k, /* the key, an array of uint32_t values */
+ size_t length, /* the length of the key, in uint32_ts */
+ uint32_t initval) /* the previous hash, or an arbitrary value */
+{
+ uint32_t a, b, c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;
+
+ /*----------------------------------------- handle most of the key */
+ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a, b, c);
+ length -= 3;
+ k += 3;
+ }
+
+ /*----------------------------------- handle the last 3 uint32_t's */
+ switch (length) { /* all the case statements fall through */
+	case 3: c += k[2];	/* fall through */
+	case 2: b += k[1];	/* fall through */
+	case 1: a += k[0];
+		final(a, b, c);	/* fall through */
+ case 0: /* case 0: nothing left to add */
+ break;
+ }
+ /*---------------------------------------------- report the result */
+ return c;
+}
+
+static inline
+void lttng_hashword2(
+ const uint32_t *k, /* the key, an array of uint32_t values */
+ size_t length, /* the length of the key, in uint32_ts */
+ uint32_t *pc, /* IN: seed OUT: primary hash value */
+ uint32_t *pb) /* IN: more seed OUT: secondary hash value */
+{
+ uint32_t a, b, c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
+ c += *pb;
+
+ /*----------------------------------------- handle most of the key */
+ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a, b, c);
+ length -= 3;
+ k += 3;
+ }
+
+ /*----------------------------------- handle the last 3 uint32_t's */
+ switch (length) { /* all the case statements fall through */
+ case 3: c += k[2]; /* fall through */
+ case 2: b += k[1]; /* fall through */
+ case 1: a += k[0];
+ final(a, b, c); /* fall through */
+ case 0: /* case 0: nothing left to add */
+ break;
+ }
+ /*---------------------------------------------- report the result */
+ *pc = c;
+ *pb = b;
+}
+
+#if (CAA_BITS_PER_LONG == 32)
+static inline
+unsigned long lttng_hash_mix(const void *_key, size_t length, unsigned long seed)
+{
+ unsigned int key = (unsigned int) _key;
+
+ assert(length == sizeof(unsigned int));
+ return lttng_hash_u32(&key, 1, seed);
+}
+#else
+static inline
+unsigned long lttng_hash_mix(const void *_key, size_t length, unsigned long seed)
+{
+ union {
+ uint64_t v64;
+ uint32_t v32[2];
+ } v;
+ union {
+ uint64_t v64;
+ uint32_t v32[2];
+ } key;
+
+ assert(length == sizeof(unsigned long));
+ v.v64 = (uint64_t) seed;
+ key.v64 = (uint64_t) _key;
+ lttng_hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);
+ return v.v64;
+}
+#endif
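+
+/*
+ * Usage sketch (illustrative): hash a pointer-sized key into a
+ * power-of-two sized bucket array; "desc", "seed" and "nr_buckets" are
+ * placeholders.
+ *
+ *	unsigned long hash;
+ *
+ *	hash = lttng_hash_mix((const void *) desc, sizeof(void *), seed);
+ *	bucket = hash & (nr_buckets - 1);
+ */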
+
+#endif /* _LTTNG_HASH_HELPER_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright 2010-2012 (C) Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Holds LTTng probes registry.
+ */
+
+#define _LGPL_SOURCE
+#include <string.h>
+#include <errno.h>
+#include <urcu/list.h>
+#include <urcu/hlist.h>
+#include <lttng/ust-events.h>
+#include <lttng/tracepoint.h>
+#include "tracepoint-internal.h"
+#include <assert.h>
+#include "common/macros.h"
+#include <ctype.h>
+
+#include "lttng-tracer-core.h"
+#include "jhash.h"
+#include "error.h"
+#include "ust-events-internal.h"
+
+/*
+ * probe list is protected by ust_lock()/ust_unlock().
+ */
+static CDS_LIST_HEAD(_probe_list);
+
+/*
+ * List of probes registered but not yet processed.
+ */
+static CDS_LIST_HEAD(lazy_probe_init);
+
+/*
+ * lazy_nesting counter ensures we don't trigger lazy probe registration
+ * fixup while we are performing the fixup. It is protected by the ust
+ * mutex.
+ */
+static int lazy_nesting;
+
+/*
+ * Validate that each event within the probe provider refers to the
+ * right probe, and that the resulting name is not too long.
+ */
+static
+bool check_event_provider(const struct lttng_ust_probe_desc *probe_desc)
+{
+ int i;
+
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ const struct lttng_ust_event_desc *event_desc = probe_desc->event_desc[i];
+
+ if (event_desc->probe_desc != probe_desc) {
+ ERR("Error registering probe provider '%s'. Event '%s:%s' refers to the wrong provider descriptor.",
+ probe_desc->provider_name, probe_desc->provider_name, event_desc->event_name);
+ return false; /* provider mismatch */
+ }
+ if (!lttng_ust_validate_event_name(event_desc)) {
+ ERR("Error registering probe provider '%s'. Event '%s:%s' name is too long.",
+ probe_desc->provider_name, probe_desc->provider_name, event_desc->event_name);
+			return false; /* event name too long */
+ }
+ }
+ return true;
+}
+
+/*
+ * Called under ust lock.
+ */
+static
+void lttng_lazy_probe_register(struct lttng_ust_registered_probe *reg_probe)
+{
+ struct lttng_ust_registered_probe *iter;
+ struct cds_list_head *probe_list;
+
+ /*
+ * The provider ensures there are no duplicate event names.
+ * Duplicated TRACEPOINT_EVENT event names would generate a
+ * compile-time error due to duplicated symbol names.
+ */
+
+ /*
+ * We sort the providers by struct lttng_ust_probe_desc pointer
+ * address.
+ */
+ probe_list = &_probe_list;
+ cds_list_for_each_entry_reverse(iter, probe_list, head) {
+ BUG_ON(iter == reg_probe); /* Should never be in the list twice */
+ if (iter < reg_probe) {
+ /* We belong to the location right after iter. */
+			cds_list_add(&reg_probe->head, &iter->head);
+ goto probe_added;
+ }
+ }
+ /* We should be added at the head of the list */
+	cds_list_add(&reg_probe->head, probe_list);
+probe_added:
+ DBG("just registered probe %s containing %u events",
+ reg_probe->desc->provider_name, reg_probe->desc->nr_events);
+}
+
+/*
+ * Called under ust lock.
+ */
+static
+void fixup_lazy_probes(void)
+{
+ struct lttng_ust_registered_probe *iter, *tmp;
+ int ret;
+
+ lazy_nesting++;
+ cds_list_for_each_entry_safe(iter, tmp,
+ &lazy_probe_init, lazy_init_head) {
+ lttng_lazy_probe_register(iter);
+ iter->lazy = 0;
+ cds_list_del(&iter->lazy_init_head);
+ }
+ ret = lttng_fix_pending_events();
+ assert(!ret);
+ lazy_nesting--;
+}
+
+/*
+ * Called under ust lock.
+ */
+struct cds_list_head *lttng_get_probe_list_head(void)
+{
+ if (!lazy_nesting && !cds_list_empty(&lazy_probe_init))
+ fixup_lazy_probes();
+ return &_probe_list;
+}
+
+static
+int check_provider_version(const struct lttng_ust_probe_desc *desc)
+{
+ /*
+ * Check tracepoint provider version compatibility.
+ */
+ if (desc->major <= LTTNG_UST_PROVIDER_MAJOR) {
+ DBG("Provider \"%s\" accepted, version %u.%u is compatible "
+ "with LTTng UST provider version %u.%u.",
+ desc->provider_name, desc->major, desc->minor,
+ LTTNG_UST_PROVIDER_MAJOR,
+ LTTNG_UST_PROVIDER_MINOR);
+ if (desc->major < LTTNG_UST_PROVIDER_MAJOR) {
+ DBG("However, some LTTng UST features might not be "
+ "available for this provider unless it is "
+ "recompiled against a more recent LTTng UST.");
+ }
+ return 1; /* accept */
+ } else {
+ ERR("Provider \"%s\" rejected, version %u.%u is incompatible "
+ "with LTTng UST provider version %u.%u. Please upgrade "
+ "LTTng UST.",
+ desc->provider_name, desc->major, desc->minor,
+ LTTNG_UST_PROVIDER_MAJOR,
+ LTTNG_UST_PROVIDER_MINOR);
+ return 0; /* reject */
+ }
+}
+
+struct lttng_ust_registered_probe *lttng_ust_probe_register(const struct lttng_ust_probe_desc *desc)
+{
+ struct lttng_ust_registered_probe *reg_probe = NULL;
+
+ lttng_ust_fixup_tls();
+
+ /*
+ * If version mismatch, don't register, but don't trigger assert
+ * on caller. The version check just prints an error.
+ */
+ if (!check_provider_version(desc))
+ return NULL;
+ if (!check_event_provider(desc))
+ return NULL;
+
+ ust_lock_nocheck();
+
+ reg_probe = zmalloc(sizeof(struct lttng_ust_registered_probe));
+ if (!reg_probe)
+ goto end;
+ reg_probe->desc = desc;
+	cds_list_add(&reg_probe->lazy_init_head, &lazy_probe_init);
+ reg_probe->lazy = 1;
+
+ DBG("adding probe %s containing %u events to lazy registration list",
+ desc->provider_name, desc->nr_events);
+ /*
+ * If there is at least one active session, we need to register
+ * the probe immediately, since we cannot delay event
+ * registration because they are needed ASAP.
+ */
+ if (lttng_session_active())
+ fixup_lazy_probes();
+
+ lttng_fix_pending_event_notifiers();
+end:
+ ust_unlock();
+ return reg_probe;
+}
+
+void lttng_ust_probe_unregister(struct lttng_ust_registered_probe *reg_probe)
+{
+ lttng_ust_fixup_tls();
+
+ if (!reg_probe)
+ return;
+ if (!check_provider_version(reg_probe->desc))
+ return;
+
+ ust_lock_nocheck();
+ if (!reg_probe->lazy)
+		cds_list_del(&reg_probe->head);
+	else
+		cds_list_del(&reg_probe->lazy_init_head);
+
+ lttng_probe_provider_unregister_events(reg_probe->desc);
+ DBG("just unregistered probes of provider %s", reg_probe->desc->provider_name);
+ ust_unlock();
+ free(reg_probe);
+}
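+
+/*
+ * Usage sketch (illustrative): a tracepoint provider (normally through
+ * the code generated by the lttng-ust tracepoint macros) registers its
+ * probe descriptor once and keeps the returned handle for
+ * unregistration.  "my_probe_desc" is a placeholder.
+ *
+ *	static struct lttng_ust_registered_probe *reg_handle;
+ *
+ *	reg_handle = lttng_ust_probe_register(&my_probe_desc);
+ *	if (!reg_handle)
+ *		return;		// version mismatch, bad descriptor or ENOMEM
+ *	...
+ *	lttng_ust_probe_unregister(reg_handle);
+ */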
+
+void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list)
+{
+ struct tp_list_entry *list_entry, *tmp;
+
+ cds_list_for_each_entry_safe(list_entry, tmp, &list->head, head) {
+ cds_list_del(&list_entry->head);
+ free(list_entry);
+ }
+}
+
+/*
+ * called with UST lock held.
+ */
+int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list)
+{
+ struct lttng_ust_registered_probe *reg_probe;
+ struct cds_list_head *probe_list;
+ int i;
+
+ probe_list = lttng_get_probe_list_head();
+ CDS_INIT_LIST_HEAD(&list->head);
+ cds_list_for_each_entry(reg_probe, probe_list, head) {
+ const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
+
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ const struct lttng_ust_event_desc *event_desc =
+ probe_desc->event_desc[i];
+ struct tp_list_entry *list_entry;
+
+ /* Skip event if name is too long. */
+ if (!lttng_ust_validate_event_name(event_desc))
+ continue;
+ list_entry = zmalloc(sizeof(*list_entry));
+ if (!list_entry)
+ goto err_nomem;
+ cds_list_add(&list_entry->head, &list->head);
+ lttng_ust_format_event_name(event_desc, list_entry->tp.name);
+ if (!event_desc->loglevel) {
+ list_entry->tp.loglevel = TRACE_DEFAULT;
+ } else {
+ list_entry->tp.loglevel = *(*event_desc->loglevel);
+ }
+ }
+ }
+ if (cds_list_empty(&list->head))
+ list->iter = NULL;
+ else
+ list->iter =
+ cds_list_first_entry(&list->head, struct tp_list_entry, head);
+ return 0;
+
+err_nomem:
+ lttng_probes_prune_event_list(list);
+ return -ENOMEM;
+}
+
+/*
+ * Return current iteration position, advance internal iterator to next.
+ * Return NULL if end of list.
+ */
+struct lttng_ust_abi_tracepoint_iter *
+ lttng_ust_tracepoint_list_get_iter_next(struct lttng_ust_tracepoint_list *list)
+{
+ struct tp_list_entry *entry;
+
+ if (!list->iter)
+ return NULL;
+ entry = list->iter;
+ if (entry->head.next == &list->head)
+ list->iter = NULL;
+ else
+ list->iter = cds_list_entry(entry->head.next,
+ struct tp_list_entry, head);
+ return &entry->tp;
+}
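+
+/*
+ * Usage sketch (illustrative): snapshot and walk the registered event
+ * list, then release it.  Caller holds the UST lock, as required by
+ * lttng_probes_get_event_list().
+ *
+ *	struct lttng_ust_tracepoint_list tp_list;
+ *	struct lttng_ust_abi_tracepoint_iter *iter;
+ *
+ *	if (lttng_probes_get_event_list(&tp_list))
+ *		return;
+ *	while ((iter = lttng_ust_tracepoint_list_get_iter_next(&tp_list)))
+ *		DBG("event %s, loglevel %d", iter->name, iter->loglevel);
+ *	lttng_probes_prune_event_list(&tp_list);
+ */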
+
+void lttng_probes_prune_field_list(struct lttng_ust_field_list *list)
+{
+ struct tp_field_list_entry *list_entry, *tmp;
+
+ cds_list_for_each_entry_safe(list_entry, tmp, &list->head, head) {
+ cds_list_del(&list_entry->head);
+ free(list_entry);
+ }
+}
+
+/*
+ * called with UST lock held.
+ */
+int lttng_probes_get_field_list(struct lttng_ust_field_list *list)
+{
+ struct lttng_ust_registered_probe *reg_probe;
+ struct cds_list_head *probe_list;
+ int i;
+
+ probe_list = lttng_get_probe_list_head();
+ CDS_INIT_LIST_HEAD(&list->head);
+ cds_list_for_each_entry(reg_probe, probe_list, head) {
+ const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
+
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ const struct lttng_ust_event_desc *event_desc =
+ probe_desc->event_desc[i];
+ int j;
+
+ if (event_desc->nr_fields == 0) {
+ /* Events without fields. */
+ struct tp_field_list_entry *list_entry;
+
+ /* Skip event if name is too long. */
+ if (!lttng_ust_validate_event_name(event_desc))
+ continue;
+ list_entry = zmalloc(sizeof(*list_entry));
+ if (!list_entry)
+ goto err_nomem;
+ cds_list_add(&list_entry->head, &list->head);
+ lttng_ust_format_event_name(event_desc, list_entry->field.event_name);
+ list_entry->field.field_name[0] = '\0';
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
+ if (!event_desc->loglevel) {
+ list_entry->field.loglevel = TRACE_DEFAULT;
+ } else {
+ list_entry->field.loglevel = *(*event_desc->loglevel);
+ }
+ list_entry->field.nowrite = 1;
+ }
+
+ for (j = 0; j < event_desc->nr_fields; j++) {
+ const struct lttng_ust_event_field *event_field =
+ event_desc->fields[j];
+ struct tp_field_list_entry *list_entry;
+
+ /* Skip event if name is too long. */
+ if (!lttng_ust_validate_event_name(event_desc))
+ continue;
+ list_entry = zmalloc(sizeof(*list_entry));
+ if (!list_entry)
+ goto err_nomem;
+ cds_list_add(&list_entry->head, &list->head);
+ lttng_ust_format_event_name(event_desc, list_entry->field.event_name);
+ strncpy(list_entry->field.field_name,
+ event_field->name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ list_entry->field.field_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ switch (event_field->type->type) {
+ case lttng_ust_type_integer:
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_INTEGER;
+ break;
+ case lttng_ust_type_string:
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_STRING;
+ break;
+ case lttng_ust_type_array:
+ if (lttng_ust_get_type_array(event_field->type)->encoding == lttng_ust_string_encoding_none)
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
+ else
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_STRING;
+ break;
+ case lttng_ust_type_sequence:
+ if (lttng_ust_get_type_sequence(event_field->type)->encoding == lttng_ust_string_encoding_none)
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
+ else
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_STRING;
+ break;
+ case lttng_ust_type_float:
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_FLOAT;
+ break;
+ case lttng_ust_type_enum:
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_ENUM;
+ break;
+ default:
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
+ }
+ if (!event_desc->loglevel) {
+ list_entry->field.loglevel = TRACE_DEFAULT;
+ } else {
+ list_entry->field.loglevel = *(*event_desc->loglevel);
+ }
+ list_entry->field.nowrite = event_field->nowrite;
+ }
+ }
+ }
+ if (cds_list_empty(&list->head))
+ list->iter = NULL;
+ else
+ list->iter =
+ cds_list_first_entry(&list->head,
+ struct tp_field_list_entry, head);
+ return 0;
+
+err_nomem:
+ lttng_probes_prune_field_list(list);
+ return -ENOMEM;
+}
+
+/*
+ * Return current iteration position, advance internal iterator to next.
+ * Return NULL if end of list.
+ */
+struct lttng_ust_abi_field_iter *
+ lttng_ust_field_list_get_iter_next(struct lttng_ust_field_list *list)
+{
+ struct tp_field_list_entry *entry;
+
+ if (!list->iter)
+ return NULL;
+ entry = list->iter;
+ if (entry->head.next == &list->head)
+ list->iter = NULL;
+ else
+ list->iter = cds_list_entry(entry->head.next,
+ struct tp_field_list_entry, head);
+ return &entry->field;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_RB_CLIENT_H
+#define _LTTNG_RB_CLIENT_H
+
+#include <stdint.h>
+#include "common/ringbuffer/ringbuffer-config.h"
+
+struct lttng_ust_client_lib_ring_buffer_client_cb {
+ struct lttng_ust_lib_ring_buffer_client_cb parent;
+
+ int (*timestamp_begin) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *timestamp_begin);
+ int (*timestamp_end) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *timestamp_end);
+ int (*events_discarded) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *events_discarded);
+ int (*content_size) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *content_size);
+ int (*packet_size) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *packet_size);
+ int (*stream_id) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *stream_id);
+ int (*current_timestamp) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *ts);
+ int (*sequence_number) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan, uint64_t *seq);
+ int (*instance_id) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan, uint64_t *id);
+};
+
+/*
+ * The ring buffer clients init/exit symbols are private ABI for
+ * liblttng-ust-ctl, which is why they are not hidden.
+ */
+void lttng_ust_ring_buffer_clients_init(void);
+void lttng_ust_ring_buffer_clients_exit(void);
+
+void lttng_ring_buffer_client_overwrite_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_client_overwrite_rt_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_client_discard_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_client_discard_rt_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_metadata_client_init(void)
+ __attribute__((visibility("hidden")));
+
+
+void lttng_ring_buffer_client_overwrite_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_client_overwrite_rt_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_client_discard_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_client_discard_rt_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_metadata_client_exit(void)
+ __attribute__((visibility("hidden")));
+
+
+void lttng_ust_fixup_ring_buffer_client_overwrite_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_fixup_ring_buffer_client_overwrite_rt_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_fixup_ring_buffer_client_discard_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_fixup_ring_buffer_client_discard_rt_tls(void)
+ __attribute__((visibility("hidden")));
+
+#endif /* _LTTNG_RB_CLIENT_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer client (discard mode) for RT.
+ */
+
+#define _LGPL_SOURCE
+#include "lttng-tracer.h"
+#include "lttng-rb-clients.h"
+
+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING "discard-rt"
+#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
+ lttng_ust_fixup_ring_buffer_client_discard_rt_tls
+#define RING_BUFFER_MODE_TEMPLATE_INIT \
+ lttng_ring_buffer_client_discard_rt_init
+#define RING_BUFFER_MODE_TEMPLATE_EXIT \
+ lttng_ring_buffer_client_discard_rt_exit
+#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_DISCARD_RT
+#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_TIMER
+#include "lttng-ring-buffer-client-template.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer client (discard mode).
+ */
+
+#define _LGPL_SOURCE
+#include "lttng-tracer.h"
+#include "lttng-rb-clients.h"
+
+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING "discard"
+#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
+ lttng_ust_fixup_ring_buffer_client_discard_tls
+#define RING_BUFFER_MODE_TEMPLATE_INIT \
+ lttng_ring_buffer_client_discard_init
+#define RING_BUFFER_MODE_TEMPLATE_EXIT \
+ lttng_ring_buffer_client_discard_exit
+#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_DISCARD
+#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_WRITER
+#include "lttng-ring-buffer-client-template.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer client (overwrite mode).
+ */
+
+#define _LGPL_SOURCE
+#include "lttng-tracer.h"
+#include "lttng-rb-clients.h"
+
+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
+#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite-rt"
+#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
+ lttng_ust_fixup_ring_buffer_client_overwrite_rt_tls
+#define RING_BUFFER_MODE_TEMPLATE_INIT \
+ lttng_ring_buffer_client_overwrite_rt_init
+#define RING_BUFFER_MODE_TEMPLATE_EXIT \
+ lttng_ring_buffer_client_overwrite_rt_exit
+#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_OVERWRITE_RT
+#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_TIMER
+#include "lttng-ring-buffer-client-template.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer client (overwrite mode).
+ */
+
+#define _LGPL_SOURCE
+#include "lttng-tracer.h"
+#include "lttng-rb-clients.h"
+
+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
+#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite"
+#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
+ lttng_ust_fixup_ring_buffer_client_overwrite_tls
+#define RING_BUFFER_MODE_TEMPLATE_INIT \
+ lttng_ring_buffer_client_overwrite_init
+#define RING_BUFFER_MODE_TEMPLATE_EXIT \
+ lttng_ring_buffer_client_overwrite_exit
+#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_OVERWRITE
+#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_WRITER
+#include "lttng-ring-buffer-client-template.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer client template.
+ */
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <ust-events-internal.h>
+#include <lttng/urcu/pointer.h>
+#include "common/bitfield.h"
+#include "common/align.h"
+#include "clock.h"
+#include "context-internal.h"
+#include "lttng-tracer.h"
+#include "common/ringbuffer/frontend_types.h"
+#include <urcu/tls-compat.h>
+
+#define LTTNG_COMPACT_EVENT_BITS 5
+#define LTTNG_COMPACT_TSC_BITS 27
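+
+/*
+ * With 5 event-id bits and 27 timestamp bits, the compact event header
+ * packs id and timestamp into a single 32-bit word; compact id 31 is
+ * reserved to flag the extended header layout (full-size id and
+ * timestamp).
+ */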
+
+/*
+ * Keep the natural field alignment for _each field_ within this structure if
+ * you ever add/remove a field from this header. Packed attribute is not used
+ * because gcc generates poor code on at least powerpc and mips. Don't ever
+ * let gcc add padding between the structure elements.
+ */
+
+struct packet_header {
+ /* Trace packet header */
+ uint32_t magic; /*
+ * Trace magic number.
+					 * Contains endianness information.
+ */
+ uint8_t uuid[LTTNG_UST_UUID_LEN];
+ uint32_t stream_id;
+ uint64_t stream_instance_id;
+
+ struct {
+ /* Stream packet context */
+ uint64_t timestamp_begin; /* Cycle count at subbuffer start */
+ uint64_t timestamp_end; /* Cycle count at subbuffer end */
+ uint64_t content_size; /* Size of data in subbuffer */
+ uint64_t packet_size; /* Subbuffer size (include padding) */
+ uint64_t packet_seq_num; /* Packet sequence number */
+ unsigned long events_discarded; /*
+ * Events lost in this subbuffer since
+ * the beginning of the trace.
+ * (may overflow)
+ */
+ uint32_t cpu_id; /* CPU id associated with stream */
+ uint8_t header_end; /* End of header */
+ } ctx;
+};
+
+struct lttng_client_ctx {
+ size_t packet_context_len;
+ size_t event_context_len;
+ struct lttng_ust_ctx *chan_ctx;
+ struct lttng_ust_ctx *event_ctx;
+};
+
+/*
+ * Indexed by lib_ring_buffer_nesting_count().
+ */
+typedef struct lttng_ust_lib_ring_buffer_ctx_private private_ctx_stack_t[LIB_RING_BUFFER_MAX_NESTING];
+static DEFINE_URCU_TLS(private_ctx_stack_t, private_ctx_stack);
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(private_ctx_stack)));
+}
+
+static inline uint64_t lib_ring_buffer_clock_read(
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
+{
+ return trace_clock_read64();
+}
+
+static inline
+size_t ctx_get_aligned_size(size_t offset, struct lttng_ust_ctx *ctx,
+ size_t ctx_len)
+{
+ size_t orig_offset = offset;
+
+ if (caa_likely(!ctx))
+ return 0;
+ offset += lttng_ust_lib_ring_buffer_align(offset, ctx->largest_align);
+ offset += ctx_len;
+ return offset - orig_offset;
+}
+
+static inline
+void ctx_get_struct_size(struct lttng_ust_ctx *ctx, size_t *ctx_len)
+{
+ int i;
+ size_t offset = 0;
+
+ if (caa_likely(!ctx)) {
+ *ctx_len = 0;
+ return;
+ }
+ for (i = 0; i < ctx->nr_fields; i++)
+ offset += ctx->fields[i].get_size(ctx->fields[i].priv, offset);
+ *ctx_len = offset;
+}
+
+static inline
+void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
+ struct lttng_ust_channel_buffer *chan,
+ struct lttng_ust_ctx *ctx)
+{
+ int i;
+
+ if (caa_likely(!ctx))
+ return;
+ lttng_ust_lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
+ for (i = 0; i < ctx->nr_fields; i++)
+ ctx->fields[i].record(ctx->fields[i].priv, bufctx, chan);
+}
+
+/*
+ * record_header_size - Calculate the header size and padding necessary.
+ * @config: ring buffer instance configuration
+ * @chan: channel
+ * @offset: offset in the write buffer
+ * @pre_header_padding: padding to add before the header (output)
+ * @ctx: reservation context
+ *
+ * Returns the event header size (including padding).
+ *
+ * The payload must itself determine its own alignment from the biggest type it
+ * contains.
+ */
+static __inline__
+size_t record_header_size(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ size_t offset,
+ size_t *pre_header_padding,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_client_ctx *client_ctx)
+{
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
+ size_t orig_offset = offset;
+ size_t padding;
+
+ switch (lttng_chan->priv->header_type) {
+ case 1: /* compact */
+ padding = lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
+ offset += padding;
+ if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ offset += sizeof(uint32_t); /* id and timestamp */
+ } else {
+ /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
+ offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
+ /* Align extended struct on largest member */
+ offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ offset += sizeof(uint32_t); /* id */
+ offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ offset += sizeof(uint64_t); /* timestamp */
+ }
+ break;
+ case 2: /* large */
+ padding = lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
+ offset += padding;
+ offset += sizeof(uint16_t);
+ if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
+ offset += sizeof(uint32_t); /* timestamp */
+ } else {
+ /* Align extended struct on largest member */
+ offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ offset += sizeof(uint32_t); /* id */
+ offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ offset += sizeof(uint64_t); /* timestamp */
+ }
+ break;
+ default:
+ padding = 0;
+ WARN_ON_ONCE(1);
+ }
+ offset += ctx_get_aligned_size(offset, client_ctx->chan_ctx,
+ client_ctx->packet_context_len);
+ offset += ctx_get_aligned_size(offset, client_ctx->event_ctx,
+ client_ctx->event_context_len);
+ *pre_header_padding = padding;
+ return offset - orig_offset;
+}
+
+#include "common/ringbuffer/api.h"
+#include "lttng-rb-clients.h"
+
+static
+void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_client_ctx *client_ctx,
+ uint32_t event_id);
+
+/*
+ * lttng_write_event_header
+ *
+ * Writes the event header to the offset (already aligned on 32-bits).
+ *
+ * @config: ring buffer instance configuration
+ * @ctx: reservation context
+ * @event_id: event ID
+ */
+static __inline__
+void lttng_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_client_ctx *client_ctx,
+ uint32_t event_id)
+{
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->priv->chan);
+
+ if (caa_unlikely(ctx->priv->rflags))
+ goto slow_path;
+
+ switch (lttng_chan->priv->header_type) {
+ case 1: /* compact */
+ {
+ uint32_t id_time = 0;
+
+ bt_bitfield_write(&id_time, uint32_t,
+ 0,
+ LTTNG_COMPACT_EVENT_BITS,
+ event_id);
+ bt_bitfield_write(&id_time, uint32_t,
+ LTTNG_COMPACT_EVENT_BITS,
+ LTTNG_COMPACT_TSC_BITS,
+ ctx->priv->tsc);
+ lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
+ break;
+ }
+ case 2: /* large */
+ {
+ uint32_t timestamp = (uint32_t) ctx->priv->tsc;
+ uint16_t id = event_id;
+
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint32_t));
+		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+ break;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ ctx_record(ctx, lttng_chan, client_ctx->chan_ctx);
+ ctx_record(ctx, lttng_chan, client_ctx->event_ctx);
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
+
+ return;
+
+slow_path:
+ lttng_write_event_header_slow(config, ctx, client_ctx, event_id);
+}
+
+static
+void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_client_ctx *client_ctx,
+ uint32_t event_id)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->priv->chan);
+
+ switch (lttng_chan->priv->header_type) {
+ case 1: /* compact */
+ if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ uint32_t id_time = 0;
+
+ bt_bitfield_write(&id_time, uint32_t,
+ 0,
+ LTTNG_COMPACT_EVENT_BITS,
+ event_id);
+ bt_bitfield_write(&id_time, uint32_t,
+ LTTNG_COMPACT_EVENT_BITS,
+ LTTNG_COMPACT_TSC_BITS,
+ ctx_private->tsc);
+ lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
+ } else {
+ uint8_t id = 0;
+ uint64_t timestamp = ctx_private->tsc;
+
+ bt_bitfield_write(&id, uint8_t,
+ 0,
+ LTTNG_COMPACT_EVENT_BITS,
+ 31);
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ /* Align extended struct on largest member */
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
+ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
+			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+ }
+ break;
+ case 2: /* large */
+ {
+ if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ uint32_t timestamp = (uint32_t) ctx_private->tsc;
+ uint16_t id = event_id;
+
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint32_t));
+			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+ } else {
+ uint16_t id = 65535;
+ uint64_t timestamp = ctx_private->tsc;
+
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ /* Align extended struct on largest member */
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
+ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
+			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+ }
+ break;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ }
+ ctx_record(ctx, lttng_chan, client_ctx->chan_ctx);
+ ctx_record(ctx, lttng_chan, client_ctx->event_ctx);
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
+}
+
+static const struct lttng_ust_lib_ring_buffer_config client_config;
+
+static uint64_t client_ring_buffer_clock_read(struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return lib_ring_buffer_clock_read(chan);
+}
+
+static
+size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ size_t offset,
+ size_t *pre_header_padding,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
+{
+ return record_header_size(config, chan, offset,
+ pre_header_padding, ctx, client_ctx);
+}
+
+/**
+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
+ *
+ * Return header size without padding after the structure. Don't use packed
+ * structure because gcc generates inefficient code on some architectures
+ * (powerpc, mips..)
+ */
+static size_t client_packet_header_size(void)
+{
+ return offsetof(struct packet_header, ctx.header_end);
+}
+
+static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
+ unsigned int subbuf_idx,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct packet_header *header =
+ (struct packet_header *)
+ lib_ring_buffer_offset_address(&buf->backend,
+ subbuf_idx * chan->backend.subbuf_size,
+ handle);
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
+ uint64_t cnt = shmp_index(handle, buf->backend.buf_cnt, subbuf_idx)->seq_cnt;
+
+ assert(header);
+ if (!header)
+ return;
+ header->magic = CTF_MAGIC_NUMBER;
+ memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
+ header->stream_id = lttng_chan->priv->id;
+ header->stream_instance_id = buf->backend.cpu;
+ header->ctx.timestamp_begin = tsc;
+ header->ctx.timestamp_end = 0;
+ header->ctx.content_size = ~0ULL; /* for debugging */
+ header->ctx.packet_size = ~0ULL;
+ header->ctx.packet_seq_num = chan->backend.num_subbuf * cnt + subbuf_idx;
+ header->ctx.events_discarded = 0;
+ header->ctx.cpu_id = buf->backend.cpu;
+}
+
+/*
+ * offset is assumed to never be 0 here: never deliver a completely empty
+ * subbuffer. data_size is between 1 and subbuf_size.
+ */
+static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
+ unsigned int subbuf_idx, unsigned long data_size,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct packet_header *header =
+ (struct packet_header *)
+ lib_ring_buffer_offset_address(&buf->backend,
+ subbuf_idx * chan->backend.subbuf_size,
+ handle);
+ unsigned long records_lost = 0;
+
+ assert(header);
+ if (!header)
+ return;
+ header->ctx.timestamp_end = tsc;
+ header->ctx.content_size =
+ (uint64_t) data_size * CHAR_BIT; /* in bits */
+ header->ctx.packet_size =
+ (uint64_t) LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
+
+ records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
+ records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
+ records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
+ header->ctx.events_discarded = records_lost;
+}
+
+static int client_buffer_create(
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ void *priv __attribute__((unused)),
+ int cpu __attribute__((unused)),
+ const char *name __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+ return 0;
+}
+
+static void client_buffer_finalize(
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ void *priv __attribute__((unused)),
+ int cpu __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+}
+
+static void client_content_size_field(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ size_t *offset, size_t *length)
+{
+ *offset = offsetof(struct packet_header, ctx.content_size);
+ *length = sizeof(((struct packet_header *) NULL)->ctx.content_size);
+}
+
+static void client_packet_size_field(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ size_t *offset, size_t *length)
+{
+ *offset = offsetof(struct packet_header, ctx.packet_size);
+ *length = sizeof(((struct packet_header *) NULL)->ctx.packet_size);
+}
+
+static struct packet_header *client_packet_header(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+{
+ return lib_ring_buffer_read_offset_address(&buf->backend, 0, handle);
+}
+
+static int client_timestamp_begin(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *timestamp_begin)
+{
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
+ *timestamp_begin = header->ctx.timestamp_begin;
+ return 0;
+}
+
+static int client_timestamp_end(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *timestamp_end)
+{
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
+ *timestamp_end = header->ctx.timestamp_end;
+ return 0;
+}
+
+static int client_events_discarded(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *events_discarded)
+{
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
+ *events_discarded = header->ctx.events_discarded;
+ return 0;
+}
+
+static int client_content_size(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *content_size)
+{
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
+ *content_size = header->ctx.content_size;
+ return 0;
+}
+
+static int client_packet_size(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *packet_size)
+{
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
+ *packet_size = header->ctx.packet_size;
+ return 0;
+}
+
+static int client_stream_id(struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *stream_id)
+{
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
+
+ *stream_id = lttng_chan->priv->id;
+
+ return 0;
+}
+
+static int client_current_timestamp(
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *ts)
+{
+ *ts = client_ring_buffer_clock_read(chan);
+
+ return 0;
+}
+
+static int client_sequence_number(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *seq)
+{
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
+ *seq = header->ctx.packet_seq_num;
+ return 0;
+}
+
+static int client_instance_id(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ uint64_t *id)
+{
+ *id = buf->backend.cpu;
+
+ return 0;
+}
+
+static const
+struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
+ .parent = {
+ .ring_buffer_clock_read = client_ring_buffer_clock_read,
+ .record_header_size = client_record_header_size,
+ .subbuffer_header_size = client_packet_header_size,
+ .buffer_begin = client_buffer_begin,
+ .buffer_end = client_buffer_end,
+ .buffer_create = client_buffer_create,
+ .buffer_finalize = client_buffer_finalize,
+ .content_size_field = client_content_size_field,
+ .packet_size_field = client_packet_size_field,
+ },
+ .timestamp_begin = client_timestamp_begin,
+ .timestamp_end = client_timestamp_end,
+ .events_discarded = client_events_discarded,
+ .content_size = client_content_size,
+ .packet_size = client_packet_size,
+ .stream_id = client_stream_id,
+ .current_timestamp = client_current_timestamp,
+ .sequence_number = client_sequence_number,
+ .instance_id = client_instance_id,
+};
+
+static const struct lttng_ust_lib_ring_buffer_config client_config = {
+ .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
+ .cb.record_header_size = client_record_header_size,
+ .cb.subbuffer_header_size = client_packet_header_size,
+ .cb.buffer_begin = client_buffer_begin,
+ .cb.buffer_end = client_buffer_end,
+ .cb.buffer_create = client_buffer_create,
+ .cb.buffer_finalize = client_buffer_finalize,
+ .cb.content_size_field = client_content_size_field,
+ .cb.packet_size_field = client_packet_size_field,
+
+ .tsc_bits = LTTNG_COMPACT_TSC_BITS,
+ .alloc = RING_BUFFER_ALLOC_PER_CPU,
+ .sync = RING_BUFFER_SYNC_GLOBAL,
+ .mode = RING_BUFFER_MODE_TEMPLATE,
+ .backend = RING_BUFFER_PAGE,
+ .output = RING_BUFFER_MMAP,
+ .oops = RING_BUFFER_OOPS_CONSISTENCY,
+ .ipi = RING_BUFFER_NO_IPI_BARRIER,
+ .wakeup = LTTNG_CLIENT_WAKEUP,
+ .client_type = LTTNG_CLIENT_TYPE,
+
+ .cb_ptr = &client_cb.parent,
+};
+
+static
+struct lttng_ust_channel_buffer *_channel_create(const char *name,
+ void *buf_addr,
+ size_t subbuf_size, size_t num_subbuf,
+ unsigned int switch_timer_interval,
+ unsigned int read_timer_interval,
+ unsigned char *uuid,
+ uint32_t chan_id,
+ const int *stream_fds, int nr_stream_fds,
+ int64_t blocking_timeout)
+{
+ struct lttng_ust_abi_channel_config chan_priv_init;
+ struct lttng_ust_shm_handle *handle;
+ struct lttng_ust_channel_buffer *lttng_chan_buf;
+
+ lttng_chan_buf = lttng_ust_alloc_channel_buffer();
+ if (!lttng_chan_buf)
+ return NULL;
+ memcpy(lttng_chan_buf->priv->uuid, uuid, LTTNG_UST_UUID_LEN);
+ lttng_chan_buf->priv->id = chan_id;
+
+ memset(&chan_priv_init, 0, sizeof(chan_priv_init));
+ memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
+ chan_priv_init.id = chan_id;
+
+ handle = channel_create(&client_config, name,
+ __alignof__(struct lttng_ust_abi_channel_config),
+ sizeof(struct lttng_ust_abi_channel_config),
+ &chan_priv_init,
+ lttng_chan_buf, buf_addr, subbuf_size, num_subbuf,
+ switch_timer_interval, read_timer_interval,
+ stream_fds, nr_stream_fds, blocking_timeout);
+ if (!handle)
+ goto error;
+ lttng_chan_buf->priv->rb_chan = shmp(handle, handle->chan);
+ return lttng_chan_buf;
+
+error:
+ lttng_ust_free_channel_common(lttng_chan_buf->parent);
+ return NULL;
+}
+
+static
+void lttng_channel_destroy(struct lttng_ust_channel_buffer *lttng_chan_buf)
+{
+ channel_destroy(lttng_chan_buf->priv->rb_chan, lttng_chan_buf->priv->rb_chan->handle, 1);
+ lttng_ust_free_channel_common(lttng_chan_buf->parent);
+}
+
+static
+int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+ struct lttng_ust_event_recorder *event_recorder = ctx->client_priv;
+ struct lttng_ust_channel_buffer *lttng_chan = event_recorder->chan;
+ struct lttng_client_ctx client_ctx;
+ int ret, nesting;
+ struct lttng_ust_lib_ring_buffer_ctx_private *private_ctx;
+ uint32_t event_id;
+
+ event_id = event_recorder->priv->id;
+ client_ctx.chan_ctx = lttng_ust_rcu_dereference(lttng_chan->priv->ctx);
+ client_ctx.event_ctx = lttng_ust_rcu_dereference(event_recorder->priv->ctx);
+ /* Compute internal size of context structures. */
+ ctx_get_struct_size(client_ctx.chan_ctx, &client_ctx.packet_context_len);
+ ctx_get_struct_size(client_ctx.event_ctx, &client_ctx.event_context_len);
+
+ nesting = lib_ring_buffer_nesting_inc(&client_config);
+ if (nesting < 0)
+ return -EPERM;
+
+ private_ctx = &URCU_TLS(private_ctx_stack)[nesting];
+ memset(private_ctx, 0, sizeof(*private_ctx));
+ private_ctx->pub = ctx;
+ private_ctx->chan = lttng_chan->priv->rb_chan;
+
+ ctx->priv = private_ctx;
+
+ switch (lttng_chan->priv->header_type) {
+ case 1: /* compact */
+ if (event_id > 30)
+ private_ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+ break;
+ case 2: /* large */
+ if (event_id > 65534)
+ private_ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
+ if (caa_unlikely(ret))
+ goto put;
+ if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
+ &private_ctx->backend_pages)) {
+ ret = -EPERM;
+ goto put;
+ }
+ lttng_write_event_header(&client_config, ctx, &client_ctx, event_id);
+ return 0;
+put:
+ lib_ring_buffer_nesting_dec(&client_config);
+ return ret;
+}
+
+static
+void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+ lib_ring_buffer_commit(&client_config, ctx);
+ lib_ring_buffer_nesting_dec(&client_config);
+}
+
+static
+void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const void *src, size_t len, size_t alignment)
+{
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, alignment);
+ lib_ring_buffer_write(&client_config, ctx, src, len);
+}
+
+static
+void lttng_event_strcpy(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const char *src, size_t len)
+{
+ lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
+}
+
+static
+void lttng_event_pstrcpy_pad(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const char *src, size_t len)
+{
+ lib_ring_buffer_pstrcpy(&client_config, ctx, src, len, '\0');
+}
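+
+/*
+ * Usage sketch (illustrative): the tracepoint probe code drives these
+ * callbacks through the channel ops as a reserve/write/commit sequence
+ * on an already-initialized reservation context ("field" is a
+ * placeholder payload):
+ *
+ *	if (lttng_chan->ops->event_reserve(&ctx))
+ *		return;		// event discarded or nesting overflow
+ *	lttng_chan->ops->event_write(&ctx, &field, sizeof(field),
+ *			lttng_ust_rb_alignof(field));
+ *	lttng_chan->ops->event_commit(&ctx);
+ */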
+
+static
+int lttng_is_finalized(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+
+ return lib_ring_buffer_channel_is_finalized(rb_chan);
+}
+
+static
+int lttng_is_disabled(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+
+ return lib_ring_buffer_channel_is_disabled(rb_chan);
+}
+
+static
+int lttng_flush_buffer(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+ int cpu;
+
+ for_each_channel_cpu(cpu, rb_chan) {
+ int shm_fd, wait_fd, wakeup_fd;
+ uint64_t memory_map_size;
+
+ buf = channel_get_ring_buffer(&client_config, rb_chan,
+ cpu, rb_chan->handle, &shm_fd, &wait_fd,
+ &wakeup_fd, &memory_map_size);
+ lib_ring_buffer_switch(&client_config, buf,
+ SWITCH_ACTIVE, rb_chan->handle);
+ }
+ return 0;
+}
+
+static struct lttng_transport lttng_relay_transport = {
+ .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
+ .ops = {
+ .struct_size = sizeof(struct lttng_ust_channel_buffer_ops),
+ .priv = __LTTNG_COMPOUND_LITERAL(struct lttng_ust_channel_buffer_ops_private, {
+			.pub = &lttng_relay_transport.ops,
+ .channel_create = _channel_create,
+ .channel_destroy = lttng_channel_destroy,
+ .packet_avail_size = NULL, /* Would be racy anyway */
+ .is_finalized = lttng_is_finalized,
+ .is_disabled = lttng_is_disabled,
+ .flush_buffer = lttng_flush_buffer,
+ }),
+ .event_reserve = lttng_event_reserve,
+ .event_commit = lttng_event_commit,
+ .event_write = lttng_event_write,
+ .event_strcpy = lttng_event_strcpy,
+ .event_pstrcpy_pad = lttng_event_pstrcpy_pad,
+ },
+ .client_config = &client_config,
+};
+
+void RING_BUFFER_MODE_TEMPLATE_INIT(void)
+{
+ DBG("LTT : ltt ring buffer client \"%s\" init\n",
+ "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
+	lttng_transport_register(&lttng_relay_transport);
+}
+
+void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
+{
+ DBG("LTT : ltt ring buffer client \"%s\" exit\n",
+ "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
+	lttng_transport_unregister(&lttng_relay_transport);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer client template.
+ */
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <ust-events-internal.h>
+#include "common/bitfield.h"
+#include "common/align.h"
+#include "lttng-tracer.h"
+#include "common/ringbuffer/frontend_types.h"
+#include <urcu/tls-compat.h>
+
+struct metadata_packet_header {
+ uint32_t magic; /* 0x75D11D57 */
+ uint8_t uuid[LTTNG_UST_UUID_LEN]; /* Unique Universal Identifier */
+ uint32_t checksum; /* 0 if unused */
+ uint32_t content_size; /* in bits */
+ uint32_t packet_size; /* in bits */
+ uint8_t compression_scheme; /* 0 if unused */
+ uint8_t encryption_scheme; /* 0 if unused */
+ uint8_t checksum_scheme; /* 0 if unused */
+ uint8_t major; /* CTF spec major version number */
+ uint8_t minor; /* CTF spec minor version number */
+ uint8_t header_end[0];
+};
+
+struct metadata_record_header {
+ uint8_t header_end[0]; /* End of header */
+};
+
+static const struct lttng_ust_lib_ring_buffer_config client_config;
+
+/* No nested use supported for metadata ring buffer. */
+static DEFINE_URCU_TLS(struct lttng_ust_lib_ring_buffer_ctx_private, private_ctx);
+
+static inline uint64_t lib_ring_buffer_clock_read(
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
+{
+ return 0;
+}
+
+static inline
+size_t record_header_size(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ size_t offset __attribute__((unused)),
+ size_t *pre_header_padding __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
+ void *client_ctx __attribute__((unused)))
+{
+ return 0;
+}
+
+#include "common/ringbuffer/api.h"
+#include "lttng-rb-clients.h"
+
+static uint64_t client_ring_buffer_clock_read(
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
+{
+ return 0;
+}
+
+static
+size_t client_record_header_size(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ size_t offset __attribute__((unused)),
+ size_t *pre_header_padding __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
+ void *client_ctx __attribute__((unused)))
+{
+ return 0;
+}
+
+/**
+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
+ *
+ * Return header size without padding after the structure. Don't use packed
+ * structure because gcc generates inefficient code on some architectures
+ * (powerpc, mips..)
+ */
+static size_t client_packet_header_size(void)
+{
+ return offsetof(struct metadata_packet_header, header_end);
+}
+
+static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf,
+ uint64_t tsc __attribute__((unused)),
+ unsigned int subbuf_idx,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct metadata_packet_header *header =
+ (struct metadata_packet_header *)
+ lib_ring_buffer_offset_address(&buf->backend,
+ subbuf_idx * chan->backend.subbuf_size,
+ handle);
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
+
+ assert(header);
+ if (!header)
+ return;
+ header->magic = TSDL_MAGIC_NUMBER;
+ memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
+ header->checksum = 0; /* 0 if unused */
+ header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
+ header->packet_size = 0xFFFFFFFF; /* in bits, for debugging */
+ header->compression_scheme = 0; /* 0 if unused */
+ header->encryption_scheme = 0; /* 0 if unused */
+ header->checksum_scheme = 0; /* 0 if unused */
+ header->major = CTF_SPEC_MAJOR;
+ header->minor = CTF_SPEC_MINOR;
+}
+
+/*
+ * offset is assumed to never be 0 here: never deliver a completely empty
+ * subbuffer. data_size is between 1 and subbuf_size.
+ */
+static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf,
+ uint64_t tsc __attribute__((unused)),
+ unsigned int subbuf_idx, unsigned long data_size,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct metadata_packet_header *header =
+ (struct metadata_packet_header *)
+ lib_ring_buffer_offset_address(&buf->backend,
+ subbuf_idx * chan->backend.subbuf_size,
+ handle);
+ unsigned long records_lost = 0;
+
+ assert(header);
+ if (!header)
+ return;
+ header->content_size = data_size * CHAR_BIT; /* in bits */
+ header->packet_size = LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
+ /*
+ * We do not care about the records lost count, because the metadata
+	 * channel waits and retries.
+ */
+ (void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
+ records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
+ records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
+ WARN_ON_ONCE(records_lost != 0);
+}
+
+static int client_buffer_create(
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ void *priv __attribute__((unused)),
+ int cpu __attribute__((unused)),
+ const char *name __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+ return 0;
+}
+
+static void client_buffer_finalize(
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ void *priv __attribute__((unused)),
+ int cpu __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+}
+
+static const
+struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
+ .parent = {
+ .ring_buffer_clock_read = client_ring_buffer_clock_read,
+ .record_header_size = client_record_header_size,
+ .subbuffer_header_size = client_packet_header_size,
+ .buffer_begin = client_buffer_begin,
+ .buffer_end = client_buffer_end,
+ .buffer_create = client_buffer_create,
+ .buffer_finalize = client_buffer_finalize,
+ },
+};
+
+static const struct lttng_ust_lib_ring_buffer_config client_config = {
+ .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
+ .cb.record_header_size = client_record_header_size,
+ .cb.subbuffer_header_size = client_packet_header_size,
+ .cb.buffer_begin = client_buffer_begin,
+ .cb.buffer_end = client_buffer_end,
+ .cb.buffer_create = client_buffer_create,
+ .cb.buffer_finalize = client_buffer_finalize,
+
+ .tsc_bits = 0,
+ .alloc = RING_BUFFER_ALLOC_GLOBAL,
+ .sync = RING_BUFFER_SYNC_GLOBAL,
+ .mode = RING_BUFFER_MODE_TEMPLATE,
+ .backend = RING_BUFFER_PAGE,
+ .output = RING_BUFFER_MMAP,
+ .oops = RING_BUFFER_OOPS_CONSISTENCY,
+ .ipi = RING_BUFFER_NO_IPI_BARRIER,
+ .wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
+ .client_type = LTTNG_CLIENT_TYPE,
+
+ .cb_ptr = &client_cb.parent,
+};
+
+static
+struct lttng_ust_channel_buffer *_channel_create(const char *name,
+ void *buf_addr,
+ size_t subbuf_size, size_t num_subbuf,
+ unsigned int switch_timer_interval,
+ unsigned int read_timer_interval,
+ unsigned char *uuid,
+ uint32_t chan_id,
+ const int *stream_fds, int nr_stream_fds,
+ int64_t blocking_timeout)
+{
+ struct lttng_ust_abi_channel_config chan_priv_init;
+ struct lttng_ust_shm_handle *handle;
+ struct lttng_ust_channel_buffer *lttng_chan_buf;
+
+ lttng_chan_buf = lttng_ust_alloc_channel_buffer();
+ if (!lttng_chan_buf)
+ return NULL;
+ memcpy(lttng_chan_buf->priv->uuid, uuid, LTTNG_UST_UUID_LEN);
+ lttng_chan_buf->priv->id = chan_id;
+
+ memset(&chan_priv_init, 0, sizeof(chan_priv_init));
+ memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
+ chan_priv_init.id = chan_id;
+
+ handle = channel_create(&client_config, name,
+ __alignof__(struct lttng_ust_channel_buffer),
+ sizeof(struct lttng_ust_channel_buffer),
+ &chan_priv_init,
+ lttng_chan_buf, buf_addr, subbuf_size, num_subbuf,
+ switch_timer_interval, read_timer_interval,
+ stream_fds, nr_stream_fds, blocking_timeout);
+ if (!handle)
+ goto error;
+ lttng_chan_buf->priv->rb_chan = shmp(handle, handle->chan);
+ return lttng_chan_buf;
+
+error:
+ lttng_ust_free_channel_common(lttng_chan_buf->parent);
+ return NULL;
+}
+
+static
+void lttng_channel_destroy(struct lttng_ust_channel_buffer *lttng_chan_buf)
+{
+ channel_destroy(lttng_chan_buf->priv->rb_chan, lttng_chan_buf->priv->rb_chan->handle, 1);
+ lttng_ust_free_channel_common(lttng_chan_buf->parent);
+}
+
+static
+int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+ int ret;
+
+ memset(&URCU_TLS(private_ctx), 0, sizeof(struct lttng_ust_lib_ring_buffer_ctx_private));
+ URCU_TLS(private_ctx).pub = ctx;
+ URCU_TLS(private_ctx).chan = ctx->client_priv;
+ ctx->priv = &URCU_TLS(private_ctx);
+ ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
+ if (ret)
+ return ret;
+ if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
+ &ctx->priv->backend_pages))
+ return -EPERM;
+ return 0;
+}
+
+static
+void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+ lib_ring_buffer_commit(&client_config, ctx);
+}
+
+static
+void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const void *src, size_t len, size_t alignment)
+{
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, alignment);
+ lib_ring_buffer_write(&client_config, ctx, src, len);
+}
+
+static
+size_t lttng_packet_avail_size(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ unsigned long o_begin;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ buf = shmp(rb_chan->handle, rb_chan->backend.buf[0].shmp); /* Only for global buffer ! */
+ o_begin = v_read(&client_config, &buf->offset);
+ if (subbuf_offset(o_begin, rb_chan) != 0) {
+ return rb_chan->backend.subbuf_size - subbuf_offset(o_begin, rb_chan);
+ } else {
+ return rb_chan->backend.subbuf_size - subbuf_offset(o_begin, rb_chan)
+ - sizeof(struct metadata_packet_header);
+ }
+}
+
+static
+int lttng_is_finalized(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+
+ return lib_ring_buffer_channel_is_finalized(rb_chan);
+}
+
+static
+int lttng_is_disabled(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+
+ return lib_ring_buffer_channel_is_disabled(rb_chan);
+}
+
+static
+int lttng_flush_buffer(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+ int shm_fd, wait_fd, wakeup_fd;
+ uint64_t memory_map_size;
+
+ buf = channel_get_ring_buffer(&client_config, rb_chan,
+ 0, rb_chan->handle, &shm_fd, &wait_fd, &wakeup_fd,
+ &memory_map_size);
+ lib_ring_buffer_switch(&client_config, buf,
+ SWITCH_ACTIVE, rb_chan->handle);
+ return 0;
+}
+
+static struct lttng_transport lttng_relay_transport = {
+ .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
+ .ops = {
+ .struct_size = sizeof(struct lttng_ust_channel_buffer_ops),
+
+ .priv = __LTTNG_COMPOUND_LITERAL(struct lttng_ust_channel_buffer_ops_private, {
+			.pub = &lttng_relay_transport.ops,
+ .channel_create = _channel_create,
+ .channel_destroy = lttng_channel_destroy,
+ .packet_avail_size = lttng_packet_avail_size,
+ .is_finalized = lttng_is_finalized,
+ .is_disabled = lttng_is_disabled,
+ .flush_buffer = lttng_flush_buffer,
+ }),
+ .event_reserve = lttng_event_reserve,
+ .event_commit = lttng_event_commit,
+ .event_write = lttng_event_write,
+ },
+ .client_config = &client_config,
+};
+
+void RING_BUFFER_MODE_TEMPLATE_INIT(void)
+{
+ DBG("LTT : ltt ring buffer client \"%s\" init\n",
+ "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
+	lttng_transport_register(&lttng_relay_transport);
+}
+
+void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
+{
+ DBG("LTT : ltt ring buffer client \"%s\" exit\n",
+ "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
+	lttng_transport_unregister(&lttng_relay_transport);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer metadata client.
+ */
+
+#define _LGPL_SOURCE
+#include "lttng-tracer.h"
+
+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata"
+#define RING_BUFFER_MODE_TEMPLATE_INIT \
+ lttng_ring_buffer_metadata_client_init
+#define RING_BUFFER_MODE_TEMPLATE_EXIT \
+ lttng_ring_buffer_metadata_client_exit
+#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_METADATA
+#include "lttng-ring-buffer-metadata-client-template.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This contains the core definitions for the Linux Trace Toolkit.
+ */
+
+#ifndef _LTTNG_TRACER_CORE_H
+#define _LTTNG_TRACER_CORE_H
+
+#include <stddef.h>
+#include <urcu/arch.h>
+#include <urcu/list.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include "common/logging.h"
+
+/*
+ * The longest possible namespace proc path is with the cgroup ns
+ * and the maximum theoretical Linux PID of 536870912:
+ *
+ * /proc/self/task/536870912/ns/cgroup
+ */
+#define LTTNG_PROC_NS_PATH_MAX 40
+
+struct lttng_ust_session;
+struct lttng_ust_channel_buffer;
+struct lttng_ust_ctx_field;
+struct lttng_ust_lib_ring_buffer_ctx;
+struct lttng_ust_ctx_value;
+struct lttng_ust_event_recorder;
+struct lttng_ust_event_notifier;
+struct lttng_ust_notification_ctx;
+
+int ust_lock(void) __attribute__ ((warn_unused_result))
+ __attribute__((visibility("hidden")));
+
+void ust_lock_nocheck(void)
+ __attribute__((visibility("hidden")));
+
+void ust_unlock(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_fixup_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_event_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_vtid_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_procname_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_cgroup_ns_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_ipc_ns_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_net_ns_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_time_ns_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_uts_ns_tls(void)
+ __attribute__((visibility("hidden")));
+
+const char *lttng_ust_obj_get_name(int id)
+ __attribute__((visibility("hidden")));
+
+int lttng_get_notify_socket(void *owner)
+ __attribute__((visibility("hidden")));
+
+char* lttng_ust_sockinfo_get_procname(void *owner)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_sockinfo_session_enabled(void *owner)
+ __attribute__((visibility("hidden")));
+
+ssize_t lttng_ust_read(int fd, void *buf, size_t len)
+ __attribute__((visibility("hidden")));
+
+size_t lttng_ust_dummy_get_size(void *priv, size_t offset)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_dummy_record(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_dummy_get_value(void *priv, struct lttng_ust_ctx_value *value)
+ __attribute__((visibility("hidden")));
+
+void lttng_event_notifier_notification_send(
+ struct lttng_ust_event_notifier *event_notifier,
+ const char *stack_data,
+ struct lttng_ust_notification_ctx *notif_ctx)
+ __attribute__((visibility("hidden")));
+
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_transport_register(struct lttng_counter_transport *transport)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
+ __attribute__((visibility("hidden")));
+
+#ifdef HAVE_LINUX_PERF_EVENT_H
+void lttng_ust_fixup_perf_counter_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_perf_lock(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_perf_unlock(void)
+ __attribute__((visibility("hidden")));
+#else /* #ifdef HAVE_LINUX_PERF_EVENT_H */
+static inline
+void lttng_ust_fixup_perf_counter_tls(void)
+{
+}
+static inline
+void lttng_perf_lock(void)
+{
+}
+static inline
+void lttng_perf_unlock(void)
+{
+}
+#endif /* #else #ifdef HAVE_LINUX_PERF_EVENT_H */
+
+#endif /* _LTTNG_TRACER_CORE_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This contains the definitions for the Linux Trace Toolkit tracer.
+ *
+ * Ported to userspace by Pierre-Marc Fournier.
+ */
+
+#ifndef _LTTNG_TRACER_H
+#define _LTTNG_TRACER_H
+
+#include <stdarg.h>
+#include <stdint.h>
+#include <lttng/ust-events.h>
+#include "lttng-tracer-core.h"
+
+/* Tracer properties */
+#define CTF_MAGIC_NUMBER 0xC1FC1FC1
+#define TSDL_MAGIC_NUMBER 0x75D11D57
+
+/* CTF specification version followed */
+#define CTF_SPEC_MAJOR 1
+#define CTF_SPEC_MINOR 8
+
+/*
+ * Number of milliseconds to retry before failing metadata writes on buffer full
+ * condition. (10 seconds)
+ */
+#define LTTNG_METADATA_TIMEOUT_MSEC 10000
+
+#define LTTNG_RFLAG_EXTENDED RING_BUFFER_RFLAG_END
+#define LTTNG_RFLAG_END (LTTNG_RFLAG_EXTENDED << 1)
+
+/*
+ * LTTng client type enumeration. Used by the consumer to map the
+ * callbacks from its own address space.
+ */
+enum lttng_client_types {
+ LTTNG_CLIENT_METADATA = 0,
+ LTTNG_CLIENT_DISCARD = 1,
+ LTTNG_CLIENT_OVERWRITE = 2,
+ LTTNG_CLIENT_DISCARD_RT = 3,
+ LTTNG_CLIENT_OVERWRITE_RT = 4,
+ LTTNG_NR_CLIENT_TYPES,
+};
+
+#endif /* _LTTNG_TRACER_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST ABI
+ *
+ * Mimic system calls for:
+ * - session creation, returns an object descriptor or failure.
+ * - channel creation, returns an object descriptor or failure.
+ * - Operates on a session object descriptor
+ * - Takes all channel options as parameters.
+ * - stream get, returns an object descriptor or failure.
+ * - Operates on a channel object descriptor.
+ * - stream notifier get, returns an object descriptor or failure.
+ * - Operates on a channel object descriptor.
+ * - event creation, returns an object descriptor or failure.
+ * - Operates on a channel object descriptor
+ * - Takes an event name as parameter
+ * - Takes an instrumentation source as parameter
+ * - e.g. tracepoints, dynamic_probes...
+ * - Takes instrumentation source specific arguments.
+ */
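+
+/*
+ * For illustration, the typical chain of object descriptors built by the
+ * session daemon over the command socket looks roughly as follows (a
+ * sketch only: each step returns a new objd on which the next command
+ * operates, and argument marshalling is omitted; see the per-object cmd
+ * handlers below for the real signatures):
+ *
+ *   root_objd    = lttng_abi_create_root_handle();
+ *   session_objd = cmd(root_objd, LTTNG_UST_ABI_SESSION);
+ *   chan_objd    = cmd(session_objd, LTTNG_UST_ABI_CHANNEL);
+ *                  cmd(chan_objd, LTTNG_UST_ABI_STREAM);    (one per stream)
+ *   event_objd   = cmd(chan_objd, LTTNG_UST_ABI_EVENT);
+ */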
+
+#define _LGPL_SOURCE
+#include <fcntl.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include <urcu/compiler.h>
+#include <urcu/list.h>
+
+#include <lttng/tracepoint.h>
+#include <lttng/ust-abi.h>
+#include <lttng/ust-error.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-version.h>
+
+#include "common/ust-fd.h"
+#include "common/logging.h"
+
+#include "common/ringbuffer/frontend_types.h"
+#include "common/ringbuffer/frontend.h"
+#include "common/ringbuffer/shm.h"
+#include "common/counter/counter.h"
+#include "tracepoint-internal.h"
+#include "lttng-tracer.h"
+#include "string-utils.h"
+#include "ust-events-internal.h"
+#include "context-internal.h"
+#include "common/macros.h"
+
+#define OBJ_NAME_LEN 16
+
+static int lttng_ust_abi_close_in_progress;
+
+static
+int lttng_abi_tracepoint_list(void *owner);
+static
+int lttng_abi_tracepoint_field_list(void *owner);
+
+/*
+ * Object descriptor table. Should be protected from concurrent access
+ * by the caller.
+ */
+
+struct lttng_ust_abi_obj {
+ union {
+ struct {
+ void *private_data;
+ const struct lttng_ust_abi_objd_ops *ops;
+ int f_count;
+ int owner_ref; /* has ref from owner */
+ void *owner;
+ char name[OBJ_NAME_LEN];
+ } s;
+ int freelist_next; /* offset freelist. end is -1. */
+ } u;
+};
+
+struct lttng_ust_abi_objd_table {
+ struct lttng_ust_abi_obj *array;
+ unsigned int len, allocated_len;
+ int freelist_head; /* offset freelist head. end is -1 */
+};
+
+static struct lttng_ust_abi_objd_table objd_table = {
+ .freelist_head = -1,
+};
+
+static
+int objd_alloc(void *private_data, const struct lttng_ust_abi_objd_ops *ops,
+ void *owner, const char *name)
+{
+ struct lttng_ust_abi_obj *obj;
+
+ if (objd_table.freelist_head != -1) {
+ obj = &objd_table.array[objd_table.freelist_head];
+ objd_table.freelist_head = obj->u.freelist_next;
+ goto end;
+ }
+
+ if (objd_table.len >= objd_table.allocated_len) {
+ unsigned int new_allocated_len, old_allocated_len;
+ struct lttng_ust_abi_obj *new_table, *old_table;
+
+ old_allocated_len = objd_table.allocated_len;
+ old_table = objd_table.array;
+ if (!old_allocated_len)
+ new_allocated_len = 1;
+ else
+ new_allocated_len = old_allocated_len << 1;
+ new_table = zmalloc(sizeof(struct lttng_ust_abi_obj) * new_allocated_len);
+ if (!new_table)
+ return -ENOMEM;
+ memcpy(new_table, old_table,
+ sizeof(struct lttng_ust_abi_obj) * old_allocated_len);
+ free(old_table);
+ objd_table.array = new_table;
+ objd_table.allocated_len = new_allocated_len;
+ }
+ obj = &objd_table.array[objd_table.len];
+ objd_table.len++;
+end:
+ obj->u.s.private_data = private_data;
+ obj->u.s.ops = ops;
+ obj->u.s.f_count = 2; /* count == 1 : object is allocated */
+ /* count == 2 : allocated + hold ref */
+ obj->u.s.owner_ref = 1; /* One owner reference */
+ obj->u.s.owner = owner;
+ strncpy(obj->u.s.name, name, OBJ_NAME_LEN);
+ obj->u.s.name[OBJ_NAME_LEN - 1] = '\0';
+ return obj - objd_table.array;
+}
+
+static
+struct lttng_ust_abi_obj *_objd_get(int id)
+{
+ if (id >= objd_table.len)
+ return NULL;
+ if (!objd_table.array[id].u.s.f_count)
+ return NULL;
+ return &objd_table.array[id];
+}
+
+static
+void *objd_private(int id)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+ assert(obj);
+ return obj->u.s.private_data;
+}
+
+static
+void objd_set_private(int id, void *private_data)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+ assert(obj);
+ obj->u.s.private_data = private_data;
+}
+
+const struct lttng_ust_abi_objd_ops *lttng_ust_abi_objd_ops(int id)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+
+ if (!obj)
+ return NULL;
+ return obj->u.s.ops;
+}
+
+static
+void objd_free(int id)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+
+ assert(obj);
+ obj->u.freelist_next = objd_table.freelist_head;
+ objd_table.freelist_head = obj - objd_table.array;
+ assert(obj->u.s.f_count == 1);
+ obj->u.s.f_count = 0; /* deallocated */
+}
+
+static
+void objd_ref(int id)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+ assert(obj != NULL);
+ obj->u.s.f_count++;
+}
+
+int lttng_ust_abi_objd_unref(int id, int is_owner)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+
+ if (!obj)
+ return -EINVAL;
+ if (obj->u.s.f_count == 1) {
+ ERR("Reference counting error\n");
+ return -EINVAL;
+ }
+ if (is_owner) {
+ if (!obj->u.s.owner_ref) {
+ ERR("Error decrementing owner reference");
+ return -EINVAL;
+ }
+ obj->u.s.owner_ref--;
+ }
+ if ((--obj->u.s.f_count) == 1) {
+ const struct lttng_ust_abi_objd_ops *ops = lttng_ust_abi_objd_ops(id);
+
+ if (ops->release)
+ ops->release(id);
+ objd_free(id);
+ }
+ return 0;
+}
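+
+/*
+ * Reference counting life cycle of an object descriptor, as a sketch
+ * (the actual call sites are spread across the cmd handlers below):
+ *
+ *   id = objd_alloc(...);              f_count == 2 (allocated + owner ref)
+ *   objd_ref(id);                      a dependent object takes a ref (3)
+ *   lttng_ust_abi_objd_unref(id, 1);   the owner drops its ref (2)
+ *   lttng_ust_abi_objd_unref(id, 0);   the dependent drops its ref (1):
+ *                                      ops->release() runs and the slot
+ *                                      goes back on the freelist.
+ */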
+
+static
+void objd_table_destroy(void)
+{
+ int i;
+
+ for (i = 0; i < objd_table.allocated_len; i++) {
+ struct lttng_ust_abi_obj *obj;
+
+ obj = _objd_get(i);
+ if (!obj)
+ continue;
+ if (!obj->u.s.owner_ref)
+ continue; /* only unref owner ref. */
+ (void) lttng_ust_abi_objd_unref(i, 1);
+ }
+ free(objd_table.array);
+ objd_table.array = NULL;
+ objd_table.len = 0;
+ objd_table.allocated_len = 0;
+ objd_table.freelist_head = -1;
+}
+
+const char *lttng_ust_obj_get_name(int id)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+
+ if (!obj)
+ return NULL;
+ return obj->u.s.name;
+}
+
+void lttng_ust_abi_objd_table_owner_cleanup(void *owner)
+{
+ int i;
+
+ for (i = 0; i < objd_table.allocated_len; i++) {
+ struct lttng_ust_abi_obj *obj;
+
+ obj = _objd_get(i);
+ if (!obj)
+ continue;
+ if (!obj->u.s.owner)
+ continue; /* skip root handles */
+ if (!obj->u.s.owner_ref)
+ continue; /* only unref owner ref. */
+ if (obj->u.s.owner == owner)
+ (void) lttng_ust_abi_objd_unref(i, 1);
+ }
+}
+
+/*
+ * This is LTTng's own personal way to create an ABI for sessiond.
+ * We send commands over a socket.
+ */
+
+static const struct lttng_ust_abi_objd_ops lttng_ops;
+static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_ops;
+static const struct lttng_ust_abi_objd_ops lttng_session_ops;
+static const struct lttng_ust_abi_objd_ops lttng_channel_ops;
+static const struct lttng_ust_abi_objd_ops lttng_event_enabler_ops;
+static const struct lttng_ust_abi_objd_ops lttng_event_notifier_enabler_ops;
+static const struct lttng_ust_abi_objd_ops lttng_tracepoint_list_ops;
+static const struct lttng_ust_abi_objd_ops lttng_tracepoint_field_list_ops;
+
+int lttng_abi_create_root_handle(void)
+{
+ int root_handle;
+
+ /* root handles have NULL owners */
+	root_handle = objd_alloc(NULL, &lttng_ops, NULL, "root");
+ return root_handle;
+}
+
+static
+int lttng_is_channel_ready(struct lttng_ust_channel_buffer *lttng_chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ unsigned int nr_streams, exp_streams;
+
+ chan = lttng_chan->priv->rb_chan;
+ nr_streams = channel_handle_get_nr_streams(lttng_chan->priv->rb_chan->handle);
+ exp_streams = chan->nr_streams;
+ return nr_streams == exp_streams;
+}
+
+static
+int lttng_abi_create_session(void *owner)
+{
+ struct lttng_ust_session *session;
+ int session_objd, ret;
+
+ session = lttng_session_create();
+ if (!session)
+ return -ENOMEM;
+	session_objd = objd_alloc(session, &lttng_session_ops, owner, "session");
+ if (session_objd < 0) {
+ ret = session_objd;
+ goto objd_error;
+ }
+ session->priv->objd = session_objd;
+ session->priv->owner = owner;
+ return session_objd;
+
+objd_error:
+ lttng_session_destroy(session);
+ return ret;
+}
+
+static
+long lttng_abi_tracer_version(int objd __attribute__((unused)),
+ struct lttng_ust_abi_tracer_version *v)
+{
+ v->major = LTTNG_UST_MAJOR_VERSION;
+ v->minor = LTTNG_UST_MINOR_VERSION;
+ v->patchlevel = LTTNG_UST_PATCHLEVEL_VERSION;
+ return 0;
+}
+
+static
+int lttng_abi_event_notifier_send_fd(void *owner, int *event_notifier_notif_fd)
+{
+ struct lttng_event_notifier_group *event_notifier_group;
+ int event_notifier_group_objd, ret, fd_flag;
+
+ event_notifier_group = lttng_event_notifier_group_create();
+ if (!event_notifier_group)
+ return -ENOMEM;
+
+ /*
+ * Set this file descriptor as NON-BLOCKING.
+ */
+ fd_flag = fcntl(*event_notifier_notif_fd, F_GETFL);
+
+ fd_flag |= O_NONBLOCK;
+
+ ret = fcntl(*event_notifier_notif_fd, F_SETFL, fd_flag);
+ if (ret) {
+ ret = -errno;
+ goto fd_error;
+ }
+
+ event_notifier_group_objd = objd_alloc(event_notifier_group,
+		&lttng_event_notifier_group_ops, owner, "event_notifier_group");
+ if (event_notifier_group_objd < 0) {
+ ret = event_notifier_group_objd;
+ goto objd_error;
+ }
+
+ event_notifier_group->objd = event_notifier_group_objd;
+ event_notifier_group->owner = owner;
+ event_notifier_group->notification_fd = *event_notifier_notif_fd;
+ /* Object descriptor takes ownership of notification fd. */
+ *event_notifier_notif_fd = -1;
+
+ return event_notifier_group_objd;
+
+objd_error:
+ lttng_event_notifier_group_destroy(event_notifier_group);
+fd_error:
+ return ret;
+}
+
+static
+long lttng_abi_add_context(int objd __attribute__((unused)),
+ struct lttng_ust_abi_context *context_param,
+ union lttng_ust_abi_args *uargs,
+ struct lttng_ust_ctx **ctx, struct lttng_ust_session *session)
+{
+ return lttng_attach_context(context_param, uargs, ctx, session);
+}
+
+/**
+ * lttng_cmd - lttng control through socket commands
+ *
+ * @objd: the object descriptor
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This descriptor implements lttng commands:
+ * LTTNG_UST_ABI_SESSION
+ * Returns a LTTng trace session object descriptor
+ * LTTNG_UST_ABI_TRACER_VERSION
+ * Returns the LTTng kernel tracer version
+ * LTTNG_UST_ABI_TRACEPOINT_LIST
+ * Returns a file descriptor listing available tracepoints
+ * LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST
+ * Returns a file descriptor listing available tracepoint fields
+ * LTTNG_UST_ABI_WAIT_QUIESCENT
+ * Returns after all previously running probes have completed
+ *
+ * The returned session will be deleted when its file descriptor is closed.
+ */
+static
+long lttng_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs, void *owner)
+{
+ switch (cmd) {
+ case LTTNG_UST_ABI_SESSION:
+ return lttng_abi_create_session(owner);
+ case LTTNG_UST_ABI_TRACER_VERSION:
+ return lttng_abi_tracer_version(objd,
+ (struct lttng_ust_abi_tracer_version *) arg);
+ case LTTNG_UST_ABI_TRACEPOINT_LIST:
+ return lttng_abi_tracepoint_list(owner);
+ case LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST:
+ return lttng_abi_tracepoint_field_list(owner);
+ case LTTNG_UST_ABI_WAIT_QUIESCENT:
+ lttng_ust_urcu_synchronize_rcu();
+ return 0;
+ case LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE:
+ return lttng_abi_event_notifier_send_fd(owner,
+ &uargs->event_notifier_handle.event_notifier_notif_fd);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_ops = {
+ .cmd = lttng_cmd,
+};
+
+static
+int lttng_abi_map_channel(int session_objd,
+ struct lttng_ust_abi_channel *ust_chan,
+ union lttng_ust_abi_args *uargs,
+ void *owner)
+{
+ struct lttng_ust_session *session = objd_private(session_objd);
+ const char *transport_name;
+ struct lttng_transport *transport;
+ const char *chan_name;
+ int chan_objd;
+ struct lttng_ust_shm_handle *channel_handle;
+ struct lttng_ust_abi_channel_config *lttng_chan_config;
+ struct lttng_ust_channel_buffer *lttng_chan_buf;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_lib_ring_buffer_config *config;
+ void *chan_data;
+ int wakeup_fd;
+ uint64_t len;
+ int ret;
+ enum lttng_ust_abi_chan_type type;
+
+ chan_data = uargs->channel.chan_data;
+ wakeup_fd = uargs->channel.wakeup_fd;
+ len = ust_chan->len;
+ type = ust_chan->type;
+
+ switch (type) {
+ case LTTNG_UST_ABI_CHAN_PER_CPU:
+ break;
+ default:
+ ret = -EINVAL;
+ goto invalid;
+ }
+
+ if (session->priv->been_active) {
+ ret = -EBUSY;
+ goto active; /* Refuse to add channel to active session */
+ }
+
+ lttng_chan_buf = lttng_ust_alloc_channel_buffer();
+ if (!lttng_chan_buf) {
+ ret = -ENOMEM;
+ goto lttng_chan_buf_error;
+ }
+
+ channel_handle = channel_handle_create(chan_data, len, wakeup_fd);
+ if (!channel_handle) {
+ ret = -EINVAL;
+ goto handle_error;
+ }
+
+ /* Ownership of chan_data and wakeup_fd taken by channel handle. */
+ uargs->channel.chan_data = NULL;
+ uargs->channel.wakeup_fd = -1;
+
+ chan = shmp(channel_handle, channel_handle->chan);
+ assert(chan);
+ chan->handle = channel_handle;
+ config = &chan->backend.config;
+ lttng_chan_config = channel_get_private_config(chan);
+ if (!lttng_chan_config) {
+ ret = -EINVAL;
+ goto alloc_error;
+ }
+
+ if (lttng_ust_session_uuid_validate(session, lttng_chan_config->uuid)) {
+ ret = -EINVAL;
+ goto uuid_error;
+ }
+
+ /* Lookup transport name */
+ switch (type) {
+ case LTTNG_UST_ABI_CHAN_PER_CPU:
+ if (config->output == RING_BUFFER_MMAP) {
+ if (config->mode == RING_BUFFER_OVERWRITE) {
+ if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER) {
+ transport_name = "relay-overwrite-mmap";
+ } else {
+ transport_name = "relay-overwrite-rt-mmap";
+ }
+ } else {
+ if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER) {
+ transport_name = "relay-discard-mmap";
+ } else {
+ transport_name = "relay-discard-rt-mmap";
+ }
+ }
+ } else {
+ ret = -EINVAL;
+ goto notransport;
+ }
+ chan_name = "channel";
+ break;
+ default:
+ ret = -EINVAL;
+ goto notransport;
+ }
+ transport = lttng_ust_transport_find(transport_name);
+ if (!transport) {
+ DBG("LTTng transport %s not found\n",
+ transport_name);
+ ret = -EINVAL;
+ goto notransport;
+ }
+
+	chan_objd = objd_alloc(NULL, &lttng_channel_ops, owner, chan_name);
+ if (chan_objd < 0) {
+ ret = chan_objd;
+ goto objd_error;
+ }
+
+ /* Initialize our lttng chan */
+ lttng_chan_buf->parent->enabled = 1;
+ lttng_chan_buf->parent->session = session;
+
+ lttng_chan_buf->priv->parent.tstate = 1;
+ lttng_chan_buf->priv->ctx = NULL;
+ lttng_chan_buf->priv->rb_chan = chan;
+
+ lttng_chan_buf->ops = &transport->ops;
+
+ memcpy(&chan->backend.config,
+ transport->client_config,
+ sizeof(chan->backend.config));
+	cds_list_add(&lttng_chan_buf->priv->node, &session->priv->chan_head);
+ lttng_chan_buf->priv->header_type = 0;
+ lttng_chan_buf->priv->type = type;
+ /* Copy fields from lttng ust chan config. */
+ lttng_chan_buf->priv->id = lttng_chan_config->id;
+ memcpy(lttng_chan_buf->priv->uuid, lttng_chan_config->uuid, LTTNG_UST_UUID_LEN);
+ channel_set_private(chan, lttng_chan_buf);
+
+ /*
+ * We tolerate no failure path after channel creation. It will stay
+ * invariant for the rest of the session.
+ */
+ objd_set_private(chan_objd, lttng_chan_buf);
+ lttng_chan_buf->priv->parent.objd = chan_objd;
+ /* The channel created holds a reference on the session */
+ objd_ref(session_objd);
+ return chan_objd;
+
+ /* error path after channel was created */
+objd_error:
+notransport:
+uuid_error:
+alloc_error:
+ channel_destroy(chan, channel_handle, 0);
+ lttng_ust_free_channel_common(lttng_chan_buf->parent);
+ return ret;
+
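+	/* error paths reached before the channel handle was created */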
+handle_error:
+ lttng_ust_free_channel_common(lttng_chan_buf->parent);
+lttng_chan_buf_error:
+active:
+invalid:
+ return ret;
+}
+
+/**
+ * lttng_session_cmd - lttng session object command
+ *
+ * @obj: the object
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This descriptor implements lttng commands:
+ * LTTNG_UST_ABI_CHANNEL
+ * Returns a LTTng channel object descriptor
+ * LTTNG_UST_ABI_ENABLE
+ * Enables tracing for a session (weak enable)
+ * LTTNG_UST_ABI_DISABLE
+ * Disables tracing for a session (strong disable)
+ *
+ * The returned channel will be deleted when its file descriptor is closed.
+ */
+static
+long lttng_session_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs, void *owner)
+{
+ struct lttng_ust_session *session = objd_private(objd);
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_CHANNEL:
+ return lttng_abi_map_channel(objd,
+ (struct lttng_ust_abi_channel *) arg,
+ uargs, owner);
+ case LTTNG_UST_ABI_SESSION_START:
+ case LTTNG_UST_ABI_ENABLE:
+ return lttng_session_enable(session);
+ case LTTNG_UST_ABI_SESSION_STOP:
+ case LTTNG_UST_ABI_DISABLE:
+ return lttng_session_disable(session);
+ case LTTNG_UST_ABI_SESSION_STATEDUMP:
+ return lttng_session_statedump(session);
+ case LTTNG_UST_ABI_COUNTER:
+ case LTTNG_UST_ABI_COUNTER_GLOBAL:
+ case LTTNG_UST_ABI_COUNTER_CPU:
+ /* Not implemented yet. */
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Called when the last file reference is dropped.
+ *
+ * Big fat note: channels and events are invariant for the whole session after
+ * their creation. So this session destruction also destroys all channel and
+ * event structures specific to this session (they are not destroyed when their
+ * individual file is released).
+ */
+static
+int lttng_release_session(int objd)
+{
+ struct lttng_ust_session *session = objd_private(objd);
+
+ if (session) {
+ lttng_session_destroy(session);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_session_ops = {
+ .release = lttng_release_session,
+ .cmd = lttng_session_cmd,
+};
+
+static int lttng_ust_event_notifier_enabler_create(int event_notifier_group_obj,
+ void *owner, struct lttng_ust_abi_event_notifier *event_notifier_param,
+ enum lttng_enabler_format_type type)
+{
+ struct lttng_event_notifier_group *event_notifier_group =
+ objd_private(event_notifier_group_obj);
+ struct lttng_event_notifier_enabler *event_notifier_enabler;
+ int event_notifier_objd, ret;
+
+ event_notifier_param->event.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+	event_notifier_objd = objd_alloc(NULL, &lttng_event_notifier_enabler_ops, owner,
+ "event_notifier enabler");
+ if (event_notifier_objd < 0) {
+ ret = event_notifier_objd;
+ goto objd_error;
+ }
+
+ event_notifier_enabler = lttng_event_notifier_enabler_create(
+ event_notifier_group, type, event_notifier_param);
+ if (!event_notifier_enabler) {
+ ret = -ENOMEM;
+ goto event_notifier_error;
+ }
+
+ objd_set_private(event_notifier_objd, event_notifier_enabler);
+ /* The event_notifier holds a reference on the event_notifier group. */
+ objd_ref(event_notifier_enabler->group->objd);
+
+ return event_notifier_objd;
+
+event_notifier_error:
+ {
+ int err;
+
+ err = lttng_ust_abi_objd_unref(event_notifier_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+static
+long lttng_event_notifier_enabler_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs __attribute__((unused)),
+ void *owner __attribute__((unused)))
+{
+ struct lttng_event_notifier_enabler *event_notifier_enabler = objd_private(objd);
+ switch (cmd) {
+ case LTTNG_UST_ABI_FILTER:
+ return lttng_event_notifier_enabler_attach_filter_bytecode(
+ event_notifier_enabler,
+ (struct lttng_ust_bytecode_node **) arg);
+ case LTTNG_UST_ABI_EXCLUSION:
+ return lttng_event_notifier_enabler_attach_exclusion(event_notifier_enabler,
+ (struct lttng_ust_excluder_node **) arg);
+ case LTTNG_UST_ABI_CAPTURE:
+ return lttng_event_notifier_enabler_attach_capture_bytecode(
+ event_notifier_enabler,
+ (struct lttng_ust_bytecode_node **) arg);
+ case LTTNG_UST_ABI_ENABLE:
+ return lttng_event_notifier_enabler_enable(event_notifier_enabler);
+ case LTTNG_UST_ABI_DISABLE:
+ return lttng_event_notifier_enabler_disable(event_notifier_enabler);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * lttng_event_notifier_group_error_counter_cmd - lttng event_notifier group error counter object command
+ *
+ * @obj: the object
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This descriptor implements lttng commands:
+ * LTTNG_UST_ABI_COUNTER_GLOBAL
+ * Return negative error code on error, 0 on success.
+ * LTTNG_UST_ABI_COUNTER_CPU
+ * Return negative error code on error, 0 on success.
+ */
+static
+long lttng_event_notifier_group_error_counter_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs, void *owner __attribute__((unused)))
+{
+ int ret;
+ struct lttng_counter *counter = objd_private(objd);
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_COUNTER_GLOBAL:
+ ret = -EINVAL; /* Unimplemented. */
+ break;
+ case LTTNG_UST_ABI_COUNTER_CPU:
+ {
+ struct lttng_ust_abi_counter_cpu *counter_cpu =
+ (struct lttng_ust_abi_counter_cpu *)arg;
+
+ ret = lttng_counter_set_cpu_shm(counter->counter,
+ counter_cpu->cpu_nr, uargs->counter_shm.shm_fd);
+ if (!ret) {
+ /* Take ownership of the shm_fd. */
+ uargs->counter_shm.shm_fd = -1;
+ }
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+int lttng_release_event_notifier_group_error_counter(int objd)
+ __attribute__((visibility("hidden")));
+int lttng_release_event_notifier_group_error_counter(int objd)
+{
+ struct lttng_counter *counter = objd_private(objd);
+
+ if (counter) {
+ return lttng_ust_abi_objd_unref(counter->event_notifier_group->objd, 0);
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_error_counter_ops = {
+ .release = lttng_release_event_notifier_group_error_counter,
+ .cmd = lttng_event_notifier_group_error_counter_cmd,
+};
+
+static
+int lttng_ust_event_notifier_group_create_error_counter(int event_notifier_group_objd, void *owner,
+ struct lttng_ust_abi_counter_conf *error_counter_conf)
+{
+ const char *counter_transport_name;
+ struct lttng_event_notifier_group *event_notifier_group =
+ objd_private(event_notifier_group_objd);
+ struct lttng_counter *counter;
+ int counter_objd, ret;
+ struct lttng_counter_dimension dimensions[1];
+ size_t counter_len;
+
+ if (event_notifier_group->error_counter)
+ return -EBUSY;
+
+ if (error_counter_conf->arithmetic != LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR)
+ return -EINVAL;
+
+ if (error_counter_conf->number_dimensions != 1)
+ return -EINVAL;
+
+ switch (error_counter_conf->bitness) {
+ case LTTNG_UST_ABI_COUNTER_BITNESS_64:
+ counter_transport_name = "counter-per-cpu-64-modular";
+ break;
+ case LTTNG_UST_ABI_COUNTER_BITNESS_32:
+ counter_transport_name = "counter-per-cpu-32-modular";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	counter_objd = objd_alloc(NULL, &lttng_event_notifier_group_error_counter_ops, owner,
+ "event_notifier group error counter");
+ if (counter_objd < 0) {
+ ret = counter_objd;
+ goto objd_error;
+ }
+
+ counter_len = error_counter_conf->dimensions[0].size;
+ dimensions[0].size = counter_len;
+ dimensions[0].underflow_index = 0;
+ dimensions[0].overflow_index = 0;
+ dimensions[0].has_underflow = 0;
+ dimensions[0].has_overflow = 0;
+
+ counter = lttng_ust_counter_create(counter_transport_name, 1, dimensions);
+ if (!counter) {
+ ret = -EINVAL;
+ goto create_error;
+ }
+
+ event_notifier_group->error_counter_len = counter_len;
+ /*
+ * store-release to publish error counter matches load-acquire
+ * in record_error. Ensures the counter is created and the
+ * error_counter_len is set before they are used.
+ * Currently a full memory barrier is used, which could be
+ * turned into acquire-release barriers.
+ */
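+	/*
+	 * The matching reader side (record_error, not shown in this patch)
+	 * conceptually pairs with the publication below. Sketch of the
+	 * acquire/release pairing only, not the literal code:
+	 *
+	 *   counter = CMM_LOAD_SHARED(event_notifier_group->error_counter);
+	 *   if (!counter)
+	 *           return;
+	 *   cmm_smp_mb();    (pairs with the barrier below)
+	 *   ... use counter and event_notifier_group->error_counter_len ...
+	 */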
+ cmm_smp_mb();
+ CMM_STORE_SHARED(event_notifier_group->error_counter, counter);
+
+ counter->objd = counter_objd;
+ counter->event_notifier_group = event_notifier_group; /* owner */
+
+ objd_set_private(counter_objd, counter);
+ /* The error counter holds a reference on the event_notifier group. */
+ objd_ref(event_notifier_group->objd);
+
+ return counter_objd;
+
+create_error:
+ {
+ int err;
+
+ err = lttng_ust_abi_objd_unref(counter_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+static
+long lttng_event_notifier_group_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs, void *owner)
+{
+ switch (cmd) {
+ case LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE:
+ {
+ struct lttng_ust_abi_event_notifier *event_notifier_param =
+ (struct lttng_ust_abi_event_notifier *) arg;
+ if (strutils_is_star_glob_pattern(event_notifier_param->event.name)) {
+ /*
+ * If the event name is a star globbing pattern,
+ * we create the special star globbing enabler.
+ */
+ return lttng_ust_event_notifier_enabler_create(objd,
+ owner, event_notifier_param,
+ LTTNG_ENABLER_FORMAT_STAR_GLOB);
+ } else {
+ return lttng_ust_event_notifier_enabler_create(objd,
+ owner, event_notifier_param,
+ LTTNG_ENABLER_FORMAT_EVENT);
+ }
+ }
+ case LTTNG_UST_ABI_COUNTER:
+ {
+ struct lttng_ust_abi_counter_conf *counter_conf =
+ (struct lttng_ust_abi_counter_conf *) uargs->counter.counter_data;
+ return lttng_ust_event_notifier_group_create_error_counter(
+ objd, owner, counter_conf);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_event_notifier_enabler_release(int objd)
+{
+ struct lttng_event_notifier_enabler *event_notifier_enabler = objd_private(objd);
+
+ if (event_notifier_enabler)
+ return lttng_ust_abi_objd_unref(event_notifier_enabler->group->objd, 0);
+ return 0;
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_event_notifier_enabler_ops = {
+ .release = lttng_event_notifier_enabler_release,
+ .cmd = lttng_event_notifier_enabler_cmd,
+};
+
+static
+int lttng_release_event_notifier_group(int objd)
+{
+ struct lttng_event_notifier_group *event_notifier_group = objd_private(objd);
+
+ if (event_notifier_group) {
+ lttng_event_notifier_group_destroy(event_notifier_group);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_ops = {
+ .release = lttng_release_event_notifier_group,
+ .cmd = lttng_event_notifier_group_cmd,
+};
+
+static
+long lttng_tracepoint_list_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs __attribute__((unused)),
+ void *owner __attribute__((unused)))
+{
+ struct lttng_ust_tracepoint_list *list = objd_private(objd);
+ struct lttng_ust_abi_tracepoint_iter *tp =
+ (struct lttng_ust_abi_tracepoint_iter *) arg;
+ struct lttng_ust_abi_tracepoint_iter *iter;
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_TRACEPOINT_LIST_GET:
+ {
+ iter = lttng_ust_tracepoint_list_get_iter_next(list);
+ if (!iter)
+ return -LTTNG_UST_ERR_NOENT;
+ memcpy(tp, iter, sizeof(*tp));
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_abi_tracepoint_list(void *owner)
+{
+ int list_objd, ret;
+ struct lttng_ust_tracepoint_list *list;
+
+	list_objd = objd_alloc(NULL, &lttng_tracepoint_list_ops, owner, "tp_list");
+ if (list_objd < 0) {
+ ret = list_objd;
+ goto objd_error;
+ }
+ list = zmalloc(sizeof(*list));
+ if (!list) {
+ ret = -ENOMEM;
+ goto alloc_error;
+ }
+ objd_set_private(list_objd, list);
+
+ /* populate list by walking on all registered probes. */
+ ret = lttng_probes_get_event_list(list);
+ if (ret) {
+ goto list_error;
+ }
+ return list_objd;
+
+list_error:
+ free(list);
+alloc_error:
+ {
+ int err;
+
+ err = lttng_ust_abi_objd_unref(list_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+static
+int lttng_release_tracepoint_list(int objd)
+{
+ struct lttng_ust_tracepoint_list *list = objd_private(objd);
+
+ if (list) {
+ lttng_probes_prune_event_list(list);
+ free(list);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_tracepoint_list_ops = {
+ .release = lttng_release_tracepoint_list,
+ .cmd = lttng_tracepoint_list_cmd,
+};
+
+static
+long lttng_tracepoint_field_list_cmd(int objd, unsigned int cmd,
+ unsigned long arg __attribute__((unused)), union lttng_ust_abi_args *uargs,
+ void *owner __attribute__((unused)))
+{
+ struct lttng_ust_field_list *list = objd_private(objd);
+ struct lttng_ust_abi_field_iter *tp = &uargs->field_list.entry;
+ struct lttng_ust_abi_field_iter *iter;
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET:
+ {
+ iter = lttng_ust_field_list_get_iter_next(list);
+ if (!iter)
+ return -LTTNG_UST_ERR_NOENT;
+ memcpy(tp, iter, sizeof(*tp));
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_abi_tracepoint_field_list(void *owner)
+{
+ int list_objd, ret;
+ struct lttng_ust_field_list *list;
+
+	list_objd = objd_alloc(NULL, &lttng_tracepoint_field_list_ops, owner,
+ "tp_field_list");
+ if (list_objd < 0) {
+ ret = list_objd;
+ goto objd_error;
+ }
+ list = zmalloc(sizeof(*list));
+ if (!list) {
+ ret = -ENOMEM;
+ goto alloc_error;
+ }
+ objd_set_private(list_objd, list);
+
+ /* populate list by walking on all registered probes. */
+ ret = lttng_probes_get_field_list(list);
+ if (ret) {
+ goto list_error;
+ }
+ return list_objd;
+
+list_error:
+ free(list);
+alloc_error:
+ {
+ int err;
+
+ err = lttng_ust_abi_objd_unref(list_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+static
+int lttng_release_tracepoint_field_list(int objd)
+{
+ struct lttng_ust_field_list *list = objd_private(objd);
+
+ if (list) {
+ lttng_probes_prune_field_list(list);
+ free(list);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_tracepoint_field_list_ops = {
+ .release = lttng_release_tracepoint_field_list,
+ .cmd = lttng_tracepoint_field_list_cmd,
+};
+
+static
+int lttng_abi_map_stream(int channel_objd, struct lttng_ust_abi_stream *info,
+ union lttng_ust_abi_args *uargs, void *owner __attribute__((unused)))
+{
+ struct lttng_ust_channel_buffer *lttng_chan_buf = objd_private(channel_objd);
+ int ret;
+
+ ret = channel_handle_add_stream(lttng_chan_buf->priv->rb_chan->handle,
+ uargs->stream.shm_fd, uargs->stream.wakeup_fd,
+ info->stream_nr, info->len);
+ if (ret)
+ goto error_add_stream;
+ /* Take ownership of shm_fd and wakeup_fd. */
+ uargs->stream.shm_fd = -1;
+ uargs->stream.wakeup_fd = -1;
+
+ return 0;
+
+error_add_stream:
+ return ret;
+}
+
+static
+int lttng_abi_create_event_enabler(int channel_objd,
+ struct lttng_ust_abi_event *event_param,
+ void *owner,
+ enum lttng_enabler_format_type format_type)
+{
+ struct lttng_ust_channel_buffer *channel = objd_private(channel_objd);
+ struct lttng_event_enabler *enabler;
+ int event_objd, ret;
+
+ event_param->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+	event_objd = objd_alloc(NULL, &lttng_event_enabler_ops, owner,
+ "event enabler");
+ if (event_objd < 0) {
+ ret = event_objd;
+ goto objd_error;
+ }
+ /*
+ * We tolerate no failure path after event creation. It will stay
+ * invariant for the rest of the session.
+ */
+ enabler = lttng_event_enabler_create(format_type, event_param, channel);
+ if (!enabler) {
+ ret = -ENOMEM;
+ goto event_error;
+ }
+ objd_set_private(event_objd, enabler);
+ /* The event holds a reference on the channel */
+ objd_ref(channel_objd);
+ return event_objd;
+
+event_error:
+ {
+ int err;
+
+ err = lttng_ust_abi_objd_unref(event_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+/**
+ * lttng_channel_cmd - lttng control through object descriptors
+ *
+ * @objd: the object descriptor
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This object descriptor implements lttng commands:
+ * LTTNG_UST_ABI_STREAM
+ * Returns an event stream object descriptor or failure.
+ * (typically, one event stream records events from one CPU)
+ * LTTNG_UST_ABI_EVENT
+ * Returns an event object descriptor or failure.
+ * LTTNG_UST_ABI_CONTEXT
+ * Prepend a context field to each event in the channel
+ * LTTNG_UST_ABI_ENABLE
+ * Enable recording for events in this channel (weak enable)
+ * LTTNG_UST_ABI_DISABLE
+ * Disable recording for events in this channel (strong disable)
+ *
+ * Channel and event file descriptors also hold a reference on the session.
+ */
+static
+long lttng_channel_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs, void *owner)
+{
+ struct lttng_ust_channel_buffer *lttng_chan_buf = objd_private(objd);
+
+ if (cmd != LTTNG_UST_ABI_STREAM) {
+ /*
+ * Check if channel received all streams.
+ */
+ if (!lttng_is_channel_ready(lttng_chan_buf))
+ return -EPERM;
+ }
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_STREAM:
+ {
+ struct lttng_ust_abi_stream *stream;
+
+ stream = (struct lttng_ust_abi_stream *) arg;
+ /* stream used as output */
+ return lttng_abi_map_stream(objd, stream, uargs, owner);
+ }
+ case LTTNG_UST_ABI_EVENT:
+ {
+ struct lttng_ust_abi_event *event_param =
+ (struct lttng_ust_abi_event *) arg;
+
+ if (strutils_is_star_glob_pattern(event_param->name)) {
+ /*
+ * If the event name is a star globbing pattern,
+ * we create the special star globbing enabler.
+ */
+ return lttng_abi_create_event_enabler(objd, event_param,
+ owner, LTTNG_ENABLER_FORMAT_STAR_GLOB);
+ } else {
+ return lttng_abi_create_event_enabler(objd, event_param,
+ owner, LTTNG_ENABLER_FORMAT_EVENT);
+ }
+ }
+ case LTTNG_UST_ABI_CONTEXT:
+ return lttng_abi_add_context(objd,
+ (struct lttng_ust_abi_context *) arg, uargs,
+				&lttng_chan_buf->priv->ctx,
+ lttng_chan_buf->parent->session);
+ case LTTNG_UST_ABI_ENABLE:
+ return lttng_channel_enable(lttng_chan_buf->parent);
+ case LTTNG_UST_ABI_DISABLE:
+ return lttng_channel_disable(lttng_chan_buf->parent);
+ case LTTNG_UST_ABI_FLUSH_BUFFER:
+ return lttng_chan_buf->ops->priv->flush_buffer(lttng_chan_buf);
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_channel_release(int objd)
+{
+ struct lttng_ust_channel_buffer *lttng_chan_buf = objd_private(objd);
+
+ if (lttng_chan_buf)
+ return lttng_ust_abi_objd_unref(lttng_chan_buf->parent->session->priv->objd, 0);
+ return 0;
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_channel_ops = {
+ .release = lttng_channel_release,
+ .cmd = lttng_channel_cmd,
+};
+
+/**
+ * lttng_enabler_cmd - lttng control through object descriptors
+ *
+ * @objd: the object descriptor
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This object descriptor implements lttng commands:
+ * LTTNG_UST_ABI_CONTEXT
+ * Prepend a context field to each record of events of this
+ * enabler.
+ * LTTNG_UST_ABI_ENABLE
+ * Enable recording for this enabler
+ * LTTNG_UST_ABI_DISABLE
+ * Disable recording for this enabler
+ * LTTNG_UST_ABI_FILTER
+ * Attach a filter to an enabler.
+ * LTTNG_UST_ABI_EXCLUSION
+ * Attach exclusions to an enabler.
+ */
+static
+long lttng_event_enabler_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs __attribute__((unused)),
+ void *owner __attribute__((unused)))
+{
+ struct lttng_event_enabler *enabler = objd_private(objd);
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_CONTEXT:
+ return lttng_event_enabler_attach_context(enabler,
+ (struct lttng_ust_abi_context *) arg);
+ case LTTNG_UST_ABI_ENABLE:
+ return lttng_event_enabler_enable(enabler);
+ case LTTNG_UST_ABI_DISABLE:
+ return lttng_event_enabler_disable(enabler);
+ case LTTNG_UST_ABI_FILTER:
+ {
+ int ret;
+
+ ret = lttng_event_enabler_attach_filter_bytecode(enabler,
+ (struct lttng_ust_bytecode_node **) arg);
+ if (ret)
+ return ret;
+ return 0;
+ }
+ case LTTNG_UST_ABI_EXCLUSION:
+ {
+ return lttng_event_enabler_attach_exclusion(enabler,
+ (struct lttng_ust_excluder_node **) arg);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_event_enabler_release(int objd)
+{
+ struct lttng_event_enabler *event_enabler = objd_private(objd);
+
+ if (event_enabler)
+ return lttng_ust_abi_objd_unref(event_enabler->chan->priv->parent.objd, 0);
+
+ return 0;
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_event_enabler_ops = {
+ .release = lttng_event_enabler_release,
+ .cmd = lttng_event_enabler_cmd,
+};
+
+void lttng_ust_abi_exit(void)
+{
+ lttng_ust_abi_close_in_progress = 1;
+ ust_lock_nocheck();
+ objd_table_destroy();
+ ust_unlock();
+ lttng_ust_abi_close_in_progress = 0;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <time.h>
+#include <assert.h>
+#include <signal.h>
+#include <limits.h>
+#include <urcu/uatomic.h>
+#include "futex.h"
+#include <urcu/compiler.h>
+#include <lttng/urcu/urcu-ust.h>
+
+#include <lttng/ust-utils.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-abi.h>
+#include <lttng/ust-fork.h>
+#include <lttng/ust-error.h>
+#include <lttng/ust-ctl.h>
+#include <lttng/ust-libc-wrapper.h>
+#include <lttng/ust-thread.h>
+#include <lttng/ust-tracer.h>
+#include <urcu/tls-compat.h>
+#include "common/ustcomm.h"
+#include "common/ust-fd.h"
+#include "common/logging.h"
+#include "common/macros.h"
+#include "tracepoint-internal.h"
+#include "lttng-tracer-core.h"
+#include "common/compat/pthread.h"
+#include "common/procname.h"
+#include "common/ringbuffer/rb-init.h"
+#include "lttng-ust-statedump.h"
+#include "clock.h"
+#include "common/ringbuffer/getcpu.h"
+#include "getenv.h"
+#include "ust-events-internal.h"
+#include "context-internal.h"
+#include "common/align.h"
+#include "lttng-counter-client.h"
+#include "lttng-rb-clients.h"
+
+/*
+ * Has the lttng-ust comm constructor been called?
+ */
+static int initialized;
+
+/*
+ * The ust_lock/ust_unlock lock is used as a communication thread mutex.
+ * Held when handling a command, also held by fork() to deal with
+ * removal of threads, and by exit path.
+ *
+ * The UST lock is the centralized mutex across UST tracing control and
+ * probe registration.
+ *
+ * ust_exit_mutex must never nest in ust_mutex.
+ *
+ * ust_fork_mutex must never nest in ust_mutex.
+ *
+ * ust_mutex_nest is a per-thread nesting counter, allowing the perf
+ * counter lazy initialization called by events within the statedump,
+ * which traces while the ust_mutex is held.
+ *
+ * ust_lock nests within the dynamic loader lock (within glibc) because
+ * it is taken within the library constructor.
+ *
+ * The ust fd tracker lock nests within the ust_mutex.
+ */
+static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* Allow nesting the ust_mutex within the same thread. */
+static DEFINE_URCU_TLS(int, ust_mutex_nest);
+
+/*
+ * ust_exit_mutex protects thread_active variable wrt thread exit. It
+ * cannot be done by ust_mutex because pthread_cancel(), which takes an
+ * internal libc lock, cannot nest within ust_mutex.
+ *
+ * It never nests within a ust_mutex.
+ */
+static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * ust_fork_mutex protects base address statedump tracing against forks. It
+ * prevents the dynamic loader lock from being taken (by base address
+ * statedump tracing) while a fork is happening, thus preventing deadlocks
+ * involving the dynamic loader lock.
+ */
+static pthread_mutex_t ust_fork_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* Should the ust comm thread quit? */
+static int lttng_ust_comm_should_quit;
+
+/*
+ * This variable can be tested by applications to check whether
+ * lttng-ust is loaded. They simply have to define their own
+ * "lttng_ust_loaded" weak symbol, and test it. It is set to 1 by the
+ * library constructor.
+ */
+int lttng_ust_loaded __attribute__((weak));
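+
+/*
+ * Example (sketch): an application detects at run time whether lttng-ust
+ * is loaded by declaring the weak symbol itself and testing it.
+ *
+ *   int lttng_ust_loaded __attribute__((weak));
+ *
+ *   if (lttng_ust_loaded)
+ *           ... react to lttng-ust being present ...
+ */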
+
+/*
+ * Return 0 on success, -1 if should quit.
+ * The lock is taken in both cases.
+ * Signal-safe.
+ */
+int ust_lock(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ if (oldstate != PTHREAD_CANCEL_ENABLE) {
+ ERR("pthread_setcancelstate: unexpected oldstate");
+ }
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!URCU_TLS(ust_mutex_nest)++)
+ pthread_mutex_lock(&ust_mutex);
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (lttng_ust_comm_should_quit) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
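+
+/*
+ * Typical ust_lock() usage pattern (a sketch; see the command handlers
+ * for the real call sites). The mutex is taken even when -1 is returned,
+ * so the unlock must run on every path:
+ *
+ *   if (ust_lock()) {
+ *           ret = -1;        (daemon is quitting, stop processing)
+ *           goto end;
+ *   }
+ *   ...
+ * end:
+ *   ust_unlock();
+ */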
+
+/*
+ * ust_lock_nocheck() can be used in constructors/destructors, because
+ * they are already nested within the dynamic loader lock, and therefore
+ * have exclusive access against execution of liblttng-ust destructor.
+ * Signal-safe.
+ */
+void ust_lock_nocheck(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ if (oldstate != PTHREAD_CANCEL_ENABLE) {
+ ERR("pthread_setcancelstate: unexpected oldstate");
+ }
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!URCU_TLS(ust_mutex_nest)++)
+ pthread_mutex_lock(&ust_mutex);
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+}
+
+/*
+ * Signal-safe.
+ */
+void ust_unlock(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!--URCU_TLS(ust_mutex_nest))
+ pthread_mutex_unlock(&ust_mutex);
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ if (oldstate != PTHREAD_CANCEL_DISABLE) {
+ ERR("pthread_setcancelstate: unexpected oldstate");
+ }
+}
+
+/*
+ * Wait for either of these before continuing to the main
+ * program:
+ * - the register_done message from sessiond daemon
+ * (will let the sessiond daemon enable sessions before main
+ * starts.)
+ * - sessiond daemon is not reachable.
+ * - timeout (ensuring applications are resilient to session
+ * daemon problems).
+ */
+static sem_t constructor_wait;
+/*
+ * Doing this for both the global and local sessiond.
+ */
+enum {
+ sem_count_initial_value = 4,
+};
+
+static int sem_count = sem_count_initial_value;
+
+/*
+ * Counting nesting within lttng-ust. Used to ensure that calling fork()
+ * from liblttng-ust does not execute the pre/post fork handlers.
+ */
+static DEFINE_URCU_TLS(int, lttng_ust_nest_count);
+
+/*
+ * Info about socket and associated listener thread.
+ */
+struct sock_info {
+ const char *name;
+ pthread_t ust_listener; /* listener thread */
+ int root_handle;
+ int registration_done;
+ int allowed;
+ int global;
+ int thread_active;
+
+ char sock_path[PATH_MAX];
+ int socket;
+ int notify_socket;
+
+ char wait_shm_path[PATH_MAX];
+ char *wait_shm_mmap;
+ /* Keep track of lazy state dump not performed yet. */
+ int statedump_pending;
+ int initial_statedump_done;
+ /* Keep procname for statedump */
+ char procname[LTTNG_UST_ABI_PROCNAME_LEN];
+};
+
+/* Socket from app (connect) to session daemon (listen) for communication */
+struct sock_info global_apps = {
+ .name = "global",
+ .global = 1,
+
+ .root_handle = -1,
+ .registration_done = 0,
+ .allowed = 0,
+ .thread_active = 0,
+
+ .sock_path = LTTNG_DEFAULT_RUNDIR "/" LTTNG_UST_SOCK_FILENAME,
+ .socket = -1,
+ .notify_socket = -1,
+
+ .wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
+
+ .statedump_pending = 0,
+ .initial_statedump_done = 0,
+ .procname[0] = '\0'
+};
+
+/* TODO: allow global_apps_sock_path override */
+
+struct sock_info local_apps = {
+ .name = "local",
+ .global = 0,
+ .root_handle = -1,
+ .registration_done = 0,
+ .allowed = 0, /* Check setuid bit first */
+ .thread_active = 0,
+
+ .socket = -1,
+ .notify_socket = -1,
+
+ .statedump_pending = 0,
+ .initial_statedump_done = 0,
+ .procname[0] = '\0'
+};
+
+static int wait_poll_fallback;
+
+static const char *cmd_name_mapping[] = {
+ [ LTTNG_UST_ABI_RELEASE ] = "Release",
+ [ LTTNG_UST_ABI_SESSION ] = "Create Session",
+ [ LTTNG_UST_ABI_TRACER_VERSION ] = "Get Tracer Version",
+
+ [ LTTNG_UST_ABI_TRACEPOINT_LIST ] = "Create Tracepoint List",
+ [ LTTNG_UST_ABI_WAIT_QUIESCENT ] = "Wait for Quiescent State",
+ [ LTTNG_UST_ABI_REGISTER_DONE ] = "Registration Done",
+ [ LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST ] = "Create Tracepoint Field List",
+
+ [ LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE ] = "Create event notifier group",
+
+ /* Session FD commands */
+ [ LTTNG_UST_ABI_CHANNEL ] = "Create Channel",
+ [ LTTNG_UST_ABI_SESSION_START ] = "Start Session",
+ [ LTTNG_UST_ABI_SESSION_STOP ] = "Stop Session",
+
+ /* Channel FD commands */
+ [ LTTNG_UST_ABI_STREAM ] = "Create Stream",
+ [ LTTNG_UST_ABI_EVENT ] = "Create Event",
+
+ /* Event and Channel FD commands */
+ [ LTTNG_UST_ABI_CONTEXT ] = "Create Context",
+ [ LTTNG_UST_ABI_FLUSH_BUFFER ] = "Flush Buffer",
+
+ /* Event, Channel and Session commands */
+ [ LTTNG_UST_ABI_ENABLE ] = "Enable",
+ [ LTTNG_UST_ABI_DISABLE ] = "Disable",
+
+ /* Tracepoint list commands */
+ [ LTTNG_UST_ABI_TRACEPOINT_LIST_GET ] = "List Next Tracepoint",
+ [ LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET ] = "List Next Tracepoint Field",
+
+ /* Event FD commands */
+ [ LTTNG_UST_ABI_FILTER ] = "Create Filter",
+ [ LTTNG_UST_ABI_EXCLUSION ] = "Add exclusions to event",
+
+ /* Event notifier group commands */
+ [ LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE ] = "Create event notifier",
+
+ /* Session and event notifier group commands */
+ [ LTTNG_UST_ABI_COUNTER ] = "Create Counter",
+
+ /* Counter commands */
+ [ LTTNG_UST_ABI_COUNTER_GLOBAL ] = "Create Counter Global",
+ [ LTTNG_UST_ABI_COUNTER_CPU ] = "Create Counter CPU",
+};
+
+static const char *str_timeout;
+static int got_timeout_env;
+
+static char *get_map_shm(struct sock_info *sock_info);
+
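+/*
+ * Read up to len bytes from fd, retrying on EINTR and on partial
+ * reads. Returns len when the whole buffer was filled, 0 if end of
+ * file was reached first, or -1 (with errno set) on error.
+ */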
+ssize_t lttng_ust_read(int fd, void *buf, size_t len)
+{
+ ssize_t ret;
+ size_t copied = 0, to_copy = len;
+
+ do {
+ ret = read(fd, buf + copied, to_copy);
+ if (ret > 0) {
+ copied += ret;
+ to_copy -= ret;
+ }
+ } while ((ret > 0 && to_copy > 0)
+ || (ret < 0 && errno == EINTR));
+ if (ret > 0) {
+ ret = copied;
+ }
+ return ret;
+}
+
+/*
+ * Returns the HOME directory path. Caller MUST NOT free(3) the returned
+ * pointer.
+ */
+static
+const char *get_lttng_home_dir(void)
+{
+ const char *val;
+
+ val = (const char *) lttng_ust_getenv("LTTNG_HOME");
+ if (val != NULL) {
+ return val;
+ }
+ return (const char *) lttng_ust_getenv("HOME");
+}
+
+/*
+ * Force a read (implies TLS fixup for dlopen) of TLS variables.
+ */
+static
+void lttng_fixup_nest_count_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
+}
+
+static
+void lttng_fixup_ust_mutex_nest_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(ust_mutex_nest)));
+}
+
+/*
+ * Fixup lttng-ust urcu TLS.
+ */
+static
+void lttng_fixup_lttng_ust_urcu_tls(void)
+{
+ (void) lttng_ust_urcu_read_ongoing();
+}
+
+void lttng_ust_fixup_tls(void)
+{
+ lttng_fixup_lttng_ust_urcu_tls();
+ lttng_fixup_ringbuffer_tls();
+ lttng_fixup_vtid_tls();
+ lttng_fixup_nest_count_tls();
+ lttng_fixup_procname_tls();
+ lttng_fixup_ust_mutex_nest_tls();
+ lttng_ust_fixup_perf_counter_tls();
+ lttng_ust_fixup_fd_tracker_tls();
+ lttng_fixup_cgroup_ns_tls();
+ lttng_fixup_ipc_ns_tls();
+ lttng_fixup_net_ns_tls();
+ lttng_fixup_time_ns_tls();
+ lttng_fixup_uts_ns_tls();
+ lttng_ust_fixup_ring_buffer_client_discard_tls();
+ lttng_ust_fixup_ring_buffer_client_discard_rt_tls();
+ lttng_ust_fixup_ring_buffer_client_overwrite_tls();
+ lttng_ust_fixup_ring_buffer_client_overwrite_rt_tls();
+}
+
+/*
+ * LTTng-UST uses Global Dynamic model TLS variables rather than IE
+ * model because many versions of glibc don't preallocate a pool large
+ * enough for TLS variables IE model defined in other shared libraries,
+ * and causes issues when using LTTng-UST for Java tracing.
+ *
+ * Because of this use of Global Dynamic TLS variables, users wishing to
+ * trace from signal handlers need to explicitly trigger the lazy
+ * allocation of those variables for each thread before using them.
+ * This can be triggered by calling lttng_ust_init_thread().
+ */
+void lttng_ust_init_thread(void)
+{
+ /*
+ * Because those TLS variables are global dynamic, we need to
+ * ensure those are initialized before a signal handler nesting over
+ * this thread attempts to use them.
+ */
+ lttng_ust_fixup_tls();
+}
+
+int lttng_get_notify_socket(void *owner)
+{
+ struct sock_info *info = owner;
+
+ return info->notify_socket;
+}
+
+
+char* lttng_ust_sockinfo_get_procname(void *owner)
+{
+ struct sock_info *info = owner;
+
+ return info->procname;
+}
+
+static
+void print_cmd(int cmd, int handle)
+{
+ const char *cmd_name = "Unknown";
+
+ if (cmd >= 0 && cmd < LTTNG_ARRAY_SIZE(cmd_name_mapping)
+ && cmd_name_mapping[cmd]) {
+ cmd_name = cmd_name_mapping[cmd];
+ }
+ DBG("Message Received \"%s\" (%d), Handle \"%s\" (%d)",
+ cmd_name, cmd,
+ lttng_ust_obj_get_name(handle), handle);
+}
+
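+/*
+ * Map the wait shm used by the root (global) session daemon to wake up
+ * this application. On failure, global tracing is disabled for this
+ * process.
+ */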
+static
+int setup_global_apps(void)
+{
+ int ret = 0;
+ assert(!global_apps.wait_shm_mmap);
+
+ global_apps.wait_shm_mmap = get_map_shm(&global_apps);
+ if (!global_apps.wait_shm_mmap) {
+ WARN("Unable to get map shm for global apps. Disabling LTTng-UST global tracing.");
+ global_apps.allowed = 0;
+ ret = -EIO;
+ goto error;
+ }
+
+ global_apps.allowed = 1;
+ lttng_pthread_getname_np(global_apps.procname, LTTNG_UST_ABI_PROCNAME_LEN);
+error:
+ return ret;
+}
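+
+/*
+ * Set up the per-user (local) apps socket and wait shm paths. Per-user
+ * tracing is disabled for setuid binaries and when no home directory
+ * can be found.
+ */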
+static
+int setup_local_apps(void)
+{
+ int ret = 0;
+ const char *home_dir;
+ uid_t uid;
+
+ assert(!local_apps.wait_shm_mmap);
+
+ uid = getuid();
+ /*
+ * Disallow per-user tracing for setuid binaries.
+ */
+ if (uid != geteuid()) {
+ assert(local_apps.allowed == 0);
+ ret = 0;
+ goto end;
+ }
+ home_dir = get_lttng_home_dir();
+ if (!home_dir) {
+ WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
+ assert(local_apps.allowed == 0);
+ ret = -ENOENT;
+ goto end;
+ }
+ local_apps.allowed = 1;
+ snprintf(local_apps.sock_path, PATH_MAX, "%s/%s/%s",
+ home_dir,
+ LTTNG_DEFAULT_HOME_RUNDIR,
+ LTTNG_UST_SOCK_FILENAME);
+ snprintf(local_apps.wait_shm_path, PATH_MAX, "/%s-%u",
+ LTTNG_UST_WAIT_FILENAME,
+ uid);
+
+ local_apps.wait_shm_mmap = get_map_shm(&local_apps);
+ if (!local_apps.wait_shm_mmap) {
+ WARN("Unable to get map shm for local apps. Disabling LTTng-UST per-user tracing.");
+ local_apps.allowed = 0;
+ ret = -EIO;
+ goto end;
+ }
+
+ lttng_pthread_getname_np(local_apps.procname, LTTNG_UST_ABI_PROCNAME_LEN);
+end:
+ return ret;
+}
+
+/*
+ * Get socket timeout, in ms.
+ * -1: wait forever. 0: don't wait. >0: timeout, in ms.
+ */
+static
+long get_timeout(void)
+{
+ long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
+
+ if (!got_timeout_env) {
+ str_timeout = lttng_ust_getenv("LTTNG_UST_REGISTER_TIMEOUT");
+ got_timeout_env = 1;
+ }
+ if (str_timeout)
+ constructor_delay_ms = strtol(str_timeout, NULL, 10);
+ /* All negative values are considered as "-1". */
+ if (constructor_delay_ms < -1)
+ constructor_delay_ms = -1;
+ return constructor_delay_ms;
+}
+
+/* Timeout for notify socket send and recv. */
+static
+long get_notify_sock_timeout(void)
+{
+ return get_timeout();
+}
+
+/* Timeout for connecting to cmd and notify sockets. */
+static
+long get_connect_sock_timeout(void)
+{
+ return get_timeout();
+}
+
+/*
+ * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
+ */
+static
+int get_constructor_timeout(struct timespec *constructor_timeout)
+{
+ long constructor_delay_ms;
+ int ret;
+
+ constructor_delay_ms = get_timeout();
+
+ switch (constructor_delay_ms) {
+ case -1:/* fall-through */
+ case 0:
+ return constructor_delay_ms;
+ default:
+ break;
+ }
+
+ /*
+ * If we are unable to find the current time, don't wait.
+ */
+ ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
+ if (ret) {
+ /* Don't wait. */
+ return 0;
+ }
+ constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
+ constructor_timeout->tv_nsec +=
+ (constructor_delay_ms % 1000UL) * 1000000UL;
+ if (constructor_timeout->tv_nsec >= 1000000000UL) {
+ constructor_timeout->tv_sec++;
+ constructor_timeout->tv_nsec -= 1000000000UL;
+ }
+ /* Timeout wait (constructor_delay_ms). */
+ return 1;
+}
+
+static
+void get_allow_blocking(void)
+{
+ const char *str_allow_blocking =
+ lttng_ust_getenv("LTTNG_UST_ALLOW_BLOCKING");
+
+ if (str_allow_blocking) {
+ DBG("%s environment variable is set",
+ "LTTNG_UST_ALLOW_BLOCKING");
+ lttng_ust_ringbuffer_set_allow_blocking();
+ }
+}
+
+static
+int register_to_sessiond(int socket, enum ustctl_socket_type type)
+{
+ return ustcomm_send_reg_msg(socket,
+ type,
+ CAA_BITS_PER_LONG,
+ lttng_ust_rb_alignof(uint8_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uint16_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uint32_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(unsigned long) * CHAR_BIT);
+}
+
+static
+int send_reply(int sock, struct ustcomm_ust_reply *lur)
+{
+ ssize_t len;
+
+ len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
+ switch (len) {
+ case sizeof(*lur):
+ DBG("message successfully sent");
+ return 0;
+ default:
+ if (len == -ECONNRESET) {
+ DBG("remote end closed connection");
+ return 0;
+ }
+ if (len < 0)
+ return len;
+ DBG("incorrect message size: %zd", len);
+ return -EINVAL;
+ }
+}
+
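+/*
+ * Release count units of the constructor semaphore count. The
+ * constructor is woken up (sem_post) once the count reaches zero,
+ * i.e. after the registration-done and initial statedump steps have
+ * completed (or been skipped) for both the global and local session
+ * daemons.
+ */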
+static
+void decrement_sem_count(unsigned int count)
+{
+ int ret;
+
+ assert(uatomic_read(&sem_count) >= count);
+
+ if (uatomic_read(&sem_count) <= 0) {
+ return;
+ }
+
+ ret = uatomic_add_return(&sem_count, -count);
+ if (ret == 0) {
+ ret = sem_post(&constructor_wait);
+ assert(!ret);
+ }
+}
+
+static
+int handle_register_done(struct sock_info *sock_info)
+{
+ if (sock_info->registration_done)
+ return 0;
+ sock_info->registration_done = 1;
+
+ decrement_sem_count(1);
+ if (!sock_info->statedump_pending) {
+ sock_info->initial_statedump_done = 1;
+ decrement_sem_count(1);
+ }
+
+ return 0;
+}
+
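+/*
+ * Registration failed: release both the registration-done and initial
+ * statedump counts so the application constructor does not wait for an
+ * unreachable session daemon.
+ */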
+static
+int handle_register_failed(struct sock_info *sock_info)
+{
+ if (sock_info->registration_done)
+ return 0;
+ sock_info->registration_done = 1;
+ sock_info->initial_statedump_done = 1;
+
+ decrement_sem_count(2);
+
+ return 0;
+}
+
+/*
+ * Only execute pending statedump after the constructor semaphore has
+ * been posted by the current listener thread. This means statedump will
+ * only be performed after the "registration done" command is received
+ * from this thread's session daemon.
+ *
+ * This ensures we don't run into deadlock issues with the dynamic
+ * loader mutex, which is held while the constructor is called and
+ * waiting on the constructor semaphore. All operations requiring this
+ * dynamic loader lock need to be postponed using this mechanism.
+ *
+ * In a scenario with two session daemons connected to the application,
+ * it is possible that the first listener thread which receives the
+ * registration done command issues its statedump while the dynamic
+ * loader lock is still held by the application constructor waiting on
+ * the semaphore. It will however be allowed to proceed when the
+ * second session daemon sends the registration done command to the
+ * second listener thread. This situation therefore does not produce
+ * a deadlock.
+ */
+static
+void handle_pending_statedump(struct sock_info *sock_info)
+{
+ if (sock_info->registration_done && sock_info->statedump_pending) {
+ sock_info->statedump_pending = 0;
+ pthread_mutex_lock(&ust_fork_mutex);
+ lttng_handle_pending_statedump(sock_info);
+ pthread_mutex_unlock(&ust_fork_mutex);
+
+ if (!sock_info->initial_statedump_done) {
+ sock_info->initial_statedump_done = 1;
+ decrement_sem_count(1);
+ }
+ }
+}
+
+static inline
+const char *bytecode_type_str(uint32_t cmd)
+{
+ switch (cmd) {
+ case LTTNG_UST_ABI_CAPTURE:
+ return "capture";
+ case LTTNG_UST_ABI_FILTER:
+ return "filter";
+ default:
+ abort();
+ }
+}
+
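+/*
+ * Receive a filter or capture bytecode payload from the session daemon
+ * and dispatch it to the target object's command handler.
+ */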
+static
+int handle_bytecode_recv(struct sock_info *sock_info,
+ int sock, struct ustcomm_ust_msg *lum)
+{
+ struct lttng_ust_bytecode_node *bytecode = NULL;
+ enum lttng_ust_bytecode_type type;
+ const struct lttng_ust_abi_objd_ops *ops;
+ uint32_t data_size, data_size_max, reloc_offset;
+ uint64_t seqnum;
+ ssize_t len;
+ int ret = 0;
+
+ switch (lum->cmd) {
+ case LTTNG_UST_ABI_FILTER:
+ type = LTTNG_UST_BYTECODE_TYPE_FILTER;
+ data_size = lum->u.filter.data_size;
+ data_size_max = LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN;
+ reloc_offset = lum->u.filter.reloc_offset;
+ seqnum = lum->u.filter.seqnum;
+ break;
+ case LTTNG_UST_ABI_CAPTURE:
+ type = LTTNG_UST_BYTECODE_TYPE_CAPTURE;
+ data_size = lum->u.capture.data_size;
+ data_size_max = LTTNG_UST_ABI_CAPTURE_BYTECODE_MAX_LEN;
+ reloc_offset = lum->u.capture.reloc_offset;
+ seqnum = lum->u.capture.seqnum;
+ break;
+ default:
+ abort();
+ }
+
+ if (data_size > data_size_max) {
+ ERR("Bytecode %s data size is too large: %u bytes",
+ bytecode_type_str(lum->cmd), data_size);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (reloc_offset > data_size) {
+ ERR("Bytecode %s reloc offset %u is not within data",
+ bytecode_type_str(lum->cmd), reloc_offset);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Allocate the structure AND the `data[]` field. */
+ bytecode = zmalloc(sizeof(*bytecode) + data_size);
+ if (!bytecode) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ bytecode->bc.len = data_size;
+ bytecode->bc.reloc_offset = reloc_offset;
+ bytecode->bc.seqnum = seqnum;
+ bytecode->type = type;
+
+ len = ustcomm_recv_unix_sock(sock, bytecode->bc.data, bytecode->bc.len);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto end;
+ default:
+ if (len == bytecode->bc.len) {
+ DBG("Bytecode %s data received",
+ bytecode_type_str(lum->cmd));
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d",
+ (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection",
+ sock_info->name);
+ ret = len;
+ goto end;
+ }
+ ret = len;
+ goto end;
+ } else {
+ DBG("Incorrect %s bytecode data message size: %zd",
+ bytecode_type_str(lum->cmd), len);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+
+ ops = lttng_ust_abi_objd_ops(lum->handle);
+ if (!ops) {
+ ret = -ENOENT;
+ goto end;
+ }
+
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &bytecode,
+ NULL, sock_info);
+ else
+ ret = -ENOSYS;
+
+end:
+ free(bytecode);
+ return ret;
+}
+
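+/*
+ * Handle one command message received on the cmd socket: receive any
+ * variable-size payload or file descriptors it carries, dispatch to
+ * the object's command handler under ust_lock(), then send the reply.
+ */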
+static
+int handle_message(struct sock_info *sock_info,
+ int sock, struct ustcomm_ust_msg *lum)
+{
+ int ret = 0;
+ const struct lttng_ust_abi_objd_ops *ops;
+ struct ustcomm_ust_reply lur;
+ union lttng_ust_abi_args args;
+ char ctxstr[LTTNG_UST_ABI_SYM_NAME_LEN]; /* App context string. */
+ ssize_t len;
+
+ memset(&lur, 0, sizeof(lur));
+
+ if (ust_lock()) {
+ ret = -LTTNG_UST_ERR_EXITING;
+ goto error;
+ }
+
+ ops = lttng_ust_abi_objd_ops(lum->handle);
+ if (!ops) {
+ ret = -ENOENT;
+ goto error;
+ }
+
+ switch (lum->cmd) {
+ case LTTNG_UST_ABI_REGISTER_DONE:
+ if (lum->handle == LTTNG_UST_ABI_ROOT_HANDLE)
+ ret = handle_register_done(sock_info);
+ else
+ ret = -EINVAL;
+ break;
+ case LTTNG_UST_ABI_RELEASE:
+ if (lum->handle == LTTNG_UST_ABI_ROOT_HANDLE)
+ ret = -EPERM;
+ else
+ ret = lttng_ust_abi_objd_unref(lum->handle, 1);
+ break;
+ case LTTNG_UST_ABI_CAPTURE:
+ case LTTNG_UST_ABI_FILTER:
+ ret = handle_bytecode_recv(sock_info, sock, lum);
+ if (ret)
+ goto error;
+ break;
+ case LTTNG_UST_ABI_EXCLUSION:
+ {
+ /* Receive exclusion names */
+ struct lttng_ust_excluder_node *node;
+ unsigned int count;
+
+ count = lum->u.exclusion.count;
+ if (count == 0) {
+ /* There are no names to read */
+ ret = 0;
+ goto error;
+ }
+ node = zmalloc(sizeof(*node) +
+ count * LTTNG_UST_ABI_SYM_NAME_LEN);
+ if (!node) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ node->excluder.count = count;
+ len = ustcomm_recv_unix_sock(sock, node->excluder.names,
+ count * LTTNG_UST_ABI_SYM_NAME_LEN);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ free(node);
+ goto error;
+ default:
+ if (len == count * LTTNG_UST_ABI_SYM_NAME_LEN) {
+ DBG("Exclusion data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ free(node);
+ goto error;
+ }
+ ret = len;
+ free(node);
+ goto error;
+ } else {
+ DBG("Incorrect exclusion data message size: %zd", len);
+ ret = -EINVAL;
+ free(node);
+ goto error;
+ }
+ }
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &node,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ free(node);
+ break;
+ }
+ case LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE:
+ {
+ int event_notifier_notif_fd, close_ret;
+
+ len = ustcomm_recv_event_notifier_notif_fd_from_sessiond(sock,
+ &event_notifier_notif_fd);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ case 1:
+ break;
+ default:
+ if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d",
+ (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection",
+ sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("Incorrect event notifier fd message size: %zd",
+ len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ args.event_notifier_handle.event_notifier_notif_fd =
+ event_notifier_notif_fd;
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ if (args.event_notifier_handle.event_notifier_notif_fd >= 0) {
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.event_notifier_handle.event_notifier_notif_fd);
+ lttng_ust_unlock_fd_tracker();
+ if (close_ret)
+ PERROR("close");
+ }
+ break;
+ }
+ case LTTNG_UST_ABI_CHANNEL:
+ {
+ void *chan_data;
+ int wakeup_fd;
+
+ len = ustcomm_recv_channel_from_sessiond(sock,
+ &chan_data, lum->u.channel.len,
+ &wakeup_fd);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ default:
+ if (len == lum->u.channel.len) {
+ DBG("channel data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("incorrect channel data message size: %zd", len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ args.channel.chan_data = chan_data;
+ args.channel.wakeup_fd = wakeup_fd;
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ if (args.channel.wakeup_fd >= 0) {
+ int close_ret;
+
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.channel.wakeup_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.channel.wakeup_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
+ free(args.channel.chan_data);
+ break;
+ }
+ case LTTNG_UST_ABI_STREAM:
+ {
+ int close_ret;
+
+ /* Receive shm_fd, wakeup_fd */
+ ret = ustcomm_recv_stream_from_sessiond(sock,
+ NULL,
+ &args.stream.shm_fd,
+ &args.stream.wakeup_fd);
+ if (ret) {
+ goto error;
+ }
+
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ if (args.stream.shm_fd >= 0) {
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.stream.shm_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.stream.shm_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
+ if (args.stream.wakeup_fd >= 0) {
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.stream.wakeup_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.stream.wakeup_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
+ break;
+ }
+ case LTTNG_UST_ABI_CONTEXT:
+ switch (lum->u.context.ctx) {
+ case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
+ {
+ char *p;
+ size_t ctxlen, recvlen;
+
+ ctxlen = strlen("$app.") + lum->u.context.u.app_ctx.provider_name_len - 1
+ + strlen(":") + lum->u.context.u.app_ctx.ctx_name_len;
+ if (ctxlen >= LTTNG_UST_ABI_SYM_NAME_LEN) {
+				ERR("Application context string length is too large: %zu bytes",
+ ctxlen);
+ ret = -EINVAL;
+ goto error;
+ }
+ strcpy(ctxstr, "$app.");
+ p = &ctxstr[strlen("$app.")];
+ recvlen = ctxlen - strlen("$app.");
+ len = ustcomm_recv_unix_sock(sock, p, recvlen);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ default:
+ if (len == recvlen) {
+ DBG("app context data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("incorrect app context data message size: %zd", len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ /* Put : between provider and ctxname. */
+ p[lum->u.context.u.app_ctx.provider_name_len - 1] = ':';
+ args.app_context.ctxname = ctxstr;
+ break;
+ }
+ default:
+ break;
+ }
+ if (ops->cmd) {
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ } else {
+ ret = -ENOSYS;
+ }
+ break;
+ case LTTNG_UST_ABI_COUNTER:
+ {
+ void *counter_data;
+
+ len = ustcomm_recv_counter_from_sessiond(sock,
+ &counter_data, lum->u.counter.len);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ default:
+ if (len == lum->u.counter.len) {
+ DBG("counter data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("incorrect counter data message size: %zd", len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ args.counter.counter_data = counter_data;
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ free(args.counter.counter_data);
+ break;
+ }
+ case LTTNG_UST_ABI_COUNTER_GLOBAL:
+ {
+ /* Receive shm_fd */
+ ret = ustcomm_recv_counter_shm_from_sessiond(sock,
+ &args.counter_shm.shm_fd);
+ if (ret) {
+ goto error;
+ }
+
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ if (args.counter_shm.shm_fd >= 0) {
+ int close_ret;
+
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.counter_shm.shm_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.counter_shm.shm_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
+ break;
+ }
+ case LTTNG_UST_ABI_COUNTER_CPU:
+ {
+ /* Receive shm_fd */
+ ret = ustcomm_recv_counter_shm_from_sessiond(sock,
+ &args.counter_shm.shm_fd);
+ if (ret) {
+ goto error;
+ }
+
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ if (args.counter_shm.shm_fd >= 0) {
+ int close_ret;
+
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.counter_shm.shm_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.counter_shm.shm_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
+ break;
+ }
+ case LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE:
+ {
+ /* Receive struct lttng_ust_event_notifier */
+ struct lttng_ust_abi_event_notifier event_notifier;
+
+ if (sizeof(event_notifier) != lum->u.event_notifier.len) {
+ DBG("incorrect event notifier data message size: %u", lum->u.event_notifier.len);
+ ret = -EINVAL;
+ goto error;
+ }
+ len = ustcomm_recv_unix_sock(sock, &event_notifier, sizeof(event_notifier));
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ default:
+ if (len == sizeof(event_notifier)) {
+ DBG("event notifier data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("incorrect event notifier data message size: %zd", len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &event_notifier,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ break;
+ }
+
+ default:
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ break;
+ }
+
+ lur.handle = lum->handle;
+ lur.cmd = lum->cmd;
+ lur.ret_val = ret;
+ if (ret >= 0) {
+ lur.ret_code = LTTNG_UST_OK;
+ } else {
+ /*
+		 * Use -LTTNG_UST_ERR as a wildcard for UST internal
+		 * errors that are not caused by the transport, except if
+ * we already have a more precise error message to
+ * report.
+ */
+ if (ret > -LTTNG_UST_ERR) {
+ /* Translate code to UST error. */
+ switch (ret) {
+ case -EEXIST:
+ lur.ret_code = -LTTNG_UST_ERR_EXIST;
+ break;
+ case -EINVAL:
+ lur.ret_code = -LTTNG_UST_ERR_INVAL;
+ break;
+ case -ENOENT:
+ lur.ret_code = -LTTNG_UST_ERR_NOENT;
+ break;
+ case -EPERM:
+ lur.ret_code = -LTTNG_UST_ERR_PERM;
+ break;
+ case -ENOSYS:
+ lur.ret_code = -LTTNG_UST_ERR_NOSYS;
+ break;
+ default:
+ lur.ret_code = -LTTNG_UST_ERR;
+ break;
+ }
+ } else {
+ lur.ret_code = ret;
+ }
+ }
+ if (ret >= 0) {
+ switch (lum->cmd) {
+ case LTTNG_UST_ABI_TRACER_VERSION:
+ lur.u.version = lum->u.version;
+ break;
+ case LTTNG_UST_ABI_TRACEPOINT_LIST_GET:
+ memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
+ break;
+ }
+ }
+ DBG("Return value: %d", lur.ret_val);
+
+ ust_unlock();
+
+ /*
+	 * Perform delayed statedump operations outside of the UST
+ * lock. We need to take the dynamic loader lock before we take
+ * the UST lock internally within handle_pending_statedump().
+ */
+ handle_pending_statedump(sock_info);
+
+ if (ust_lock()) {
+ ret = -LTTNG_UST_ERR_EXITING;
+ goto error;
+ }
+
+ ret = send_reply(sock, &lur);
+ if (ret < 0) {
+ DBG("error sending reply");
+ goto error;
+ }
+
+ /*
+	 * LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET needs to send the field
+ * after the reply.
+ */
+ if (lur.ret_code == LTTNG_UST_OK) {
+ switch (lum->cmd) {
+ case LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET:
+ len = ustcomm_send_unix_sock(sock,
+ &args.field_list.entry,
+ sizeof(args.field_list.entry));
+ if (len < 0) {
+ ret = len;
+ goto error;
+ }
+ if (len != sizeof(args.field_list.entry)) {
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ }
+
+error:
+ ust_unlock();
+
+ return ret;
+}
+
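+/*
+ * Release the per-socket state that can safely be torn down from this
+ * thread. When exiting is set, the sockets and the wait shm mapping
+ * are left for process exit to reclaim, since listener threads may
+ * still be using them.
+ */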
+static
+void cleanup_sock_info(struct sock_info *sock_info, int exiting)
+{
+ int ret;
+
+ if (sock_info->root_handle != -1) {
+ ret = lttng_ust_abi_objd_unref(sock_info->root_handle, 1);
+ if (ret) {
+ ERR("Error unref root handle");
+ }
+ sock_info->root_handle = -1;
+ }
+ sock_info->registration_done = 0;
+ sock_info->initial_statedump_done = 0;
+
+ /*
+ * wait_shm_mmap, socket and notify socket are used by listener
+ * threads outside of the ust lock, so we cannot tear them down
+ * ourselves, because we cannot join on these threads. Leave
+ * responsibility of cleaning up these resources to the OS
+ * process exit.
+ */
+ if (exiting)
+ return;
+
+ if (sock_info->socket != -1) {
+ ret = ustcomm_close_unix_sock(sock_info->socket);
+ if (ret) {
+ ERR("Error closing ust cmd socket");
+ }
+ sock_info->socket = -1;
+ }
+ if (sock_info->notify_socket != -1) {
+ ret = ustcomm_close_unix_sock(sock_info->notify_socket);
+ if (ret) {
+ ERR("Error closing ust notify socket");
+ }
+ sock_info->notify_socket = -1;
+ }
+ if (sock_info->wait_shm_mmap) {
+ long page_size;
+
+ page_size = LTTNG_UST_PAGE_SIZE;
+ if (page_size <= 0) {
+ if (!page_size) {
+ errno = EINVAL;
+ }
+ PERROR("Error in sysconf(_SC_PAGE_SIZE)");
+ } else {
+ ret = munmap(sock_info->wait_shm_mmap, page_size);
+ if (ret) {
+ ERR("Error unmapping wait shm");
+ }
+ }
+ sock_info->wait_shm_mmap = NULL;
+ }
+}
+
+/*
+ * Using fork to set umask in the child process (not multi-thread safe).
+ * We deal with the shm_open vs ftruncate race (happening when the
+ * sessiond owns the shm and does not let everybody modify it, to ensure
+ * safety against shm_unlink) by simply letting the mmap fail and
+ * retrying after a few seconds.
+ * For global shm, everybody has rw access to it until the sessiond
+ * starts.
+ */
+static
+int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
+{
+ int wait_shm_fd, ret;
+ pid_t pid;
+
+ /*
+ * Try to open read-only.
+ */
+ wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
+ if (wait_shm_fd >= 0) {
+ int32_t tmp_read;
+ ssize_t len;
+ size_t bytes_read = 0;
+
+ /*
+ * Try to read the fd. If unable to do so, try opening
+ * it in write mode.
+ */
+ do {
+ len = read(wait_shm_fd,
+ &((char *) &tmp_read)[bytes_read],
+ sizeof(tmp_read) - bytes_read);
+ if (len > 0) {
+ bytes_read += len;
+ }
+ } while ((len < 0 && errno == EINTR)
+ || (len > 0 && bytes_read < sizeof(tmp_read)));
+ if (bytes_read != sizeof(tmp_read)) {
+ ret = close(wait_shm_fd);
+ if (ret) {
+ ERR("close wait_shm_fd");
+ }
+ goto open_write;
+ }
+ goto end;
+ } else if (wait_shm_fd < 0 && errno != ENOENT) {
+ /*
+		 * Read-only open did not work, and it's not because the
+ * entry was not present. It's a failure that prohibits
+ * using shm.
+ */
+ ERR("Error opening shm %s", sock_info->wait_shm_path);
+ goto end;
+ }
+
+open_write:
+ /*
+ * If the open failed because the file did not exist, or because
+	 * the file was not truncated yet, try creating it ourselves.
+ */
+ URCU_TLS(lttng_ust_nest_count)++;
+ pid = fork();
+ URCU_TLS(lttng_ust_nest_count)--;
+ if (pid > 0) {
+ int status;
+
+ /*
+ * Parent: wait for child to return, in which case the
+ * shared memory map will have been created.
+ */
+ pid = wait(&status);
+ if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+ wait_shm_fd = -1;
+ goto end;
+ }
+ /*
+ * Try to open read-only again after creation.
+ */
+ wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
+ if (wait_shm_fd < 0) {
+ /*
+			 * Read-only open did not work. It's a failure
+ * that prohibits using shm.
+ */
+ ERR("Error opening shm %s", sock_info->wait_shm_path);
+ goto end;
+ }
+ goto end;
+ } else if (pid == 0) {
+ int create_mode;
+
+ /* Child */
+ create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
+ if (sock_info->global)
+ create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
+ /*
+ * We're alone in a child process, so we can modify the
+ * process-wide umask.
+ */
+ umask(~create_mode);
+ /*
+ * Try creating shm (or get rw access).
+ * We don't do an exclusive open, because we allow other
+ * processes to create+ftruncate it concurrently.
+ */
+ wait_shm_fd = shm_open(sock_info->wait_shm_path,
+ O_RDWR | O_CREAT, create_mode);
+ if (wait_shm_fd >= 0) {
+ ret = ftruncate(wait_shm_fd, mmap_size);
+ if (ret) {
+ PERROR("ftruncate");
+ _exit(EXIT_FAILURE);
+ }
+ _exit(EXIT_SUCCESS);
+ }
+ /*
+ * For local shm, we need to have rw access to accept
+ * opening it: this means the local sessiond will be
+ * able to wake us up. For global shm, we open it even
+ * if rw access is not granted, because the root.root
+ * sessiond will be able to override all rights and wake
+ * us up.
+ */
+ if (!sock_info->global && errno != EACCES) {
+ ERR("Error opening shm %s", sock_info->wait_shm_path);
+ _exit(EXIT_FAILURE);
+ }
+ /*
+ * The shm exists, but we cannot open it RW. Report
+ * success.
+ */
+ _exit(EXIT_SUCCESS);
+ } else {
+ return -1;
+ }
+end:
+ if (wait_shm_fd >= 0 && !sock_info->global) {
+ struct stat statbuf;
+
+ /*
+ * Ensure that our user is the owner of the shm file for
+ * local shm. If we do not own the file, it means our
+ * sessiond will not have access to wake us up (there is
+ * probably a rogue process trying to fake our
+ * sessiond). Fallback to polling method in this case.
+ */
+ ret = fstat(wait_shm_fd, &statbuf);
+ if (ret) {
+ PERROR("fstat");
+ goto error_close;
+ }
+ if (statbuf.st_uid != getuid())
+ goto error_close;
+ }
+ return wait_shm_fd;
+
+error_close:
+ ret = close(wait_shm_fd);
+ if (ret) {
+ PERROR("Error closing fd");
+ }
+ return -1;
+}
+
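+/*
+ * mmap one page of the wait shm read-only. The file descriptor is
+ * registered with the fd tracker only while it is open; it is closed
+ * right after the mapping is taken, so only the mapping outlives this
+ * function.
+ */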
+static
+char *get_map_shm(struct sock_info *sock_info)
+{
+ long page_size;
+ int wait_shm_fd, ret;
+ char *wait_shm_mmap;
+
+ page_size = sysconf(_SC_PAGE_SIZE);
+ if (page_size <= 0) {
+ if (!page_size) {
+ errno = EINVAL;
+ }
+ PERROR("Error in sysconf(_SC_PAGE_SIZE)");
+ goto error;
+ }
+
+ lttng_ust_lock_fd_tracker();
+ wait_shm_fd = get_wait_shm(sock_info, page_size);
+ if (wait_shm_fd < 0) {
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+
+ ret = lttng_ust_add_fd_to_tracker(wait_shm_fd);
+ if (ret < 0) {
+ ret = close(wait_shm_fd);
+		if (ret) {
+ PERROR("Error closing fd");
+ }
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+
+ wait_shm_fd = ret;
+ lttng_ust_unlock_fd_tracker();
+
+ wait_shm_mmap = mmap(NULL, page_size, PROT_READ,
+ MAP_SHARED, wait_shm_fd, 0);
+
+ /* close shm fd immediately after taking the mmap reference */
+ lttng_ust_lock_fd_tracker();
+ ret = close(wait_shm_fd);
+ if (!ret) {
+ lttng_ust_delete_fd_from_tracker(wait_shm_fd);
+ } else {
+ PERROR("Error closing fd");
+ }
+ lttng_ust_unlock_fd_tracker();
+
+ if (wait_shm_mmap == MAP_FAILED) {
+ DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
+ goto error;
+ }
+ return wait_shm_mmap;
+
+error:
+ return NULL;
+}
+
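+/*
+ * Block on the wait shm futex until a session daemon wakes us up, or
+ * return immediately if the wake-up flag is already set. On kernels
+ * where futexes on read-only mappings are broken, switch to the
+ * polling fallback instead of waiting.
+ */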
+static
+void wait_for_sessiond(struct sock_info *sock_info)
+{
+ /* Use ust_lock to check if we should quit. */
+ if (ust_lock()) {
+ goto quit;
+ }
+ if (wait_poll_fallback) {
+ goto error;
+ }
+ ust_unlock();
+
+ assert(sock_info->wait_shm_mmap);
+
+ DBG("Waiting for %s apps sessiond", sock_info->name);
+ /* Wait for futex wakeup */
+ if (uatomic_read((int32_t *) sock_info->wait_shm_mmap))
+ goto end_wait;
+
+ while (lttng_ust_futex_async((int32_t *) sock_info->wait_shm_mmap,
+ FUTEX_WAIT, 0, NULL, NULL, 0)) {
+ switch (errno) {
+ case EWOULDBLOCK:
+ /* Value already changed. */
+ goto end_wait;
+ case EINTR:
+ /* Retry if interrupted by signal. */
+ break; /* Get out of switch. */
+ case EFAULT:
+ wait_poll_fallback = 1;
+ DBG(
+"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
+"do not support FUTEX_WAKE on read-only memory mappings correctly. "
+"Please upgrade your kernel "
+"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
+"mainline). LTTng-UST will use polling mode fallback.");
+ if (ust_err_debug_enabled())
+ PERROR("futex");
+ goto end_wait;
+ }
+ }
+end_wait:
+ return;
+
+quit:
+ ust_unlock();
+ return;
+
+error:
+ ust_unlock();
+ return;
+}
+
+/*
+ * This thread does not allocate any resource, except within
+ * handle_message, within mutex protection. This mutex protects against
+ * fork and exit.
+ * The other moment it allocates resources is at socket connection, which
+ * is also protected by the mutex.
+ */
+static
+void *ust_listener_thread(void *arg)
+{
+ struct sock_info *sock_info = arg;
+ int sock, ret, prev_connect_failed = 0, has_waited = 0, fd;
+ long timeout;
+
+ lttng_ust_fixup_tls();
+ /*
+ * If available, add '-ust' to the end of this thread's
+ * process name
+ */
+ ret = lttng_ust_setustprocname();
+ if (ret) {
+ ERR("Unable to set UST process name");
+ }
+
+ /* Restart trying to connect to the session daemon */
+restart:
+ if (prev_connect_failed) {
+ /* Wait for sessiond availability with pipe */
+ wait_for_sessiond(sock_info);
+ if (has_waited) {
+ has_waited = 0;
+ /*
+ * Sleep for 5 seconds before retrying after a
+ * sequence of failure / wait / failure. This
+ * deals with a killed or broken session daemon.
+ */
+ sleep(5);
+ } else {
+ has_waited = 1;
+ }
+ prev_connect_failed = 0;
+ }
+
+ if (ust_lock()) {
+ goto quit;
+ }
+
+ if (sock_info->socket != -1) {
+ /* FD tracker is updated by ustcomm_close_unix_sock() */
+ ret = ustcomm_close_unix_sock(sock_info->socket);
+ if (ret) {
+ ERR("Error closing %s ust cmd socket",
+ sock_info->name);
+ }
+ sock_info->socket = -1;
+ }
+ if (sock_info->notify_socket != -1) {
+ /* FD tracker is updated by ustcomm_close_unix_sock() */
+ ret = ustcomm_close_unix_sock(sock_info->notify_socket);
+ if (ret) {
+ ERR("Error closing %s ust notify socket",
+ sock_info->name);
+ }
+ sock_info->notify_socket = -1;
+ }
+
+
+ /*
+ * Register. We need to perform both connect and sending
+ * registration message before doing the next connect otherwise
+ * we may reach unix socket connect queue max limits and block
+ * on the 2nd connect while the session daemon is awaiting the
+ * first connect registration message.
+ */
+ /* Connect cmd socket */
+ lttng_ust_lock_fd_tracker();
+ ret = ustcomm_connect_unix_sock(sock_info->sock_path,
+ get_connect_sock_timeout());
+ if (ret < 0) {
+ lttng_ust_unlock_fd_tracker();
+ DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
+ prev_connect_failed = 1;
+
+ /*
+ * If we cannot find the sessiond daemon, don't delay
+ * constructor execution.
+ */
+ ret = handle_register_failed(sock_info);
+ assert(!ret);
+ ust_unlock();
+ goto restart;
+ }
+ fd = ret;
+ ret = lttng_ust_add_fd_to_tracker(fd);
+ if (ret < 0) {
+ ret = close(fd);
+ if (ret) {
+ PERROR("close on sock_info->socket");
+ }
+ ret = -1;
+ lttng_ust_unlock_fd_tracker();
+ ust_unlock();
+ goto quit;
+ }
+
+ sock_info->socket = ret;
+ lttng_ust_unlock_fd_tracker();
+
+ ust_unlock();
+ /*
+ * Unlock/relock ust lock because connect is blocking (with
+ * timeout). Don't delay constructors on the ust lock for too
+ * long.
+ */
+ if (ust_lock()) {
+ goto quit;
+ }
+
+ /*
+ * Create only one root handle per listener thread for the whole
+	 * process lifetime, so we ensure we get the ID which is statically
+ * assigned to the root handle.
+ */
+ if (sock_info->root_handle == -1) {
+ ret = lttng_abi_create_root_handle();
+ if (ret < 0) {
+ ERR("Error creating root handle");
+ goto quit;
+ }
+ sock_info->root_handle = ret;
+ }
+
+ ret = register_to_sessiond(sock_info->socket, USTCTL_SOCKET_CMD);
+ if (ret < 0) {
+ ERR("Error registering to %s ust cmd socket",
+ sock_info->name);
+ prev_connect_failed = 1;
+ /*
+ * If we cannot register to the sessiond daemon, don't
+ * delay constructor execution.
+ */
+ ret = handle_register_failed(sock_info);
+ assert(!ret);
+ ust_unlock();
+ goto restart;
+ }
+
+ ust_unlock();
+ /*
+ * Unlock/relock ust lock because connect is blocking (with
+ * timeout). Don't delay constructors on the ust lock for too
+ * long.
+ */
+ if (ust_lock()) {
+ goto quit;
+ }
+
+ /* Connect notify socket */
+ lttng_ust_lock_fd_tracker();
+ ret = ustcomm_connect_unix_sock(sock_info->sock_path,
+ get_connect_sock_timeout());
+ if (ret < 0) {
+ lttng_ust_unlock_fd_tracker();
+ DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
+ prev_connect_failed = 1;
+
+ /*
+ * If we cannot find the sessiond daemon, don't delay
+ * constructor execution.
+ */
+ ret = handle_register_failed(sock_info);
+ assert(!ret);
+ ust_unlock();
+ goto restart;
+ }
+
+ fd = ret;
+ ret = lttng_ust_add_fd_to_tracker(fd);
+ if (ret < 0) {
+ ret = close(fd);
+ if (ret) {
+ PERROR("close on sock_info->notify_socket");
+ }
+ ret = -1;
+ lttng_ust_unlock_fd_tracker();
+ ust_unlock();
+ goto quit;
+ }
+
+ sock_info->notify_socket = ret;
+ lttng_ust_unlock_fd_tracker();
+
+ ust_unlock();
+ /*
+ * Unlock/relock ust lock because connect is blocking (with
+ * timeout). Don't delay constructors on the ust lock for too
+ * long.
+ */
+ if (ust_lock()) {
+ goto quit;
+ }
+
+ timeout = get_notify_sock_timeout();
+ if (timeout >= 0) {
+ /*
+ * Give at least 10ms to sessiond to reply to
+ * notifications.
+ */
+ if (timeout < 10)
+ timeout = 10;
+ ret = ustcomm_setsockopt_rcv_timeout(sock_info->notify_socket,
+ timeout);
+ if (ret < 0) {
+ WARN("Error setting socket receive timeout");
+ }
+ ret = ustcomm_setsockopt_snd_timeout(sock_info->notify_socket,
+ timeout);
+ if (ret < 0) {
+ WARN("Error setting socket send timeout");
+ }
+ } else if (timeout < -1) {
+ WARN("Unsupported timeout value %ld", timeout);
+ }
+
+ ret = register_to_sessiond(sock_info->notify_socket,
+ USTCTL_SOCKET_NOTIFY);
+ if (ret < 0) {
+ ERR("Error registering to %s ust notify socket",
+ sock_info->name);
+ prev_connect_failed = 1;
+ /*
+ * If we cannot register to the sessiond daemon, don't
+ * delay constructor execution.
+ */
+ ret = handle_register_failed(sock_info);
+ assert(!ret);
+ ust_unlock();
+ goto restart;
+ }
+ sock = sock_info->socket;
+
+ ust_unlock();
+
+ for (;;) {
+ ssize_t len;
+ struct ustcomm_ust_msg lum;
+
+ len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
+ switch (len) {
+ case 0: /* orderly shutdown */
+ DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
+ if (ust_lock()) {
+ goto quit;
+ }
+ /*
+			 * Either sessiond has shut down or refused us by closing the socket.
+			 * In either case, we don't want to delay constructor execution,
+			 * and we need to wait before retrying.
+ */
+ prev_connect_failed = 1;
+ /*
+ * If we cannot register to the sessiond daemon, don't
+ * delay constructor execution.
+ */
+ ret = handle_register_failed(sock_info);
+ assert(!ret);
+ ust_unlock();
+ goto end;
+ case sizeof(lum):
+ print_cmd(lum.cmd, lum.handle);
+ ret = handle_message(sock_info, sock, &lum);
+ if (ret) {
+ ERR("Error handling message for %s socket",
+ sock_info->name);
+ /*
+ * Close socket if protocol error is
+ * detected.
+ */
+ goto end;
+ }
+ continue;
+ default:
+ if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ } else {
+ DBG("incorrect message size (%s socket): %zd", sock_info->name, len);
+ }
+ if (len == -ECONNRESET) {
+ DBG("%s remote end closed connection", sock_info->name);
+ goto end;
+ }
+ goto end;
+ }
+
+ }
+end:
+ if (ust_lock()) {
+ goto quit;
+ }
+ /* Cleanup socket handles before trying to reconnect */
+ lttng_ust_abi_objd_table_owner_cleanup(sock_info);
+ ust_unlock();
+ goto restart; /* try to reconnect */
+
+quit:
+ ust_unlock();
+
+ pthread_mutex_lock(&ust_exit_mutex);
+ sock_info->thread_active = 0;
+ pthread_mutex_unlock(&ust_exit_mutex);
+ return NULL;
+}
+
+/*
+ * Weak symbol to call when the ust malloc wrapper is not loaded.
+ */
+__attribute__((weak))
+void lttng_ust_libc_wrapper_malloc_init(void)
+{
+}
+
+/*
+ * sessiond monitoring thread: monitor presence of global and per-user
+ * sessiond by polling the application common named pipe.
+ */
+static
+void lttng_ust_init(void)
+ __attribute__((constructor));
+static
+void lttng_ust_init(void)
+{
+ struct timespec constructor_timeout;
+ sigset_t sig_all_blocked, orig_parent_mask;
+ pthread_attr_t thread_attr;
+ int timeout_mode;
+ int ret;
+ void *handle;
+
+ if (uatomic_xchg(&initialized, 1) == 1)
+ return;
+
+ /*
+ * Fixup interdependency between TLS fixup mutex (which happens
+ * to be the dynamic linker mutex) and ust_lock, taken within
+ * the ust lock.
+ */
+ lttng_ust_fixup_tls();
+
+ lttng_ust_loaded = 1;
+
+ /*
+	 * We need to ensure that the liblttng-ust library is not unloaded to avoid
+	 * the unloading of code used by the ust_listener_threads as we cannot
+	 * reliably know when they have exited. To do that, manually load
+ * liblttng-ust.so to increment the dynamic loader's internal refcount for
+ * this library so it never becomes zero, thus never gets unloaded from the
+ * address space of the process. Since we are already running in the
+ * constructor of the LTTNG_UST_LIB_SONAME library, calling dlopen will
+	 * simply increment the refcount and no additional work is needed by the
+	 * dynamic loader as the shared library is already loaded in the address
+	 * space. As a safeguard, we use the RTLD_NODELETE flag to prevent
+ * unloading of the UST library if its refcount becomes zero (which should
+ * never happen). Do the return value check but discard the handle at the
+ * end of the function as it's not needed.
+ */
+ handle = dlopen(LTTNG_UST_LIB_SONAME, RTLD_LAZY | RTLD_NODELETE);
+ if (!handle) {
+ ERR("dlopen of liblttng-ust shared library (%s).", LTTNG_UST_LIB_SONAME);
+ }
+
+ /*
+ * We want precise control over the order in which we construct
+ * our sub-libraries vs starting to receive commands from
+ * sessiond (otherwise leading to errors when trying to create
+ * sessiond before the init functions are completed).
+ */
+ ust_err_init();
+ lttng_ust_getenv_init(); /* Needs ust_err_init() to be completed. */
+ lttng_ust_tp_init();
+ lttng_ust_init_fd_tracker();
+ lttng_ust_clock_init();
+ lttng_ust_getcpu_init();
+ lttng_ust_statedump_init();
+ lttng_ust_ring_buffer_clients_init();
+ lttng_ust_counter_clients_init();
+ lttng_perf_counter_init();
+ /*
+ * Invoke ust malloc wrapper init before starting other threads.
+ */
+ lttng_ust_libc_wrapper_malloc_init();
+
+ timeout_mode = get_constructor_timeout(&constructor_timeout);
+
+ get_allow_blocking();
+
+ ret = sem_init(&constructor_wait, 0, 0);
+ if (ret) {
+ PERROR("sem_init");
+ }
+
+ ret = setup_global_apps();
+ if (ret) {
+ assert(global_apps.allowed == 0);
+ DBG("global apps setup returned %d", ret);
+ }
+
+ ret = setup_local_apps();
+ if (ret) {
+ assert(local_apps.allowed == 0);
+ DBG("local apps setup returned %d", ret);
+ }
+
+ /* A new thread created by pthread_create inherits the signal mask
+ * from the parent. To avoid any signal being received by the
+ * listener thread, we block all signals temporarily in the parent,
+ * while we create the listener thread.
+ */
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+
+ ret = pthread_attr_init(&thread_attr);
+ if (ret) {
+ ERR("pthread_attr_init: %s", strerror(ret));
+ }
+ ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
+ if (ret) {
+ ERR("pthread_attr_setdetachstate: %s", strerror(ret));
+ }
+
+ if (global_apps.allowed) {
+ pthread_mutex_lock(&ust_exit_mutex);
+ ret = pthread_create(&global_apps.ust_listener, &thread_attr,
+ ust_listener_thread, &global_apps);
+ if (ret) {
+ ERR("pthread_create global: %s", strerror(ret));
+ }
+ global_apps.thread_active = 1;
+ pthread_mutex_unlock(&ust_exit_mutex);
+ } else {
+ handle_register_done(&global_apps);
+ }
+
+ if (local_apps.allowed) {
+ pthread_mutex_lock(&ust_exit_mutex);
+ ret = pthread_create(&local_apps.ust_listener, &thread_attr,
+ ust_listener_thread, &local_apps);
+ if (ret) {
+ ERR("pthread_create local: %s", strerror(ret));
+ }
+ local_apps.thread_active = 1;
+ pthread_mutex_unlock(&ust_exit_mutex);
+ } else {
+ handle_register_done(&local_apps);
+ }
+ ret = pthread_attr_destroy(&thread_attr);
+ if (ret) {
+ ERR("pthread_attr_destroy: %s", strerror(ret));
+ }
+
+ /* Restore original signal mask in parent */
+ ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+
+ switch (timeout_mode) {
+ case 1: /* timeout wait */
+ do {
+ ret = sem_timedwait(&constructor_wait,
+ &constructor_timeout);
+ } while (ret < 0 && errno == EINTR);
+ if (ret < 0) {
+ switch (errno) {
+ case ETIMEDOUT:
+ ERR("Timed out waiting for lttng-sessiond");
+ break;
+ case EINVAL:
+ PERROR("sem_timedwait");
+ break;
+ default:
+ ERR("Unexpected error \"%s\" returned by sem_timedwait",
+ strerror(errno));
+ }
+ }
+ break;
+ case -1:/* wait forever */
+ do {
+ ret = sem_wait(&constructor_wait);
+ } while (ret < 0 && errno == EINTR);
+ if (ret < 0) {
+ switch (errno) {
+ case EINVAL:
+ PERROR("sem_wait");
+ break;
+ default:
+ ERR("Unexpected error \"%s\" returned by sem_wait",
+ strerror(errno));
+ }
+ }
+ break;
+ case 0: /* no timeout */
+ break;
+ }
+}
+
+static
+void lttng_ust_cleanup(int exiting)
+{
+ cleanup_sock_info(&global_apps, exiting);
+ cleanup_sock_info(&local_apps, exiting);
+ local_apps.allowed = 0;
+ global_apps.allowed = 0;
+ /*
+	 * The teardown steps in this function all affect data structures
+	 * accessed under the UST lock by the listener threads. This
+	 * lock, along with the lttng_ust_comm_should_quit flag, ensures
+ * that none of these threads are accessing this data at this
+ * point.
+ */
+ lttng_ust_abi_exit();
+ lttng_ust_abi_events_exit();
+ lttng_perf_counter_exit();
+ lttng_ust_ring_buffer_clients_exit();
+ lttng_ust_counter_clients_exit();
+ lttng_ust_statedump_destroy();
+ lttng_ust_tp_exit();
+ if (!exiting) {
+ /* Reinitialize values for fork */
+ sem_count = sem_count_initial_value;
+ lttng_ust_comm_should_quit = 0;
+ initialized = 0;
+ }
+}
+
+static
+void lttng_ust_exit(void)
+ __attribute__((destructor));
+static
+void lttng_ust_exit(void)
+{
+ int ret;
+
+ /*
+ * Using pthread_cancel here because:
+ * A) we don't want to hang application teardown.
+ * B) the thread is not allocating any resource.
+ */
+
+ /*
+ * Require the communication thread to quit. Synchronize with
+ * mutexes to ensure it is not in a mutex critical section when
+ * pthread_cancel is later called.
+ */
+ ust_lock_nocheck();
+ lttng_ust_comm_should_quit = 1;
+ ust_unlock();
+
+ pthread_mutex_lock(&ust_exit_mutex);
+ /* cancel threads */
+ if (global_apps.thread_active) {
+ ret = pthread_cancel(global_apps.ust_listener);
+ if (ret) {
+ ERR("Error cancelling global ust listener thread: %s",
+ strerror(ret));
+ } else {
+ global_apps.thread_active = 0;
+ }
+ }
+ if (local_apps.thread_active) {
+ ret = pthread_cancel(local_apps.ust_listener);
+ if (ret) {
+ ERR("Error cancelling local ust listener thread: %s",
+ strerror(ret));
+ } else {
+ local_apps.thread_active = 0;
+ }
+ }
+ pthread_mutex_unlock(&ust_exit_mutex);
+
+ /*
+ * Do NOT join threads: use of sys_futex makes it impossible to
+ * join the threads without using async-cancel, but async-cancel
+ * is delivered by a signal, which could hit the target thread
+ * anywhere in its code path, including while the ust_lock() is
+ * held, causing a deadlock for the other thread. Let the OS
+	 * clean up the threads if they are stalled in a syscall.
+ */
+ lttng_ust_cleanup(1);
+}
+
+static
+void ust_context_ns_reset(void)
+{
+ lttng_context_pid_ns_reset();
+ lttng_context_cgroup_ns_reset();
+ lttng_context_ipc_ns_reset();
+ lttng_context_mnt_ns_reset();
+ lttng_context_net_ns_reset();
+ lttng_context_user_ns_reset();
+ lttng_context_time_ns_reset();
+ lttng_context_uts_ns_reset();
+}
+
+static
+void ust_context_vuids_reset(void)
+{
+ lttng_context_vuid_reset();
+ lttng_context_veuid_reset();
+ lttng_context_vsuid_reset();
+}
+
+static
+void ust_context_vgids_reset(void)
+{
+ lttng_context_vgid_reset();
+ lttng_context_vegid_reset();
+ lttng_context_vsgid_reset();
+}
+
+/*
+ * We exclude the worker threads across fork and clone (except
+ * CLONE_VM), because these system calls only keep the forking thread
+ * running in the child. Therefore, we don't want to call fork or clone
+ * in the middle of a tracepoint or ust tracing state modification.
+ * Holding this mutex protects these structures across fork and clone.
+ */
+void lttng_ust_before_fork(sigset_t *save_sigset)
+{
+ /*
+ * Disable signals. This is to avoid that the child intervenes
+ * before it is properly setup for tracing. It is safer to
+ * disable all signals, because then we know we are not breaking
+ * anything by restoring the original mask.
+ */
+ sigset_t all_sigs;
+ int ret;
+
+ /* Fixup lttng-ust TLS. */
+ lttng_ust_fixup_tls();
+
+ if (URCU_TLS(lttng_ust_nest_count))
+ return;
+ /* Disable signals */
+ sigfillset(&all_sigs);
+ ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
+ if (ret == -1) {
+ PERROR("sigprocmask");
+ }
+
+ pthread_mutex_lock(&ust_fork_mutex);
+
+ ust_lock_nocheck();
+ lttng_ust_urcu_before_fork();
+ lttng_ust_lock_fd_tracker();
+ lttng_perf_lock();
+}
+
+static void ust_after_fork_common(sigset_t *restore_sigset)
+{
+ int ret;
+
+ DBG("process %d", getpid());
+ lttng_perf_unlock();
+ lttng_ust_unlock_fd_tracker();
+ ust_unlock();
+
+ pthread_mutex_unlock(&ust_fork_mutex);
+
+ /* Restore signals */
+ ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
+ if (ret == -1) {
+ PERROR("sigprocmask");
+ }
+}
+
+void lttng_ust_after_fork_parent(sigset_t *restore_sigset)
+{
+ if (URCU_TLS(lttng_ust_nest_count))
+ return;
+ DBG("process %d", getpid());
+ lttng_ust_urcu_after_fork_parent();
+ /* Release mutexes and reenable signals */
+ ust_after_fork_common(restore_sigset);
+}
+
+/*
+ * After fork, in the child, we need to clean up all the leftover state,
+ * except the worker thread which already magically disappeared thanks
+ * to the weird Linux fork semantics. After tidying up, we call
+ * lttng_ust_init() again to start over as a new PID.
+ *
+ * This is meant for fork() calls where the child traces between the
+ * fork and the following exec call (if there is any).
+ */
+void lttng_ust_after_fork_child(sigset_t *restore_sigset)
+{
+ if (URCU_TLS(lttng_ust_nest_count))
+ return;
+ lttng_context_vpid_reset();
+ lttng_context_vtid_reset();
+ lttng_ust_context_procname_reset();
+ ust_context_ns_reset();
+ ust_context_vuids_reset();
+ ust_context_vgids_reset();
+ DBG("process %d", getpid());
+ /* Release urcu mutexes */
+ lttng_ust_urcu_after_fork_child();
+ lttng_ust_cleanup(0);
+ /* Release mutexes and reenable signals */
+ ust_after_fork_common(restore_sigset);
+ lttng_ust_init();
+}
+
+void lttng_ust_after_setns(void)
+{
+ ust_context_ns_reset();
+ ust_context_vuids_reset();
+ ust_context_vgids_reset();
+}
+
+void lttng_ust_after_unshare(void)
+{
+ ust_context_ns_reset();
+ ust_context_vuids_reset();
+ ust_context_vgids_reset();
+}
+
+void lttng_ust_after_setuid(void)
+{
+ ust_context_vuids_reset();
+}
+
+void lttng_ust_after_seteuid(void)
+{
+ ust_context_vuids_reset();
+}
+
+void lttng_ust_after_setreuid(void)
+{
+ ust_context_vuids_reset();
+}
+
+void lttng_ust_after_setresuid(void)
+{
+ ust_context_vuids_reset();
+}
+
+void lttng_ust_after_setgid(void)
+{
+ ust_context_vgids_reset();
+}
+
+void lttng_ust_after_setegid(void)
+{
+ ust_context_vgids_reset();
+}
+
+void lttng_ust_after_setregid(void)
+{
+ ust_context_vgids_reset();
+}
+
+void lttng_ust_after_setresgid(void)
+{
+ ust_context_vgids_reset();
+}
+
+void lttng_ust_sockinfo_session_enabled(void *owner)
+{
+ struct sock_info *sock_info = owner;
+ sock_info->statedump_pending = 1;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * UST dynamic type implementation.
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <inttypes.h>
+
+#include "common/macros.h"
+#include "common/dynamic-type.h"
+
+#define ctf_enum_value(_string, _value) \
+ __LTTNG_COMPOUND_LITERAL(struct lttng_ust_enum_entry, { \
+ .struct_size = sizeof(struct lttng_ust_enum_entry), \
+ .start = { \
+ .signedness = lttng_ust_is_signed_type(__typeof__(_value)), \
+ .value = lttng_ust_is_signed_type(__typeof__(_value)) ? \
+ (long long) (_value) : (_value), \
+ }, \
+ .end = { \
+ .signedness = lttng_ust_is_signed_type(__typeof__(_value)), \
+ .value = lttng_ust_is_signed_type(__typeof__(_value)) ? \
+ (long long) (_value) : (_value), \
+ }, \
+ .string = (_string), \
+ }),
+
+static const struct lttng_ust_enum_entry *dt_enum[_NR_LTTNG_UST_DYNAMIC_TYPES] = {
+ [LTTNG_UST_DYNAMIC_TYPE_NONE] = ctf_enum_value("_none", 0)
+ [LTTNG_UST_DYNAMIC_TYPE_S8] = ctf_enum_value("_int8", 1)
+ [LTTNG_UST_DYNAMIC_TYPE_S16] = ctf_enum_value("_int16", 2)
+ [LTTNG_UST_DYNAMIC_TYPE_S32] = ctf_enum_value("_int32", 3)
+ [LTTNG_UST_DYNAMIC_TYPE_S64] = ctf_enum_value("_int64", 4)
+ [LTTNG_UST_DYNAMIC_TYPE_U8] = ctf_enum_value("_uint8", 5)
+ [LTTNG_UST_DYNAMIC_TYPE_U16] = ctf_enum_value("_uint16", 6)
+ [LTTNG_UST_DYNAMIC_TYPE_U32] = ctf_enum_value("_uint32", 7)
+ [LTTNG_UST_DYNAMIC_TYPE_U64] = ctf_enum_value("_uint64", 8)
+ [LTTNG_UST_DYNAMIC_TYPE_FLOAT] = ctf_enum_value("_float", 9)
+ [LTTNG_UST_DYNAMIC_TYPE_DOUBLE] = ctf_enum_value("_double", 10)
+ [LTTNG_UST_DYNAMIC_TYPE_STRING] = ctf_enum_value("_string", 11)
+};
+
+static struct lttng_ust_enum_desc dt_enum_desc = {
+ .name = "dynamic_type_enum",
+ .entries = dt_enum,
+ .nr_entries = LTTNG_ARRAY_SIZE(dt_enum),
+};
+
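+/*
+ * Pre-built variant choice fields, one per dynamic type, indexed by
+ * enum lttng_ust_dynamic_type and returned by
+ * lttng_ust_dynamic_type_field().
+ */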
+const struct lttng_ust_event_field *dt_var_fields[_NR_LTTNG_UST_DYNAMIC_TYPES] = {
+ [LTTNG_UST_DYNAMIC_TYPE_NONE] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "none",
+ .type = (struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(struct lttng_ust_type_struct, {
+ .parent = {
+ .type = lttng_ust_type_struct,
+ },
+ .struct_size = sizeof(struct lttng_ust_type_struct),
+ .nr_fields = 0, /* empty struct */
+ .alignment = 0,
+ }),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_S8] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "int8",
+ .type = lttng_ust_type_integer_define(int8_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_S16] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "int16",
+ .type = lttng_ust_type_integer_define(int16_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_S32] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "int32",
+ .type = lttng_ust_type_integer_define(int32_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_S64] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "int64",
+ .type = lttng_ust_type_integer_define(int64_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_U8] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "uint8",
+ .type = lttng_ust_type_integer_define(uint8_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_U16] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "uint16",
+ .type = lttng_ust_type_integer_define(uint16_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_U32] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "uint32",
+ .type = lttng_ust_type_integer_define(uint32_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_U64] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "uint64",
+ .type = lttng_ust_type_integer_define(uint64_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_FLOAT] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "float",
+ .type = lttng_ust_type_float_define(float),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_DOUBLE] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "double",
+ .type = lttng_ust_type_float_define(double),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_STRING] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "string",
+ .type = (struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(struct lttng_ust_type_string, {
+ .parent = {
+ .type = lttng_ust_type_string,
+ },
+ .struct_size = sizeof(struct lttng_ust_type_string),
+ .encoding = lttng_ust_string_encoding_UTF8,
+ }),
+ .nowrite = 0,
+ }),
+};
+
+static const struct lttng_ust_event_field dt_enum_field = {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = NULL,
+ .type = (struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(struct lttng_ust_type_enum, {
+ .parent = {
+ .type = lttng_ust_type_enum,
+ },
+ .struct_size = sizeof(struct lttng_ust_type_enum),
+ .desc = &dt_enum_desc,
+ .container_type = lttng_ust_type_integer_define(char, BYTE_ORDER, 10),
+ }),
+ .nowrite = 0,
+};
+
+const struct lttng_ust_event_field *lttng_ust_dynamic_type_field(int64_t value)
+{
+ if (value >= _NR_LTTNG_UST_DYNAMIC_TYPES || value < 0)
+ return NULL;
+ return dt_var_fields[value];
+}
+
+int lttng_ust_dynamic_type_choices(size_t *nr_choices, const struct lttng_ust_event_field ***choices)
+{
+ *nr_choices = _NR_LTTNG_UST_DYNAMIC_TYPES;
+ *choices = dt_var_fields;
+ return 0;
+}
+
+const struct lttng_ust_event_field *lttng_ust_dynamic_type_tag_field(void)
+{
+ return &dt_enum_field;
+}
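The three accessors above (lttng_ust_dynamic_type_field(), lttng_ust_dynamic_type_choices() and lttng_ust_dynamic_type_tag_field()) are the entire public surface of this table. A minimal caller sketch, illustrative only and not part of the patch, using just the names defined above:

	/* Illustrative only: look up the variant choice and tag field for a
	 * dynamically-typed value. */
	static void example_dynamic_type_lookup(void)
	{
		const struct lttng_ust_event_field *field, *tag;
		const struct lttng_ust_event_field **choices;
		size_t nr_choices;

		field = lttng_ust_dynamic_type_field(LTTNG_UST_DYNAMIC_TYPE_U32);
		tag = lttng_ust_dynamic_type_tag_field();
		(void) lttng_ust_dynamic_type_choices(&nr_choices, &choices);
		if (field && tag) {
			/* field->type describes a 32-bit unsigned integer;
			 * tag->type is the enum used to select among the
			 * nr_choices variant choices. */
		}
	}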
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <lttng/ust-utils.h>
+
+#include "common/elf.h"
+#include "common/ust-fd.h"
+
+#include "lttng-tracer-core.h"
+#include "lttng-ust-elf.h"
+#include "common/macros.h"
+
+#define BUF_LEN 4096
+
+#ifndef NT_GNU_BUILD_ID
+# define NT_GNU_BUILD_ID 3
+#endif
+
+/*
+ * Retrieve the nth (where n is the `index` argument) phdr (program
+ * header) from the given elf instance.
+ *
+ * A pointer to the phdr is returned on success, NULL on failure.
+ */
+static
+struct lttng_ust_elf_phdr *lttng_ust_elf_get_phdr(struct lttng_ust_elf *elf,
+ uint16_t index)
+{
+ struct lttng_ust_elf_phdr *phdr = NULL;
+ off_t offset;
+
+ if (!elf) {
+ goto error;
+ }
+
+ if (index >= elf->ehdr->e_phnum) {
+ goto error;
+ }
+
+ phdr = zmalloc(sizeof(struct lttng_ust_elf_phdr));
+ if (!phdr) {
+ goto error;
+ }
+
+ offset = (off_t) elf->ehdr->e_phoff
+ + (off_t) index * elf->ehdr->e_phentsize;
+ if (lseek(elf->fd, offset, SEEK_SET) < 0) {
+ goto error;
+ }
+
+ if (is_elf_32_bit(elf)) {
+ Elf32_Phdr elf_phdr;
+
+ if (lttng_ust_read(elf->fd, &elf_phdr, sizeof(elf_phdr))
+ < sizeof(elf_phdr)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ bswap_phdr(elf_phdr);
+ }
+ copy_phdr(elf_phdr, *phdr);
+ } else {
+ Elf64_Phdr elf_phdr;
+
+ if (lttng_ust_read(elf->fd, &elf_phdr, sizeof(elf_phdr))
+ < sizeof(elf_phdr)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ bswap_phdr(elf_phdr);
+ }
+ copy_phdr(elf_phdr, *phdr);
+ }
+
+ return phdr;
+
+error:
+ free(phdr);
+ return NULL;
+}
+
+/*
+ * Retrieve the nth (where n is the `index` argument) shdr (section
+ * header) from the given elf instance.
+ *
+ * A pointer to the shdr is returned on success, NULL on failure.
+ */
+static
+struct lttng_ust_elf_shdr *lttng_ust_elf_get_shdr(struct lttng_ust_elf *elf,
+ uint16_t index)
+{
+ struct lttng_ust_elf_shdr *shdr = NULL;
+ off_t offset;
+
+ if (!elf) {
+ goto error;
+ }
+
+ if (index >= elf->ehdr->e_shnum) {
+ goto error;
+ }
+
+ shdr = zmalloc(sizeof(struct lttng_ust_elf_shdr));
+ if (!shdr) {
+ goto error;
+ }
+
+ offset = (off_t) elf->ehdr->e_shoff
+ + (off_t) index * elf->ehdr->e_shentsize;
+ if (lseek(elf->fd, offset, SEEK_SET) < 0) {
+ goto error;
+ }
+
+ if (is_elf_32_bit(elf)) {
+ Elf32_Shdr elf_shdr;
+
+ if (lttng_ust_read(elf->fd, &elf_shdr, sizeof(elf_shdr))
+ < sizeof(elf_shdr)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ bswap_shdr(elf_shdr);
+ }
+ copy_shdr(elf_shdr, *shdr);
+ } else {
+ Elf64_Shdr elf_shdr;
+
+ if (lttng_ust_read(elf->fd, &elf_shdr, sizeof(elf_shdr))
+ < sizeof(elf_shdr)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ bswap_shdr(elf_shdr);
+ }
+ copy_shdr(elf_shdr, *shdr);
+ }
+
+ return shdr;
+
+error:
+ free(shdr);
+ return NULL;
+}
+
+/*
+ * Lookup a section's name from a given offset (usually from an shdr's
+ * sh_name value) in bytes relative to the beginning of the section
+ * names string table.
+ *
+ * If no name is found, NULL is returned.
+ */
+static
+char *lttng_ust_elf_get_section_name(struct lttng_ust_elf *elf, off_t offset)
+{
+ char *name = NULL;
+ size_t len = 0, to_read; /* len does not include \0 */
+
+ if (!elf) {
+ goto error;
+ }
+
+ if (offset >= elf->section_names_size) {
+ goto error;
+ }
+
+ if (lseek(elf->fd, elf->section_names_offset + offset, SEEK_SET) < 0) {
+ goto error;
+ }
+
+ to_read = elf->section_names_size - offset;
+
+ /* Find first \0 after or at current location, remember len. */
+ for (;;) {
+ char buf[BUF_LEN];
+ ssize_t read_len;
+ size_t i;
+
+ if (!to_read) {
+ goto error;
+ }
+ read_len = lttng_ust_read(elf->fd, buf,
+ min_t(size_t, BUF_LEN, to_read));
+ if (read_len <= 0) {
+ goto error;
+ }
+ for (i = 0; i < read_len; i++) {
+ if (buf[i] == '\0') {
+ len += i;
+ goto end;
+ }
+ }
+ len += read_len;
+ to_read -= read_len;
+ }
+end:
+ name = zmalloc(sizeof(char) * (len + 1)); /* + 1 for \0 */
+ if (!name) {
+ goto error;
+ }
+ if (lseek(elf->fd, elf->section_names_offset + offset,
+ SEEK_SET) < 0) {
+ goto error;
+ }
+ if (lttng_ust_read(elf->fd, name, len + 1) < len + 1) {
+ goto error;
+ }
+
+ return name;
+
+error:
+ free(name);
+ return NULL;
+}
+
+/*
+ * Create an instance of lttng_ust_elf for the ELF file located at
+ * `path`.
+ *
+ * Return a pointer to the instance on success, NULL on failure.
+ */
+struct lttng_ust_elf *lttng_ust_elf_create(const char *path)
+{
+ uint8_t e_ident[EI_NIDENT];
+ struct lttng_ust_elf_shdr *section_names_shdr;
+ struct lttng_ust_elf *elf = NULL;
+ int ret, fd;
+
+ elf = zmalloc(sizeof(struct lttng_ust_elf));
+ if (!elf) {
+ goto error;
+ }
+
+ /* Initialize fd field to -1. 0 is a valid fd number */
+ elf->fd = -1;
+
+ elf->path = strdup(path);
+ if (!elf->path) {
+ goto error;
+ }
+
+ lttng_ust_lock_fd_tracker();
+ fd = open(elf->path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+
+ ret = lttng_ust_add_fd_to_tracker(fd);
+ if (ret < 0) {
+ ret = close(fd);
+ if (ret) {
+ PERROR("close on elf->fd");
+ }
+ ret = -1;
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+ elf->fd = ret;
+ lttng_ust_unlock_fd_tracker();
+
+ if (lttng_ust_read(elf->fd, e_ident, EI_NIDENT) < EI_NIDENT) {
+ goto error;
+ }
+ elf->bitness = e_ident[EI_CLASS];
+ elf->endianness = e_ident[EI_DATA];
+
+ if (lseek(elf->fd, 0, SEEK_SET) < 0) {
+ goto error;
+ }
+
+ elf->ehdr = zmalloc(sizeof(struct lttng_ust_elf_ehdr));
+ if (!elf->ehdr) {
+ goto error;
+ }
+
+ if (is_elf_32_bit(elf)) {
+ Elf32_Ehdr elf_ehdr;
+
+ if (lttng_ust_read(elf->fd, &elf_ehdr, sizeof(elf_ehdr))
+ < sizeof(elf_ehdr)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ bswap_ehdr(elf_ehdr);
+ }
+ copy_ehdr(elf_ehdr, *(elf->ehdr));
+ } else {
+ Elf64_Ehdr elf_ehdr;
+
+ if (lttng_ust_read(elf->fd, &elf_ehdr, sizeof(elf_ehdr))
+ < sizeof(elf_ehdr)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ bswap_ehdr(elf_ehdr);
+ }
+ copy_ehdr(elf_ehdr, *(elf->ehdr));
+ }
+
+ section_names_shdr = lttng_ust_elf_get_shdr(elf, elf->ehdr->e_shstrndx);
+ if (!section_names_shdr) {
+ goto error;
+ }
+
+ elf->section_names_offset = section_names_shdr->sh_offset;
+ elf->section_names_size = section_names_shdr->sh_size;
+
+ free(section_names_shdr);
+ return elf;
+
+error:
+ lttng_ust_elf_destroy(elf);
+ return NULL;
+}
+
+/*
+ * Test whether the ELF file is position independent code (PIC)
+ */
+uint8_t lttng_ust_elf_is_pic(struct lttng_ust_elf *elf)
+{
+ /*
+	 * PIC has an e_type value of ET_DYN, see ELF specification
+ * version 1.1 p. 1-3.
+ */
+ return elf->ehdr->e_type == ET_DYN;
+}
+
+/*
+ * Destroy the given lttng_ust_elf instance.
+ */
+void lttng_ust_elf_destroy(struct lttng_ust_elf *elf)
+{
+ int ret;
+
+ if (!elf) {
+ return;
+ }
+
+ if (elf->fd >= 0) {
+ lttng_ust_lock_fd_tracker();
+ ret = close(elf->fd);
+ if (!ret) {
+ lttng_ust_delete_fd_from_tracker(elf->fd);
+ } else {
+ PERROR("close");
+ abort();
+ }
+ lttng_ust_unlock_fd_tracker();
+ }
+
+ free(elf->ehdr);
+ free(elf->path);
+ free(elf);
+}
+
+/*
+ * Compute the total in-memory size of the ELF file, in bytes.
+ *
+ * Returns 0 if successful, -1 if not. On success, the memory size is
+ * returned through the out parameter `memsz`.
+ */
+int lttng_ust_elf_get_memsz(struct lttng_ust_elf *elf, uint64_t *memsz)
+{
+ uint16_t i;
+ uint64_t low_addr = UINT64_MAX, high_addr = 0;
+
+ if (!elf || !memsz) {
+ goto error;
+ }
+
+ for (i = 0; i < elf->ehdr->e_phnum; ++i) {
+ struct lttng_ust_elf_phdr *phdr;
+
+ phdr = lttng_ust_elf_get_phdr(elf, i);
+ if (!phdr) {
+ goto error;
+ }
+
+ /*
+ * Only PT_LOAD segments contribute to memsz. Skip
+ * other segments.
+ */
+ if (phdr->p_type != PT_LOAD) {
+ goto next_loop;
+ }
+
+ low_addr = min_t(uint64_t, low_addr, phdr->p_vaddr);
+ high_addr = max_t(uint64_t, high_addr,
+ phdr->p_vaddr + phdr->p_memsz);
+ next_loop:
+ free(phdr);
+ }
+
+ if (high_addr < low_addr) {
+ /* No PT_LOAD segments or corrupted data. */
+ goto error;
+ }
+
+ *memsz = high_addr - low_addr;
+ return 0;
+error:
+ return -1;
+}
+
+/*
+ * Internal method used to try and get the build_id from a PT_NOTE
+ * segment ranging from `offset` to `segment_end`.
+ *
+ * On success, the out parameters `build_id` and `length` are set with
+ * the retrieved information if (and only if) a build id note was found
+ * in the segment; otherwise they are left untouched.
+ *
+ * Returns 0 on success, -1 if an error occurred.
+ */
+static
+int lttng_ust_elf_get_build_id_from_segment(
+ struct lttng_ust_elf *elf, uint8_t **build_id, size_t *length,
+ off_t offset, off_t segment_end)
+{
+ uint8_t *_build_id = NULL; /* Silence old gcc warning. */
+ size_t _length = 0; /* Silence old gcc warning. */
+
+ while (offset < segment_end) {
+ struct lttng_ust_elf_nhdr nhdr;
+ size_t read_len;
+
+ /* Align start of note entry */
+ offset += lttng_ust_offset_align(offset, ELF_NOTE_ENTRY_ALIGN);
+ if (offset >= segment_end) {
+ break;
+ }
+ /*
+ * We seek manually because if the note isn't the
+ * build id the data following the header will not
+ * have been read.
+ */
+ if (lseek(elf->fd, offset, SEEK_SET) < 0) {
+ goto error;
+ }
+ if (lttng_ust_read(elf->fd, &nhdr, sizeof(nhdr))
+ < sizeof(nhdr)) {
+ goto error;
+ }
+
+ if (!is_elf_native_endian(elf)) {
+ nhdr.n_namesz = bswap_32(nhdr.n_namesz);
+ nhdr.n_descsz = bswap_32(nhdr.n_descsz);
+ nhdr.n_type = bswap_32(nhdr.n_type);
+ }
+
+ offset += sizeof(nhdr) + nhdr.n_namesz;
+ /* Align start of desc entry */
+ offset += lttng_ust_offset_align(offset, ELF_NOTE_DESC_ALIGN);
+
+ if (nhdr.n_type != NT_GNU_BUILD_ID) {
+ /*
+ * Ignore non build id notes but still
+ * increase the offset.
+ */
+ offset += nhdr.n_descsz;
+ continue;
+ }
+
+ _length = nhdr.n_descsz;
+ _build_id = zmalloc(sizeof(uint8_t) * _length);
+ if (!_build_id) {
+ goto error;
+ }
+
+ if (lseek(elf->fd, offset, SEEK_SET) < 0) {
+ goto error;
+ }
+ read_len = sizeof(*_build_id) * _length;
+ if (lttng_ust_read(elf->fd, _build_id, read_len) < read_len) {
+ goto error;
+ }
+
+ break;
+ }
+
+ if (_build_id) {
+ *build_id = _build_id;
+ *length = _length;
+ }
+
+ return 0;
+error:
+ free(_build_id);
+ return -1;
+}
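The alignment arithmetic in the loop above is easy to misread, so here is a worked example (editorial illustration, not part of the patch) of how it walks a typical GNU build-id note, assuming the usual 12-byte note header:

	/*
	 * Typical GNU build-id note: n_namesz = 4 ("GNU\0"), n_descsz = 20
	 * (a SHA-1 build id), n_type = NT_GNU_BUILD_ID.
	 *
	 *   entry start (4-byte aligned)        -> offset
	 *   12-byte note header + 4-byte name   -> offset + 16
	 *   desc is already 4-byte aligned here -> build id bytes at offset + 16
	 *   next entry                          -> offset + 16 + 20 = offset + 36
	 *
	 * The lttng_ust_offset_align() calls only add padding when a name or
	 * desc size is not a multiple of ELF_NOTE_ENTRY_ALIGN/ELF_NOTE_DESC_ALIGN (4).
	 */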
+
+/*
+ * Retrieve a build ID (an array of bytes) from the corresponding
+ * section in the ELF file. The length of the build ID can be either
+ * 16 or 20 bytes depending on the method used to generate it, hence
+ * the length out parameter.
+ *
+ * If the function returns successfully, the out parameter `found`
+ * indicates whether the build id information was present in the ELF
+ * file or not. If `found` is not 0, the out parameters `build_id` and
+ * `length` will both have been set with the retrieved information.
+ *
+ * Returns 0 on success, -1 if an error occurred.
+ */
+int lttng_ust_elf_get_build_id(struct lttng_ust_elf *elf, uint8_t **build_id,
+ size_t *length, int *found)
+{
+ uint16_t i;
+ uint8_t *_build_id = NULL; /* Silence old gcc warning. */
+ size_t _length = 0; /* Silence old gcc warning. */
+
+ if (!elf || !build_id || !length || !found) {
+ goto error;
+ }
+
+ for (i = 0; i < elf->ehdr->e_phnum; ++i) {
+ off_t offset, segment_end;
+ struct lttng_ust_elf_phdr *phdr;
+ int ret = 0;
+
+ phdr = lttng_ust_elf_get_phdr(elf, i);
+ if (!phdr) {
+ goto error;
+ }
+
+ /* Build ID will be contained in a PT_NOTE segment. */
+ if (phdr->p_type != PT_NOTE) {
+ goto next_loop;
+ }
+
+ offset = phdr->p_offset;
+ segment_end = offset + phdr->p_filesz;
+ ret = lttng_ust_elf_get_build_id_from_segment(
+ elf, &_build_id, &_length, offset, segment_end);
+ next_loop:
+ free(phdr);
+ if (ret) {
+ goto error;
+ }
+ if (_build_id) {
+ break;
+ }
+ }
+
+ if (_build_id) {
+ *build_id = _build_id;
+ *length = _length;
+ *found = 1;
+ } else {
+ *found = 0;
+ }
+
+ return 0;
+error:
+ free(_build_id);
+ return -1;
+}
+
+/*
+ * Try to retrieve filename and CRC from given ELF section `shdr`.
+ *
+ * On success, the out parameters `filename` and `crc` are set with the
+ * retrieved information if (and only if) the section is a valid
+ * .gnu_debuglink section; otherwise they are left untouched.
+ *
+ * Returns 0 on success, -1 if an error occurred.
+ */
+static
+int lttng_ust_elf_get_debug_link_from_section(struct lttng_ust_elf *elf,
+ char **filename, uint32_t *crc,
+ struct lttng_ust_elf_shdr *shdr)
+{
+ char *_filename = NULL; /* Silence old gcc warning. */
+ size_t filename_len;
+ char *section_name = NULL;
+ uint32_t _crc = 0; /* Silence old gcc warning. */
+
+ if (!elf || !filename || !crc || !shdr) {
+ goto error;
+ }
+
+ /*
+ * The .gnu_debuglink section is of type SHT_PROGBITS,
+ * skip the other sections.
+ */
+ if (shdr->sh_type != SHT_PROGBITS) {
+ goto end;
+ }
+
+ section_name = lttng_ust_elf_get_section_name(elf,
+ shdr->sh_name);
+ if (!section_name) {
+ goto end;
+ }
+ if (strcmp(section_name, ".gnu_debuglink")) {
+ goto end;
+ }
+
+ /*
+ * The length of the filename is the sh_size excluding the CRC
+ * which comes after it in the section.
+ */
+ _filename = zmalloc(sizeof(char) * (shdr->sh_size - ELF_CRC_SIZE));
+ if (!_filename) {
+ goto error;
+ }
+ if (lseek(elf->fd, shdr->sh_offset, SEEK_SET) < 0) {
+ goto error;
+ }
+ filename_len = sizeof(*_filename) * (shdr->sh_size - ELF_CRC_SIZE);
+ if (lttng_ust_read(elf->fd, _filename, filename_len) < filename_len) {
+ goto error;
+ }
+ if (lttng_ust_read(elf->fd, &_crc, sizeof(_crc)) < sizeof(_crc)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ _crc = bswap_32(_crc);
+ }
+
+end:
+ free(section_name);
+ if (_filename) {
+ *filename = _filename;
+ *crc = _crc;
+ }
+
+ return 0;
+
+error:
+ free(_filename);
+ free(section_name);
+ return -1;
+}
+
+/*
+ * Retrieve filename and CRC from ELF's .gnu_debuglink section, if any.
+ *
+ * If the function returns successfully, the out parameter `found`
+ * indicates whether the debug link information was present in the ELF
+ * file or not. If `found` is not 0, the out parameters `filename` and
+ * `crc` will both have been set with the retrieved information.
+ *
+ * Returns 0 on success, -1 if an error occurred.
+ */
+int lttng_ust_elf_get_debug_link(struct lttng_ust_elf *elf, char **filename,
+ uint32_t *crc, int *found)
+{
+ int ret;
+ uint16_t i;
+ char *_filename = NULL; /* Silence old gcc warning. */
+ uint32_t _crc = 0; /* Silence old gcc warning. */
+
+ if (!elf || !filename || !crc || !found) {
+ goto error;
+ }
+
+ for (i = 0; i < elf->ehdr->e_shnum; ++i) {
+ struct lttng_ust_elf_shdr *shdr = NULL;
+
+ shdr = lttng_ust_elf_get_shdr(elf, i);
+ if (!shdr) {
+ goto error;
+ }
+
+ ret = lttng_ust_elf_get_debug_link_from_section(
+ elf, &_filename, &_crc, shdr);
+ free(shdr);
+
+ if (ret) {
+ goto error;
+ }
+ if (_filename) {
+ break;
+ }
+ }
+
+ if (_filename) {
+ *filename = _filename;
+ *crc = _crc;
+ *found = 1;
+ } else {
+ *found = 0;
+ }
+
+ return 0;
+
+error:
+ free(_filename);
+ return -1;
+}
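Taken together, the public entry points of this file form a small create/query/destroy API. A minimal caller sketch (illustrative only; it mirrors what get_elf_info() does in lttng-ust-statedump.c elsewhere in this patch, and assumes the declarations from "common/elf.h" and <stdlib.h> are in scope):

	static int dump_build_id(const char *path)
	{
		struct lttng_ust_elf *elf;
		uint8_t *build_id = NULL;
		size_t build_id_len = 0;
		int found = 0, ret = -1;

		elf = lttng_ust_elf_create(path);
		if (!elf)
			return -1;
		if (lttng_ust_elf_get_build_id(elf, &build_id, &build_id_len, &found))
			goto end;
		if (found)
			free(build_id);	/* the caller owns the returned buffer */
		ret = 0;
	end:
		lttng_ust_elf_destroy(elf);
		return ret;
	}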
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ */
+
+#ifndef _LIB_LTTNG_UST_ELF_H
+#define _LIB_LTTNG_UST_ELF_H
+
+#include <elf.h>
+#include <lttng/ust-endian.h>
+
+/*
+ * Determine native endianness in order to convert when reading an ELF
+ * file if there is a mismatch.
+ */
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define NATIVE_ELF_ENDIANNESS ELFDATA2LSB
+#else
+#define NATIVE_ELF_ENDIANNESS ELFDATA2MSB
+#endif
+
+/*
+ * The size in bytes of the debug link CRC as contained in an ELF
+ * section.
+ */
+#define ELF_CRC_SIZE 4
+/*
+ * ELF notes are aligned on 4 bytes. ref: ELF specification version
+ * 1.1 p. 2-5.
+ */
+#define ELF_NOTE_ENTRY_ALIGN 4
+/*
+ * Within an ELF note, the `desc` field is also aligned on 4
+ * bytes. ref: ELF specification version 1.1 p. 2-5.
+ */
+#define ELF_NOTE_DESC_ALIGN 4
+
+#define bswap(x) \
+ do { \
+ switch (sizeof(x)) { \
+ case 8: \
+ x = bswap_64(x); \
+ break; \
+ case 4: \
+ x = bswap_32(x); \
+ break; \
+ case 2: \
+ x = bswap_16(x); \
+ break; \
+ case 1: \
+ break; \
+ default: \
+ abort(); \
+ } \
+ } while (0)
+
+#define bswap_phdr(phdr) \
+ do { \
+ bswap((phdr).p_type); \
+ bswap((phdr).p_offset); \
+ bswap((phdr).p_filesz); \
+ bswap((phdr).p_memsz); \
+ bswap((phdr).p_align); \
+ bswap((phdr).p_vaddr); \
+ } while (0)
+
+#define bswap_shdr(shdr) \
+ do { \
+ bswap((shdr).sh_name); \
+ bswap((shdr).sh_type); \
+ bswap((shdr).sh_flags); \
+ bswap((shdr).sh_addr); \
+ bswap((shdr).sh_offset); \
+ bswap((shdr).sh_size); \
+ bswap((shdr).sh_link); \
+ bswap((shdr).sh_info); \
+ bswap((shdr).sh_addralign); \
+ bswap((shdr).sh_entsize); \
+ } while (0)
+
+#define bswap_ehdr(ehdr) \
+ do { \
+ bswap((ehdr).e_type); \
+ bswap((ehdr).e_machine); \
+ bswap((ehdr).e_version); \
+ bswap((ehdr).e_entry); \
+ bswap((ehdr).e_phoff); \
+ bswap((ehdr).e_shoff); \
+ bswap((ehdr).e_flags); \
+ bswap((ehdr).e_ehsize); \
+ bswap((ehdr).e_phentsize); \
+ bswap((ehdr).e_phnum); \
+ bswap((ehdr).e_shentsize); \
+ bswap((ehdr).e_shnum); \
+ bswap((ehdr).e_shstrndx); \
+ } while (0)
+
+#define copy_phdr(src_phdr, dst_phdr) \
+ do { \
+ (dst_phdr).p_type = (src_phdr).p_type; \
+ (dst_phdr).p_offset = (src_phdr).p_offset; \
+ (dst_phdr).p_filesz = (src_phdr).p_filesz; \
+ (dst_phdr).p_memsz = (src_phdr).p_memsz; \
+ (dst_phdr).p_align = (src_phdr).p_align; \
+ (dst_phdr).p_vaddr = (src_phdr).p_vaddr; \
+ } while (0)
+
+#define copy_shdr(src_shdr, dst_shdr) \
+ do { \
+ (dst_shdr).sh_name = (src_shdr).sh_name; \
+ (dst_shdr).sh_type = (src_shdr).sh_type; \
+ (dst_shdr).sh_flags = (src_shdr).sh_flags; \
+ (dst_shdr).sh_addr = (src_shdr).sh_addr; \
+ (dst_shdr).sh_offset = (src_shdr).sh_offset; \
+ (dst_shdr).sh_size = (src_shdr).sh_size; \
+ (dst_shdr).sh_link = (src_shdr).sh_link; \
+ (dst_shdr).sh_info = (src_shdr).sh_info; \
+ (dst_shdr).sh_addralign = (src_shdr).sh_addralign; \
+ (dst_shdr).sh_entsize = (src_shdr).sh_entsize; \
+ } while (0)
+
+#define copy_ehdr(src_ehdr, dst_ehdr) \
+ do { \
+ (dst_ehdr).e_type = (src_ehdr).e_type; \
+ (dst_ehdr).e_machine = (src_ehdr).e_machine; \
+ (dst_ehdr).e_version = (src_ehdr).e_version; \
+ (dst_ehdr).e_entry = (src_ehdr).e_entry; \
+ (dst_ehdr).e_phoff = (src_ehdr).e_phoff; \
+ (dst_ehdr).e_shoff = (src_ehdr).e_shoff; \
+ (dst_ehdr).e_flags = (src_ehdr).e_flags; \
+ (dst_ehdr).e_ehsize = (src_ehdr).e_ehsize; \
+ (dst_ehdr).e_phentsize = (src_ehdr).e_phentsize; \
+ (dst_ehdr).e_phnum = (src_ehdr).e_phnum; \
+ (dst_ehdr).e_shentsize = (src_ehdr).e_shentsize; \
+ (dst_ehdr).e_shnum = (src_ehdr).e_shnum; \
+ (dst_ehdr).e_shstrndx = (src_ehdr).e_shstrndx; \
+ } while (0)
+
+static inline
+int is_elf_32_bit(struct lttng_ust_elf *elf)
+{
+ return elf->bitness == ELFCLASS32;
+}
+
+static inline
+int is_elf_native_endian(struct lttng_ust_elf *elf)
+{
+ return elf->endianness == NATIVE_ELF_ENDIANNESS;
+}
+
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_statedump
+
+#if !defined(_TRACEPOINT_LTTNG_UST_STATEDUMP_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_LTTNG_UST_STATEDUMP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+
+#define LTTNG_UST_STATEDUMP_PROVIDER
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT(lttng_ust_statedump, start,
+ TP_ARGS(struct lttng_ust_session *, session),
+ TP_FIELDS(
+ ctf_unused(session)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_statedump, bin_info,
+ TP_ARGS(
+ struct lttng_ust_session *, session,
+ void *, baddr,
+ const char*, path,
+ uint64_t, memsz,
+ uint8_t, is_pic,
+ uint8_t, has_build_id,
+ uint8_t, has_debug_link
+ ),
+ TP_FIELDS(
+ ctf_unused(session)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_integer(uint64_t, memsz, memsz)
+ ctf_string(path, path)
+ ctf_integer(uint8_t, is_pic, is_pic)
+ ctf_integer(uint8_t, has_build_id, has_build_id)
+ ctf_integer(uint8_t, has_debug_link, has_debug_link)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_statedump, build_id,
+ TP_ARGS(
+ struct lttng_ust_session *, session,
+ void *, baddr,
+ uint8_t *, build_id,
+ size_t, build_id_len
+ ),
+ TP_FIELDS(
+ ctf_unused(session)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_sequence_hex(uint8_t, build_id, build_id,
+ size_t, build_id_len)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_statedump, debug_link,
+ TP_ARGS(
+ struct lttng_ust_session *, session,
+ void *, baddr,
+ char *, filename,
+ uint32_t, crc
+ ),
+ TP_FIELDS(
+ ctf_unused(session)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_integer(uint32_t, crc, crc)
+ ctf_string(filename, filename)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_statedump, procname,
+ TP_ARGS(
+ struct lttng_ust_session *, session,
+ char *, name
+ ),
+ TP_FIELDS(
+ ctf_unused(session)
+ ctf_array_text(char, procname, name, LTTNG_UST_ABI_PROCNAME_LEN)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_statedump, end,
+ TP_ARGS(struct lttng_ust_session *, session),
+ TP_FIELDS(
+ ctf_unused(session)
+ )
+)
+
+#endif /* _TRACEPOINT_LTTNG_UST_STATEDUMP_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./lttng-ust-statedump-provider.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <link.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "common/elf.h"
+#include "common/macros.h"
+#include "lttng-tracer-core.h"
+#include "lttng-ust-statedump.h"
+#include "jhash.h"
+#include "getenv.h"
+#include "ust-events-internal.h"
+
+#define TRACEPOINT_DEFINE
+#include "ust_lib.h" /* Only define. */
+
+#define TRACEPOINT_CREATE_PROBES
+#define TP_SESSION_CHECK
+#include "lttng-ust-statedump-provider.h" /* Define and create probes. */
+
+struct dl_iterate_data {
+ int exec_found;
+ bool first;
+ bool cancel;
+};
+
+struct bin_info_data {
+ void *base_addr_ptr;
+ char resolved_path[PATH_MAX];
+ char *dbg_file;
+ uint8_t *build_id;
+ uint64_t memsz;
+ size_t build_id_len;
+ int vdso;
+ uint32_t crc;
+ uint8_t is_pic;
+ uint8_t has_build_id;
+ uint8_t has_debug_link;
+};
+
+struct lttng_ust_dl_node {
+ struct bin_info_data bin_data;
+ struct cds_hlist_node node;
+ bool traced;
+ bool marked;
+};
+
+#define UST_DL_STATE_HASH_BITS 8
+#define UST_DL_STATE_TABLE_SIZE (1 << UST_DL_STATE_HASH_BITS)
+struct cds_hlist_head dl_state_table[UST_DL_STATE_TABLE_SIZE];
+
+typedef void (*tracepoint_cb)(struct lttng_ust_session *session, void *priv);
+
+static
+struct lttng_ust_dl_node *alloc_dl_node(const struct bin_info_data *bin_data)
+{
+ struct lttng_ust_dl_node *e;
+
+ e = zmalloc(sizeof(struct lttng_ust_dl_node));
+ if (!e)
+ return NULL;
+ if (bin_data->dbg_file) {
+ e->bin_data.dbg_file = strdup(bin_data->dbg_file);
+ if (!e->bin_data.dbg_file)
+ goto error;
+ }
+ if (bin_data->build_id) {
+ e->bin_data.build_id = zmalloc(bin_data->build_id_len);
+ if (!e->bin_data.build_id)
+ goto error;
+ memcpy(e->bin_data.build_id, bin_data->build_id,
+ bin_data->build_id_len);
+ }
+ e->bin_data.base_addr_ptr = bin_data->base_addr_ptr;
+ memcpy(e->bin_data.resolved_path, bin_data->resolved_path, PATH_MAX);
+ e->bin_data.memsz = bin_data->memsz;
+ e->bin_data.build_id_len = bin_data->build_id_len;
+ e->bin_data.vdso = bin_data->vdso;
+ e->bin_data.crc = bin_data->crc;
+ e->bin_data.is_pic = bin_data->is_pic;
+ e->bin_data.has_build_id = bin_data->has_build_id;
+ e->bin_data.has_debug_link = bin_data->has_debug_link;
+ return e;
+
+error:
+ free(e->bin_data.build_id);
+ free(e->bin_data.dbg_file);
+ free(e);
+ return NULL;
+}
+
+static
+void free_dl_node(struct lttng_ust_dl_node *e)
+{
+ free(e->bin_data.build_id);
+ free(e->bin_data.dbg_file);
+ free(e);
+}
+
+/* Return 0 if same, nonzero if not. */
+static
+int compare_bin_data(const struct bin_info_data *a,
+ const struct bin_info_data *b)
+{
+ if (a->base_addr_ptr != b->base_addr_ptr)
+ return -1;
+ if (strcmp(a->resolved_path, b->resolved_path) != 0)
+ return -1;
+ if (a->dbg_file && !b->dbg_file)
+ return -1;
+ if (!a->dbg_file && b->dbg_file)
+ return -1;
+ if (a->dbg_file && strcmp(a->dbg_file, b->dbg_file) != 0)
+ return -1;
+ if (a->build_id && !b->build_id)
+ return -1;
+ if (!a->build_id && b->build_id)
+ return -1;
+ if (a->build_id_len != b->build_id_len)
+ return -1;
+ if (a->build_id &&
+ memcmp(a->build_id, b->build_id, a->build_id_len) != 0)
+ return -1;
+ if (a->memsz != b->memsz)
+ return -1;
+ if (a->vdso != b->vdso)
+ return -1;
+ if (a->crc != b->crc)
+ return -1;
+ if (a->is_pic != b->is_pic)
+ return -1;
+ if (a->has_build_id != b->has_build_id)
+ return -1;
+ if (a->has_debug_link != b->has_debug_link)
+ return -1;
+ return 0;
+}
+
+static
+struct lttng_ust_dl_node *find_or_create_dl_node(struct bin_info_data *bin_data)
+{
+ struct cds_hlist_head *head;
+ struct lttng_ust_dl_node *e;
+ unsigned int hash;
+ bool found = false;
+
+ hash = jhash(&bin_data->base_addr_ptr,
+ sizeof(bin_data->base_addr_ptr), 0);
+ head = &dl_state_table[hash & (UST_DL_STATE_TABLE_SIZE - 1)];
+ cds_hlist_for_each_entry_2(e, head, node) {
+ if (compare_bin_data(&e->bin_data, bin_data) != 0)
+ continue;
+ found = true;
+ break;
+ }
+ if (!found) {
+ /* Create */
+ e = alloc_dl_node(bin_data);
+ if (!e)
+ return NULL;
+ cds_hlist_add_head(&e->node, head);
+ }
+ return e;
+}
+
+static
+void remove_dl_node(struct lttng_ust_dl_node *e)
+{
+ cds_hlist_del(&e->node);
+}
+
+/*
+ * Trace statedump event into all sessions owned by the caller thread
+ * for which statedump is pending.
+ */
+static
+void trace_statedump_event(tracepoint_cb tp_cb, void *owner, void *priv)
+{
+ struct cds_list_head *sessionsp;
+ struct lttng_ust_session_private *session_priv;
+
+ sessionsp = lttng_get_sessions();
+ cds_list_for_each_entry(session_priv, sessionsp, node) {
+ if (session_priv->owner != owner)
+ continue;
+ if (!session_priv->statedump_pending)
+ continue;
+ tp_cb(session_priv->pub, priv);
+ }
+}
+
+static
+void trace_bin_info_cb(struct lttng_ust_session *session, void *priv)
+{
+ struct bin_info_data *bin_data = (struct bin_info_data *) priv;
+
+ tracepoint(lttng_ust_statedump, bin_info,
+ session, bin_data->base_addr_ptr,
+ bin_data->resolved_path, bin_data->memsz,
+ bin_data->is_pic, bin_data->has_build_id,
+ bin_data->has_debug_link);
+}
+
+static
+void trace_build_id_cb(struct lttng_ust_session *session, void *priv)
+{
+ struct bin_info_data *bin_data = (struct bin_info_data *) priv;
+
+ tracepoint(lttng_ust_statedump, build_id,
+ session, bin_data->base_addr_ptr,
+ bin_data->build_id, bin_data->build_id_len);
+}
+
+static
+void trace_debug_link_cb(struct lttng_ust_session *session, void *priv)
+{
+ struct bin_info_data *bin_data = (struct bin_info_data *) priv;
+
+ tracepoint(lttng_ust_statedump, debug_link,
+ session, bin_data->base_addr_ptr,
+ bin_data->dbg_file, bin_data->crc);
+}
+
+static
+void procname_cb(struct lttng_ust_session *session, void *priv)
+{
+ char *procname = (char *) priv;
+ tracepoint(lttng_ust_statedump, procname, session, procname);
+}
+
+static
+void trace_start_cb(struct lttng_ust_session *session, void *priv __attribute__((unused)))
+{
+ tracepoint(lttng_ust_statedump, start, session);
+}
+
+static
+void trace_end_cb(struct lttng_ust_session *session, void *priv __attribute__((unused)))
+{
+ tracepoint(lttng_ust_statedump, end, session);
+}
+
+static
+int get_elf_info(struct bin_info_data *bin_data)
+{
+ struct lttng_ust_elf *elf;
+ int ret = 0, found;
+
+ elf = lttng_ust_elf_create(bin_data->resolved_path);
+ if (!elf) {
+ ret = -1;
+ goto end;
+ }
+
+ ret = lttng_ust_elf_get_memsz(elf, &bin_data->memsz);
+ if (ret) {
+ goto end;
+ }
+
+ found = 0;
+ ret = lttng_ust_elf_get_build_id(elf, &bin_data->build_id,
+ &bin_data->build_id_len,
+ &found);
+ if (ret) {
+ goto end;
+ }
+ bin_data->has_build_id = !!found;
+ found = 0;
+ ret = lttng_ust_elf_get_debug_link(elf, &bin_data->dbg_file,
+ &bin_data->crc,
+ &found);
+ if (ret) {
+ goto end;
+ }
+ bin_data->has_debug_link = !!found;
+
+ bin_data->is_pic = lttng_ust_elf_is_pic(elf);
+
+end:
+ lttng_ust_elf_destroy(elf);
+ return ret;
+}
+
+static
+void trace_baddr(struct bin_info_data *bin_data, void *owner)
+{
+ trace_statedump_event(trace_bin_info_cb, owner, bin_data);
+
+ if (bin_data->has_build_id)
+ trace_statedump_event(trace_build_id_cb, owner, bin_data);
+
+ if (bin_data->has_debug_link)
+ trace_statedump_event(trace_debug_link_cb, owner, bin_data);
+}
+
+static
+int extract_baddr(struct bin_info_data *bin_data)
+{
+ int ret = 0;
+ struct lttng_ust_dl_node *e;
+
+ if (!bin_data->vdso) {
+ ret = get_elf_info(bin_data);
+ if (ret) {
+ goto end;
+ }
+ } else {
+ bin_data->memsz = 0;
+ bin_data->has_build_id = 0;
+ bin_data->has_debug_link = 0;
+ }
+
+ e = find_or_create_dl_node(bin_data);
+ if (!e) {
+ ret = -1;
+ goto end;
+ }
+ e->marked = true;
+end:
+ free(bin_data->build_id);
+ bin_data->build_id = NULL;
+ free(bin_data->dbg_file);
+ bin_data->dbg_file = NULL;
+ return ret;
+}
+
+static
+void trace_statedump_start(void *owner)
+{
+ trace_statedump_event(trace_start_cb, owner, NULL);
+}
+
+static
+void trace_statedump_end(void *owner)
+{
+ trace_statedump_event(trace_end_cb, owner, NULL);
+}
+
+static
+void iter_begin(struct dl_iterate_data *data)
+{
+ unsigned int i;
+
+ /*
+ * UST lock nests within dynamic loader lock.
+ *
+ * Hold this lock across handling of the module listing to
+ * protect memory allocation at early process start, due to
+ * interactions with libc-wrapper lttng malloc instrumentation.
+ */
+ if (ust_lock()) {
+ data->cancel = true;
+ return;
+ }
+
+ /* Ensure all entries are unmarked. */
+ for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
+ struct cds_hlist_head *head;
+ struct lttng_ust_dl_node *e;
+
+ head = &dl_state_table[i];
+ cds_hlist_for_each_entry_2(e, head, node)
+ assert(!e->marked);
+ }
+}
+
+static
+void trace_lib_load(const struct bin_info_data *bin_data, void *ip)
+{
+ tracepoint(lttng_ust_lib, load,
+ ip, bin_data->base_addr_ptr, bin_data->resolved_path,
+ bin_data->memsz, bin_data->has_build_id,
+ bin_data->has_debug_link);
+
+ if (bin_data->has_build_id) {
+ tracepoint(lttng_ust_lib, build_id,
+ ip, bin_data->base_addr_ptr, bin_data->build_id,
+ bin_data->build_id_len);
+ }
+
+ if (bin_data->has_debug_link) {
+ tracepoint(lttng_ust_lib, debug_link,
+ ip, bin_data->base_addr_ptr, bin_data->dbg_file,
+ bin_data->crc);
+ }
+}
+
+static
+void trace_lib_unload(const struct bin_info_data *bin_data, void *ip)
+{
+ tracepoint(lttng_ust_lib, unload, ip, bin_data->base_addr_ptr);
+}
+
+static
+void iter_end(struct dl_iterate_data *data, void *ip)
+{
+ unsigned int i;
+
+ if (data->cancel)
+ goto end;
+ /*
+ * Iterate on hash table.
+ * For each marked, traced, do nothing.
+	 * For each marked, not traced, trace lib load event. traced = true.
+	 * For each unmarked, traced, trace lib unload event. remove node.
+ * For each unmarked, not traced, remove node.
+ */
+ for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
+ struct cds_hlist_head *head;
+ struct lttng_ust_dl_node *e;
+
+ head = &dl_state_table[i];
+ cds_hlist_for_each_entry_2(e, head, node) {
+ if (e->marked) {
+ if (!e->traced) {
+ trace_lib_load(&e->bin_data, ip);
+ e->traced = true;
+ }
+ e->marked = false;
+ } else {
+ if (e->traced)
+ trace_lib_unload(&e->bin_data, ip);
+ remove_dl_node(e);
+ free_dl_node(e);
+ }
+ }
+ }
+end:
+ ust_unlock();
+}
+
+static
+int extract_bin_info_events(struct dl_phdr_info *info, size_t size __attribute__((unused)), void *_data)
+{
+ int j, ret = 0;
+ struct dl_iterate_data *data = _data;
+
+ if (data->first) {
+ iter_begin(data);
+ data->first = false;
+ }
+
+ if (data->cancel)
+ goto end;
+
+ for (j = 0; j < info->dlpi_phnum; j++) {
+ struct bin_info_data bin_data;
+
+ if (info->dlpi_phdr[j].p_type != PT_LOAD)
+ continue;
+
+ memset(&bin_data, 0, sizeof(bin_data));
+
+ /* Calculate virtual memory address of the loadable segment */
+ bin_data.base_addr_ptr = (void *) info->dlpi_addr +
+ info->dlpi_phdr[j].p_vaddr;
+
+		if (info->dlpi_name == NULL || info->dlpi_name[0] == 0) {
+ /*
+ * Only the first phdr without a dlpi_name
+ * encountered is considered as the program
+ * executable. The rest are vdsos.
+ */
+ if (!data->exec_found) {
+ ssize_t path_len;
+ data->exec_found = 1;
+
+ /*
+ * Use /proc/self/exe to resolve the
+ * executable's full path.
+ */
+ path_len = readlink("/proc/self/exe",
+ bin_data.resolved_path,
+ PATH_MAX - 1);
+ if (path_len <= 0)
+ break;
+
+ bin_data.resolved_path[path_len] = '\0';
+ bin_data.vdso = 0;
+ } else {
+ snprintf(bin_data.resolved_path,
+ PATH_MAX - 1, "[vdso]");
+ bin_data.vdso = 1;
+ }
+ } else {
+ /*
+ * For regular dl_phdr_info entries check if
+ * the path to the binary really exists. If not,
+ * treat as vdso and use dlpi_name as 'path'.
+ */
+ if (!realpath(info->dlpi_name,
+ bin_data.resolved_path)) {
+ snprintf(bin_data.resolved_path,
+ PATH_MAX - 1, "[%s]",
+ info->dlpi_name);
+ bin_data.vdso = 1;
+ } else {
+ bin_data.vdso = 0;
+ }
+ }
+
+ ret = extract_baddr(&bin_data);
+ break;
+ }
+end:
+ return ret;
+}
+
+static
+void ust_dl_table_statedump(void *owner)
+{
+ unsigned int i;
+
+ if (ust_lock())
+ goto end;
+
+ /* Statedump each traced table entry into session for owner. */
+ for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
+ struct cds_hlist_head *head;
+ struct lttng_ust_dl_node *e;
+
+ head = &dl_state_table[i];
+ cds_hlist_for_each_entry_2(e, head, node) {
+ if (e->traced)
+ trace_baddr(&e->bin_data, owner);
+ }
+ }
+
+end:
+ ust_unlock();
+}
+
+void lttng_ust_dl_update(void *ip)
+{
+ struct dl_iterate_data data;
+
+ if (lttng_ust_getenv("LTTNG_UST_WITHOUT_BADDR_STATEDUMP"))
+ return;
+
+ /*
+ * Fixup lttng-ust TLS when called from dlopen/dlclose
+ * instrumentation.
+ */
+ lttng_ust_fixup_tls();
+
+ data.exec_found = 0;
+ data.first = true;
+ data.cancel = false;
+ /*
+ * Iterate through the list of currently loaded shared objects and
+ * generate tables entries for loadable segments using
+ * extract_bin_info_events.
+ * Removed libraries are detected by mark-and-sweep: marking is
+ * done in the iteration over libraries, and sweeping is
+ * performed by iter_end().
+ */
+ dl_iterate_phdr(extract_bin_info_events, &data);
+ if (data.first)
+ iter_begin(&data);
+ iter_end(&data, ip);
+}
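As the comment above notes, lttng_ust_dl_update() is meant to be driven from dlopen()/dlclose() instrumentation in addition to the statedump path below. A hedged sketch of such a call site (illustrative only; the real wrapper lives in liblttng-ust-dl, outside this hunk, and may differ):

	#include <dlfcn.h>

	/* Hypothetical interception point, not the actual wrapper. */
	void *instrumented_dlopen(const char *filename, int flags)
	{
		void *handle = dlopen(filename, flags);

		if (handle)
			lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
		return handle;
	}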
+
+/*
+ * Generate a statedump of base addresses of all shared objects loaded
+ * by the traced application, as well as for the application's
+ * executable itself.
+ */
+static
+int do_baddr_statedump(void *owner)
+{
+ if (lttng_ust_getenv("LTTNG_UST_WITHOUT_BADDR_STATEDUMP"))
+ return 0;
+ lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
+ ust_dl_table_statedump(owner);
+ return 0;
+}
+
+static
+int do_procname_statedump(void *owner)
+{
+ if (lttng_ust_getenv("LTTNG_UST_WITHOUT_PROCNAME_STATEDUMP"))
+ return 0;
+
+ trace_statedump_event(procname_cb, owner, lttng_ust_sockinfo_get_procname(owner));
+ return 0;
+}
+
+/*
+ * Generate a statedump of a given traced application. A statedump is
+ * delimited by start and end events. For a given (process, session)
+ * pair, begin/end events are serialized and will match. However, in a
+ * session, statedumps from different processes may be
+ * interleaved. The vpid context should be used to identify which
+ * events belong to which process.
+ *
+ * Grab the ust_lock outside of the RCU read-side lock because we
+ * perform synchronize_rcu with the ust_lock held, which can trigger
+ * deadlocks otherwise.
+ */
+int do_lttng_ust_statedump(void *owner)
+{
+ ust_lock_nocheck();
+ trace_statedump_start(owner);
+ ust_unlock();
+
+ do_procname_statedump(owner);
+ do_baddr_statedump(owner);
+
+ ust_lock_nocheck();
+ trace_statedump_end(owner);
+ ust_unlock();
+
+ return 0;
+}
+
+void lttng_ust_statedump_init(void)
+{
+ __tracepoints__init();
+ __tracepoints__ptrs_init();
+ __lttng_events_init__lttng_ust_statedump();
+ lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
+}
+
+static
+void ust_dl_state_destroy(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
+ struct cds_hlist_head *head;
+ struct lttng_ust_dl_node *e, *tmp;
+
+ head = &dl_state_table[i];
+ cds_hlist_for_each_entry_safe_2(e, tmp, head, node)
+ free_dl_node(e);
+ CDS_INIT_HLIST_HEAD(head);
+ }
+}
+
+void lttng_ust_statedump_destroy(void)
+{
+ __lttng_events_exit__lttng_ust_statedump();
+ __tracepoints__ptrs_destroy();
+ __tracepoints__destroy();
+ ust_dl_state_destroy();
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ */
+
+#ifndef LTTNG_UST_STATEDUMP_H
+#define LTTNG_UST_STATEDUMP_H
+
+#include <lttng/ust-events.h>
+
+void lttng_ust_statedump_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_statedump_destroy(void)
+ __attribute__((visibility("hidden")));
+
+int do_lttng_ust_statedump(void *owner)
+ __attribute__((visibility("hidden")));
+
+#endif /* LTTNG_UST_STATEDUMP_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2011-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_tracef
+
+#if !defined(_TRACEPOINT_LTTNG_UST_TRACEF_PROVIDER_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_LTTNG_UST_TRACEF_PROVIDER_H
+
+#include <lttng/tp/lttng-ust-tracef.h>
+
+#endif /* _TRACEPOINT_LTTNG_UST_TRACEF_PROVIDER_H */
+
+#define TP_IP_PARAM ip /* IP context received as parameter */
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./tp/lttng-ust-tracef.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2011-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_tracelog
+
+#if !defined(_TRACEPOINT_LTTNG_UST_TRACELOG_PROVIDER_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_LTTNG_UST_TRACELOG_PROVIDER_H
+
+#include <lttng/tp/lttng-ust-tracelog.h>
+
+#endif /* _TRACEPOINT_LTTNG_UST_TRACELOG_PROVIDER_H */
+
+#define TP_IP_PARAM ip /* IP context received as parameter */
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./tp/lttng-ust-tracelog.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * library wrappers to be used by non-LGPL compatible source code.
+ */
+
+#include <urcu/uatomic.h>
+
+#include <lttng/urcu/static/pointer.h>
+/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#include <lttng/urcu/pointer.h>
+
+void *lttng_ust_rcu_dereference_sym(void *p)
+{
+ return _lttng_ust_rcu_dereference(p);
+}
+
+void *lttng_ust_rcu_set_pointer_sym(void **p, void *v)
+{
+ cmm_wmb();
+ uatomic_set(p, v);
+ return v;
+}
+
+void *lttng_ust_rcu_xchg_pointer_sym(void **p, void *v)
+{
+ cmm_wmb();
+ return uatomic_xchg(p, v);
+}
+
+void *lttng_ust_rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
+{
+ cmm_wmb();
+ return uatomic_cmpxchg(p, old, _new);
+}
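These _sym wrappers exist so that code which cannot use the LGPL static inlines still gets the same semantics through exported symbols. A minimal read-side sketch (illustrative only; it assumes the public declarations from <lttng/urcu/pointer.h> and <lttng/urcu/urcu-ust.h>, and `current_config` is a hypothetical shared pointer):

	#include <lttng/urcu/pointer.h>
	#include <lttng/urcu/urcu-ust.h>

	struct config { int verbosity; };
	static struct config *current_config;	/* hypothetical shared pointer */

	static void read_config(void)
	{
		struct config *cfg;

		lttng_ust_urcu_read_lock();
		cfg = lttng_ust_rcu_dereference(current_config);
		if (cfg)
			(void) cfg->verbosity;	/* use only inside the read-side critical section */
		lttng_ust_urcu_read_unlock();
	}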
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <pthread.h>
+#include <signal.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <poll.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <sys/mman.h>
+
+#include <urcu/arch.h>
+#include <urcu/wfcqueue.h>
+#include <lttng/urcu/static/urcu-ust.h>
+#include <lttng/urcu/pointer.h>
+#include <urcu/tls-compat.h>
+
+/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#undef _LGPL_SOURCE
+#include <lttng/urcu/urcu-ust.h>
+#define _LGPL_SOURCE
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#ifdef __linux__
+static
+void *mremap_wrapper(void *old_address, size_t old_size,
+ size_t new_size, int flags)
+{
+ return mremap(old_address, old_size, new_size, flags);
+}
+#else
+
+#define MREMAP_MAYMOVE 1
+#define MREMAP_FIXED 2
+
+/*
+ * mremap wrapper for non-Linux systems not allowing MAYMOVE.
+ * This is not generic.
+ */
+static
+void *mremap_wrapper(void *old_address, size_t old_size,
+ size_t new_size, int flags)
+{
+ assert(!(flags & MREMAP_MAYMOVE));
+
+ return MAP_FAILED;
+}
+#endif
+
+/* Sleep delay in ms */
+#define RCU_SLEEP_DELAY_MS 10
+#define INIT_NR_THREADS 8
+#define ARENA_INIT_ALLOC \
+ sizeof(struct registry_chunk) \
+ + INIT_NR_THREADS * sizeof(struct lttng_ust_urcu_reader)
+
+/*
+ * Active attempts to check for reader Q.S. before calling sleep().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
+
+static
+int lttng_ust_urcu_refcount;
+
+/* If the headers do not support the membarrier system call, fall back to smp_mb. */
+#ifdef __NR_membarrier
+# define membarrier(...) syscall(__NR_membarrier, __VA_ARGS__)
+#else
+# define membarrier(...) -ENOSYS
+#endif
+
+enum membarrier_cmd {
+ MEMBARRIER_CMD_QUERY = 0,
+ MEMBARRIER_CMD_SHARED = (1 << 0),
+ /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
+ /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
+};
+
+static
+void _lttng_ust_urcu_init(void)
+ __attribute__((constructor));
+static
+void lttng_ust_urcu_exit(void)
+ __attribute__((destructor));
+
+#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
+int lttng_ust_urcu_has_sys_membarrier;
+#endif
+
+/*
+ * rcu_gp_lock ensures mutual exclusion between threads calling
+ * synchronize_rcu().
+ */
+static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
+/*
+ * rcu_registry_lock ensures mutual exclusion between threads
+ * registering and unregistering themselves to/from the registry, and
+ * with threads reading that registry from synchronize_rcu(). However,
+ * this lock is not held all the way through the completion of awaiting
+ * for the grace period. It is sporadically released between iterations
+ * on the registry.
+ * rcu_registry_lock may nest inside rcu_gp_lock.
+ */
+static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
+static int initialized;
+
+static pthread_key_t lttng_ust_urcu_key;
+
+struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };
+
+/*
+ * Pointer to registry elements. Written to only by each individual reader. Read
+ * by both the reader and the writers.
+ */
+DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);
+
+static CDS_LIST_HEAD(registry);
+
+struct registry_chunk {
+ size_t data_len; /* data length */
+ size_t used; /* amount of data used */
+ struct cds_list_head node; /* chunk_list node */
+ char data[];
+};
+
+struct registry_arena {
+ struct cds_list_head chunk_list;
+};
+
+static struct registry_arena registry_arena = {
+ .chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
+};
+
+/* Saved fork signal mask, protected by rcu_gp_lock */
+static sigset_t saved_fork_signal_mask;
+
+static void mutex_lock(pthread_mutex_t *mutex)
+{
+ int ret;
+
+#ifndef DISTRUST_SIGNALS_EXTREME
+ ret = pthread_mutex_lock(mutex);
+ if (ret)
+ abort();
+#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
+ while ((ret = pthread_mutex_trylock(mutex)) != 0) {
+ if (ret != EBUSY && ret != EINTR)
+ abort();
+ poll(NULL,0,10);
+ }
+#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
+}
+
+static void mutex_unlock(pthread_mutex_t *mutex)
+{
+ int ret;
+
+ ret = pthread_mutex_unlock(mutex);
+ if (ret)
+ abort();
+}
+
+static void smp_mb_master(void)
+{
+ if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
+ if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
+ abort();
+ } else {
+ cmm_smp_mb();
+ }
+}
+
+/*
+ * Always called with rcu_registry lock held. Releases this lock between
+ * iterations and grabs it again. Holds the lock when it returns.
+ */
+static void wait_for_readers(struct cds_list_head *input_readers,
+ struct cds_list_head *cur_snap_readers,
+ struct cds_list_head *qsreaders)
+{
+ unsigned int wait_loops = 0;
+ struct lttng_ust_urcu_reader *index, *tmp;
+
+ /*
+ * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
+ * indicate quiescence (not nested), or observe the current
+ * rcu_gp.ctr value.
+ */
+ for (;;) {
+ if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
+ wait_loops++;
+
+ cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
+ switch (lttng_ust_urcu_reader_state(&index->ctr)) {
+ case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
+ if (cur_snap_readers) {
+ cds_list_move(&index->node,
+ cur_snap_readers);
+ break;
+ }
+ /* Fall-through */
+ case LTTNG_UST_URCU_READER_INACTIVE:
+ cds_list_move(&index->node, qsreaders);
+ break;
+ case LTTNG_UST_URCU_READER_ACTIVE_OLD:
+ /*
+ * Old snapshot. Leaving node in
+ * input_readers will make us busy-loop
+ * until the snapshot becomes current or
+ * the reader becomes inactive.
+ */
+ break;
+ }
+ }
+
+ if (cds_list_empty(input_readers)) {
+ break;
+ } else {
+ /* Temporarily unlock the registry lock. */
+ mutex_unlock(&rcu_registry_lock);
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
+ (void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
+ else
+ caa_cpu_relax();
+ /* Re-lock the registry lock before the next loop. */
+ mutex_lock(&rcu_registry_lock);
+ }
+ }
+}
+
+void lttng_ust_urcu_synchronize_rcu(void)
+{
+ CDS_LIST_HEAD(cur_snap_readers);
+ CDS_LIST_HEAD(qsreaders);
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ assert(!ret);
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ assert(!ret);
+
+ mutex_lock(&rcu_gp_lock);
+
+ mutex_lock(&rcu_registry_lock);
+
+	if (cds_list_empty(&registry))
+ goto out;
+
+	/* All threads should read qparity before accessing the data
+	 * structure pointed to by the new ptr. */
+ /* Write new ptr before changing the qparity */
+ smp_mb_master();
+
+ /*
+ * Wait for readers to observe original parity or be quiescent.
+	 * wait_for_readers() can release and re-acquire rcu_registry_lock
+	 * internally.
+ */
+	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
+
+ /*
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ cmm_smp_mb();
+
+ /* Switch parity: 0 -> 1, 1 -> 0 */
+ CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr, lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);
+
+ /*
+ * Must commit qparity update to memory before waiting for other parity
+ * quiescent state. Failure to do so could result in the writer waiting
+ * forever while new readers are always accessing data (no progress).
+ * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
+ */
+
+ /*
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ cmm_smp_mb();
+
+ /*
+ * Wait for readers to observe new parity or be quiescent.
+	 * wait_for_readers() can release and re-acquire rcu_registry_lock
+	 * internally.
+ */
+ wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
+
+ /*
+ * Put quiescent reader list back into registry.
+ */
+	cds_list_splice(&qsreaders, &registry);
+
+ /*
+	 * Finish waiting for reader threads before letting the old ptr be
+ * freed.
+ */
+ smp_mb_master();
+out:
+ mutex_unlock(&rcu_registry_lock);
+ mutex_unlock(&rcu_gp_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ assert(!ret);
+}
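The grace-period machinery above is what makes the usual publish-then-reclaim pattern safe for writers. A writer-side sketch (illustrative only; it assumes the lttng_ust_rcu_xchg_pointer() wrapper from <lttng/urcu/pointer.h>, and `slot` is a hypothetical shared pointer updated concurrently with readers):

	#include <stdlib.h>
	#include <lttng/urcu/pointer.h>
	#include <lttng/urcu/urcu-ust.h>

	struct config { int verbosity; };

	static void update_config(struct config **slot, int verbosity)
	{
		struct config *new_cfg, *old_cfg;

		new_cfg = malloc(sizeof(*new_cfg));
		if (!new_cfg)
			return;
		new_cfg->verbosity = verbosity;
		old_cfg = lttng_ust_rcu_xchg_pointer(slot, new_cfg);
		lttng_ust_urcu_synchronize_rcu();	/* wait out pre-existing readers */
		free(old_cfg);				/* now safe to reclaim */
	}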
+
+/*
+ * library wrappers to be used by non-LGPL compatible source code.
+ */
+
+void lttng_ust_urcu_read_lock(void)
+{
+ _lttng_ust_urcu_read_lock();
+}
+
+void lttng_ust_urcu_read_unlock(void)
+{
+ _lttng_ust_urcu_read_unlock();
+}
+
+int lttng_ust_urcu_read_ongoing(void)
+{
+ return _lttng_ust_urcu_read_ongoing();
+}
+
+/*
+ * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
+ * Else, try expanding the last chunk. If this fails, allocate a new
+ * chunk twice as big as the last chunk.
+ * Memory used by chunks _never_ moves. A chunk could theoretically be
+ * freed when all "used" slots are released, but we don't do it at this
+ * point.
+ */
+static
+void expand_arena(struct registry_arena *arena)
+{
+ struct registry_chunk *new_chunk, *last_chunk;
+ size_t old_chunk_len, new_chunk_len;
+
+ /* No chunk. */
+ if (cds_list_empty(&arena->chunk_list)) {
+ assert(ARENA_INIT_ALLOC >=
+ sizeof(struct registry_chunk)
+ + sizeof(struct lttng_ust_urcu_reader));
+ new_chunk_len = ARENA_INIT_ALLOC;
+ new_chunk = (struct registry_chunk *) mmap(NULL,
+ new_chunk_len,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE,
+ -1, 0);
+ if (new_chunk == MAP_FAILED)
+ abort();
+ memset(new_chunk, 0, new_chunk_len);
+ new_chunk->data_len =
+ new_chunk_len - sizeof(struct registry_chunk);
+ cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
+ return; /* We're done. */
+ }
+
+ /* Try expanding last chunk. */
+ last_chunk = cds_list_entry(arena->chunk_list.prev,
+ struct registry_chunk, node);
+ old_chunk_len =
+ last_chunk->data_len + sizeof(struct registry_chunk);
+ new_chunk_len = old_chunk_len << 1;
+
+ /* Don't allow memory mapping to move, just expand. */
+ new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
+ new_chunk_len, 0);
+ if (new_chunk != MAP_FAILED) {
+ /* Should not have moved. */
+ assert(new_chunk == last_chunk);
+ memset((char *) last_chunk + old_chunk_len, 0,
+ new_chunk_len - old_chunk_len);
+ last_chunk->data_len =
+ new_chunk_len - sizeof(struct registry_chunk);
+ return; /* We're done. */
+ }
+
+ /* Remap did not succeed, we need to add a new chunk. */
+ new_chunk = (struct registry_chunk *) mmap(NULL,
+ new_chunk_len,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE,
+ -1, 0);
+ if (new_chunk == MAP_FAILED)
+ abort();
+ memset(new_chunk, 0, new_chunk_len);
+ new_chunk->data_len =
+ new_chunk_len - sizeof(struct registry_chunk);
+ cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
+}
+
+static
+struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
+{
+ struct registry_chunk *chunk;
+ struct lttng_ust_urcu_reader *rcu_reader_reg;
+ int expand_done = 0; /* Only allow to expand once per alloc */
+ size_t len = sizeof(struct lttng_ust_urcu_reader);
+
+retry:
+ cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
+ if (chunk->data_len - chunk->used < len)
+ continue;
+ /* Find spot */
+ for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
+ rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
+ rcu_reader_reg++) {
+ if (!rcu_reader_reg->alloc) {
+ rcu_reader_reg->alloc = 1;
+ chunk->used += len;
+ return rcu_reader_reg;
+ }
+ }
+ }
+
+ if (!expand_done) {
+ expand_arena(arena);
+ expand_done = 1;
+ goto retry;
+ }
+
+ return NULL;
+}
+
+/* Called with signals off and mutex locked */
+static
+void add_thread(void)
+{
+ struct lttng_ust_urcu_reader *rcu_reader_reg;
+ int ret;
+
+	rcu_reader_reg = arena_alloc(&registry_arena);
+ if (!rcu_reader_reg)
+ abort();
+ ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
+ if (ret)
+ abort();
+
+ /* Add to registry */
+ rcu_reader_reg->tid = pthread_self();
+ assert(rcu_reader_reg->ctr == 0);
+	cds_list_add(&rcu_reader_reg->node, &registry);
+ /*
+ * Reader threads are pointing to the reader registry. This is
+ * why its memory should never be relocated.
+ */
+ URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
+}
+
+/* Called with mutex locked */
+static
+void cleanup_thread(struct registry_chunk *chunk,
+ struct lttng_ust_urcu_reader *rcu_reader_reg)
+{
+ rcu_reader_reg->ctr = 0;
+ cds_list_del(&rcu_reader_reg->node);
+ rcu_reader_reg->tid = 0;
+ rcu_reader_reg->alloc = 0;
+ chunk->used -= sizeof(struct lttng_ust_urcu_reader);
+}
+
+static
+struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
+{
+ struct registry_chunk *chunk;
+
+	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
+ if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[0])
+ continue;
+ if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len])
+ continue;
+ return chunk;
+ }
+ return NULL;
+}
+
+/* Called with signals off and mutex locked */
+static
+void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
+{
+ cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
+ URCU_TLS(lttng_ust_urcu_reader) = NULL;
+}
+
+/* Disable signals, take mutex, add to registry */
+void lttng_ust_urcu_register(void)
+{
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ if (ret)
+ abort();
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ if (ret)
+ abort();
+
+ /*
+ * Check if a signal concurrently registered our thread since
+ * the check in rcu_read_lock().
+ */
+ if (URCU_TLS(lttng_ust_urcu_reader))
+ goto end;
+
+ /*
+ * Take care of early registration before lttng_ust_urcu constructor.
+ */
+ _lttng_ust_urcu_init();
+
+ mutex_lock(&rcu_registry_lock);
+ add_thread();
+ mutex_unlock(&rcu_registry_lock);
+end:
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ if (ret)
+ abort();
+}
+
+void lttng_ust_urcu_register_thread(void)
+{
+ if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
+ lttng_ust_urcu_register(); /* If not yet registered. */
+}
+
+/* Disable signals, take mutex, remove from registry */
+static
+void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
+{
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ if (ret)
+ abort();
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ if (ret)
+ abort();
+
+ mutex_lock(&rcu_registry_lock);
+ remove_thread(rcu_reader_reg);
+ mutex_unlock(&rcu_registry_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ if (ret)
+ abort();
+ lttng_ust_urcu_exit();
+}
+
+/*
+ * Remove thread from the registry when it exits, and flag it as
+ * destroyed so garbage collection can take care of it.
+ */
+static
+void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
+{
+ lttng_ust_urcu_unregister(rcu_key);
+}
+
+#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
+static
+void lttng_ust_urcu_sys_membarrier_status(bool available)
+{
+ if (!available)
+ abort();
+}
+#else
+static
+void lttng_ust_urcu_sys_membarrier_status(bool available)
+{
+ if (!available)
+ return;
+ lttng_ust_urcu_has_sys_membarrier = 1;
+}
+#endif
+
+static
+void lttng_ust_urcu_sys_membarrier_init(void)
+{
+ bool available = false;
+ int mask;
+
+ mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
+ if (mask >= 0) {
+ if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
+ if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
+ abort();
+ available = true;
+ }
+ }
+ lttng_ust_urcu_sys_membarrier_status(available);
+}
+
+static
+void _lttng_ust_urcu_init(void)
+{
+ mutex_lock(&init_lock);
+ if (!lttng_ust_urcu_refcount++) {
+ int ret;
+
+ ret = pthread_key_create(&lttng_ust_urcu_key,
+ lttng_ust_urcu_thread_exit_notifier);
+ if (ret)
+ abort();
+ lttng_ust_urcu_sys_membarrier_init();
+ initialized = 1;
+ }
+ mutex_unlock(&init_lock);
+}
+
+static
+void lttng_ust_urcu_exit(void)
+{
+ mutex_lock(&init_lock);
+ if (!--lttng_ust_urcu_refcount) {
+ struct registry_chunk *chunk, *tmp;
+ int ret;
+
+ cds_list_for_each_entry_safe(chunk, tmp,
+ &registry_arena.chunk_list, node) {
+ munmap((void *) chunk, chunk->data_len
+ + sizeof(struct registry_chunk));
+ }
+ CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
+ ret = pthread_key_delete(lttng_ust_urcu_key);
+ if (ret)
+ abort();
+ }
+ mutex_unlock(&init_lock);
+}
+
+/*
+ * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
+ * sure fork() does not race with a concurrent thread executing with
+ * any of those locks held. This ensures that the registry and data
+ * protected by rcu_gp_lock are in a coherent state in the child.
+ */
+void lttng_ust_urcu_before_fork(void)
+{
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ assert(!ret);
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ assert(!ret);
+ mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
+ saved_fork_signal_mask = oldmask;
+}
+
+void lttng_ust_urcu_after_fork_parent(void)
+{
+ sigset_t oldmask;
+ int ret;
+
+ oldmask = saved_fork_signal_mask;
+ mutex_unlock(&rcu_registry_lock);
+ mutex_unlock(&rcu_gp_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ assert(!ret);
+}
+
+/*
+ * Prune all entries from the registry except our own thread, matching the
+ * Linux fork() behavior. Called with rcu_gp_lock and rcu_registry_lock held.
+ */
+static
+void lttng_ust_urcu_prune_registry(void)
+{
+ struct registry_chunk *chunk;
+ struct lttng_ust_urcu_reader *rcu_reader_reg;
+
+ cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
+ for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
+ rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
+ rcu_reader_reg++) {
+ if (!rcu_reader_reg->alloc)
+ continue;
+ if (rcu_reader_reg->tid == pthread_self())
+ continue;
+ cleanup_thread(chunk, rcu_reader_reg);
+ }
+ }
+}
+
+void lttng_ust_urcu_after_fork_child(void)
+{
+ sigset_t oldmask;
+ int ret;
+
+ lttng_ust_urcu_prune_registry();
+ oldmask = saved_fork_signal_mask;
+ mutex_unlock(&rcu_registry_lock);
+ mutex_unlock(&rcu_gp_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ assert(!ret);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_UST_UUID_H
+#define _LTTNG_UST_UUID_H
+
+#include <lttng/ust-events.h> /* For LTTNG_UST_UUID_LEN */
+#include <lttng/ust-clock.h>
+
+#endif /* _LTTNG_UST_UUID_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#ifndef _LTTNG_NS_H
+#define _LTTNG_NS_H
+
+/*
+ * The lowest valid inode number that can be allocated in the proc filesystem
+ * is 0xF0000000. Any number below that can be used internally as an error code.
+ *
+ * Zero is used in the kernel as an error code; it's the value we will return
+ * when we fail to read the proper inode number.
+ *
+ * One is used internally to identify an uninitialized cache entry; it should
+ * never be returned.
+ */
+
+enum ns_ino_state {
+ NS_INO_UNAVAILABLE = 0x0,
+ NS_INO_UNINITIALIZED = 0x1,
+ NS_INO_MIN = 0xF0000000,
+};
+
+#endif /* _LTTNG_NS_H */
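
To illustrate how the three states above are meant to be used, here is a
hedged sketch of a per-thread namespace-inode cache; the cache variable and
the read_pid_ns_ino() helper are hypothetical, only the enum values come from
this header.

#include <stdint.h>

/* Assumes the ns_ino_state enum above is in scope. */

/* Hypothetical per-thread cache entry, starts out uninitialized. */
static __thread uint64_t cached_pid_ns_ino = NS_INO_UNINITIALIZED;

/* Hypothetical helper reading /proc/self/ns/pid; returns NS_INO_UNAVAILABLE (0) on failure. */
uint64_t read_pid_ns_ino(void);

static uint64_t get_pid_ns_ino(void)
{
	if (cached_pid_ns_ino == NS_INO_UNINITIALIZED)
		cached_pid_ns_ino = read_pid_ns_ino();
	/* Either a real inode (>= NS_INO_MIN) or NS_INO_UNAVAILABLE. */
	return cached_pid_ns_ino;
}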
--- /dev/null
+/*
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Performance events:
+ *
+ * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
+ *
+ * Data type definitions, declarations, prototypes.
+ *
+ * Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * Header copied from Linux kernel v4.7 installed headers.
+ */
+
+#ifndef _UAPI_LINUX_PERF_EVENT_H
+#define _UAPI_LINUX_PERF_EVENT_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <asm/byteorder.h>
+
+/*
+ * User-space ABI bits:
+ */
+
+/*
+ * attr.type
+ */
+enum perf_type_id {
+ PERF_TYPE_HARDWARE = 0,
+ PERF_TYPE_SOFTWARE = 1,
+ PERF_TYPE_TRACEPOINT = 2,
+ PERF_TYPE_HW_CACHE = 3,
+ PERF_TYPE_RAW = 4,
+ PERF_TYPE_BREAKPOINT = 5,
+
+ PERF_TYPE_MAX, /* non-ABI */
+};
+
+/*
+ * Generalized performance event event_id types, used by the
+ * attr.event_id parameter of the sys_perf_event_open()
+ * syscall:
+ */
+enum perf_hw_id {
+ /*
+ * Common hardware events, generalized by the kernel:
+ */
+ PERF_COUNT_HW_CPU_CYCLES = 0,
+ PERF_COUNT_HW_INSTRUCTIONS = 1,
+ PERF_COUNT_HW_CACHE_REFERENCES = 2,
+ PERF_COUNT_HW_CACHE_MISSES = 3,
+ PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
+ PERF_COUNT_HW_BRANCH_MISSES = 5,
+ PERF_COUNT_HW_BUS_CYCLES = 6,
+ PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
+ PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
+ PERF_COUNT_HW_REF_CPU_CYCLES = 9,
+
+ PERF_COUNT_HW_MAX, /* non-ABI */
+};
+
+/*
+ * Generalized hardware cache events:
+ *
+ * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
+ * { read, write, prefetch } x
+ * { accesses, misses }
+ */
+enum perf_hw_cache_id {
+ PERF_COUNT_HW_CACHE_L1D = 0,
+ PERF_COUNT_HW_CACHE_L1I = 1,
+ PERF_COUNT_HW_CACHE_LL = 2,
+ PERF_COUNT_HW_CACHE_DTLB = 3,
+ PERF_COUNT_HW_CACHE_ITLB = 4,
+ PERF_COUNT_HW_CACHE_BPU = 5,
+ PERF_COUNT_HW_CACHE_NODE = 6,
+
+ PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
+};
+
+enum perf_hw_cache_op_id {
+ PERF_COUNT_HW_CACHE_OP_READ = 0,
+ PERF_COUNT_HW_CACHE_OP_WRITE = 1,
+ PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
+
+ PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
+};
+
+enum perf_hw_cache_op_result_id {
+ PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
+ PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
+
+ PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
+};
+
+/*
+ * Special "software" events provided by the kernel, even if the hardware
+ * does not support performance events. These events measure various
+ * physical and sw events of the kernel (and allow the profiling of them as
+ * well):
+ */
+enum perf_sw_ids {
+ PERF_COUNT_SW_CPU_CLOCK = 0,
+ PERF_COUNT_SW_TASK_CLOCK = 1,
+ PERF_COUNT_SW_PAGE_FAULTS = 2,
+ PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
+ PERF_COUNT_SW_CPU_MIGRATIONS = 4,
+ PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
+ PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
+ PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
+ PERF_COUNT_SW_EMULATION_FAULTS = 8,
+ PERF_COUNT_SW_DUMMY = 9,
+ PERF_COUNT_SW_BPF_OUTPUT = 10,
+
+ PERF_COUNT_SW_MAX, /* non-ABI */
+};
+
+/*
+ * Bits that can be set in attr.sample_type to request information
+ * in the overflow packets.
+ */
+enum perf_event_sample_format {
+ PERF_SAMPLE_IP = 1U << 0,
+ PERF_SAMPLE_TID = 1U << 1,
+ PERF_SAMPLE_TIME = 1U << 2,
+ PERF_SAMPLE_ADDR = 1U << 3,
+ PERF_SAMPLE_READ = 1U << 4,
+ PERF_SAMPLE_CALLCHAIN = 1U << 5,
+ PERF_SAMPLE_ID = 1U << 6,
+ PERF_SAMPLE_CPU = 1U << 7,
+ PERF_SAMPLE_PERIOD = 1U << 8,
+ PERF_SAMPLE_STREAM_ID = 1U << 9,
+ PERF_SAMPLE_RAW = 1U << 10,
+ PERF_SAMPLE_BRANCH_STACK = 1U << 11,
+ PERF_SAMPLE_REGS_USER = 1U << 12,
+ PERF_SAMPLE_STACK_USER = 1U << 13,
+ PERF_SAMPLE_WEIGHT = 1U << 14,
+ PERF_SAMPLE_DATA_SRC = 1U << 15,
+ PERF_SAMPLE_IDENTIFIER = 1U << 16,
+ PERF_SAMPLE_TRANSACTION = 1U << 17,
+ PERF_SAMPLE_REGS_INTR = 1U << 18,
+
+ PERF_SAMPLE_MAX = 1U << 19, /* non-ABI */
+};
+
+/*
+ * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
+ *
+ * If the user does not pass priv level information via branch_sample_type,
+ * the kernel uses the event's priv level. Branch and event priv levels do
+ * not have to match. Branch priv level is checked for permissions.
+ *
+ * The branch types can be combined, however BRANCH_ANY covers all types
+ * of branches and therefore it supersedes all the other types.
+ */
+enum perf_branch_sample_type_shift {
+ PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */
+ PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */
+ PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */
+
+ PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */
+ PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */
+ PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */
+ PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */
+ PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */
+ PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */
+ PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */
+ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */
+
+ PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
+ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */
+ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */
+
+ PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */
+ PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */
+
+ PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
+};
+
+enum perf_branch_sample_type {
+ PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
+ PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
+ PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
+
+ PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
+
+ PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
+ PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
+
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
+
+ PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
+};
+
+#define PERF_SAMPLE_BRANCH_PLM_ALL \
+ (PERF_SAMPLE_BRANCH_USER|\
+ PERF_SAMPLE_BRANCH_KERNEL|\
+ PERF_SAMPLE_BRANCH_HV)
+
+/*
+ * Values to determine ABI of the registers dump.
+ */
+enum perf_sample_regs_abi {
+ PERF_SAMPLE_REGS_ABI_NONE = 0,
+ PERF_SAMPLE_REGS_ABI_32 = 1,
+ PERF_SAMPLE_REGS_ABI_64 = 2,
+};
+
+/*
+ * Values for the memory transaction event qualifier, mostly for
+ * abort events. Multiple bits can be set.
+ */
+enum {
+ PERF_TXN_ELISION = (1 << 0), /* From elision */
+ PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */
+ PERF_TXN_SYNC = (1 << 2), /* Instruction is related */
+ PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */
+ PERF_TXN_RETRY = (1 << 4), /* Retry possible */
+ PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */
+ PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
+ PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */
+
+ PERF_TXN_MAX = (1 << 8), /* non-ABI */
+
+ /* bits 32..63 are reserved for the abort code */
+
+ PERF_TXN_ABORT_MASK = (0xffffffffULL << 32),
+ PERF_TXN_ABORT_SHIFT = 32,
+};
+
+/*
+ * The format of the data returned by read() on a perf event fd,
+ * as specified by attr.read_format:
+ *
+ * struct read_format {
+ * { u64 value;
+ * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
+ * { u64 id; } && PERF_FORMAT_ID
+ * } && !PERF_FORMAT_GROUP
+ *
+ * { u64 nr;
+ * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
+ * { u64 value;
+ * { u64 id; } && PERF_FORMAT_ID
+ * } cntr[nr];
+ * } && PERF_FORMAT_GROUP
+ * };
+ */
+enum perf_event_read_format {
+ PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
+ PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
+ PERF_FORMAT_ID = 1U << 2,
+ PERF_FORMAT_GROUP = 1U << 3,
+
+ PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
+};
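
To make the read_format layout above concrete, the following sketch parses a
non-group read() when PERF_FORMAT_TOTAL_TIME_ENABLED,
PERF_FORMAT_TOTAL_TIME_RUNNING and PERF_FORMAT_ID are all requested; the
struct and function names are illustrative, not part of the ABI.

#include <stdint.h>
#include <unistd.h>

/* Mirrors the !PERF_FORMAT_GROUP layout with the three optional fields enabled. */
struct single_counter_read {
	uint64_t value;
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	uint64_t id;		/* PERF_FORMAT_ID */
};

static int read_scaled_count(int fd, uint64_t *count)
{
	struct single_counter_read r;

	if (read(fd, &r, sizeof(r)) != sizeof(r))
		return -1;
	/* Compensate for multiplexing: scale the raw count by enabled/running time. */
	if (r.time_running && r.time_running != r.time_enabled)
		*count = (uint64_t)((double)r.value * r.time_enabled / r.time_running);
	else
		*count = r.value;
	return 0;
}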
+
+#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
+#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */
+#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */
+#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */
+ /* add: sample_stack_user */
+#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
+#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
+
+/*
+ * Hardware event_id to monitor via a performance monitoring event:
+ */
+struct perf_event_attr {
+
+ /*
+ * Major type: hardware/software/tracepoint/etc.
+ */
+ __u32 type;
+
+ /*
+ * Size of the attr structure, for fwd/bwd compat.
+ */
+ __u32 size;
+
+ /*
+ * Type specific configuration information.
+ */
+ __u64 config;
+
+ union {
+ __u64 sample_period;
+ __u64 sample_freq;
+ };
+
+ __u64 sample_type;
+ __u64 read_format;
+
+ __u64 disabled : 1, /* off by default */
+ inherit : 1, /* children inherit it */
+ pinned : 1, /* must always be on PMU */
+ exclusive : 1, /* only group on PMU */
+ exclude_user : 1, /* don't count user */
+ exclude_kernel : 1, /* ditto kernel */
+ exclude_hv : 1, /* ditto hypervisor */
+ exclude_idle : 1, /* don't count when idle */
+ mmap : 1, /* include mmap data */
+ comm : 1, /* include comm data */
+ freq : 1, /* use freq, not period */
+ inherit_stat : 1, /* per task counts */
+ enable_on_exec : 1, /* next exec enables */
+ task : 1, /* trace fork/exit */
+ watermark : 1, /* wakeup_watermark */
+ /*
+ * precise_ip:
+ *
+ * 0 - SAMPLE_IP can have arbitrary skid
+ * 1 - SAMPLE_IP must have constant skid
+ * 2 - SAMPLE_IP requested to have 0 skid
+ * 3 - SAMPLE_IP must have 0 skid
+ *
+ * See also PERF_RECORD_MISC_EXACT_IP
+ */
+ precise_ip : 2, /* skid constraint */
+ mmap_data : 1, /* non-exec mmap data */
+ sample_id_all : 1, /* sample_type all events */
+
+ exclude_host : 1, /* don't count in host */
+ exclude_guest : 1, /* don't count in guest */
+
+ exclude_callchain_kernel : 1, /* exclude kernel callchains */
+ exclude_callchain_user : 1, /* exclude user callchains */
+ mmap2 : 1, /* include mmap with inode data */
+ comm_exec : 1, /* flag comm events that are due to an exec */
+ use_clockid : 1, /* use @clockid for time fields */
+ context_switch : 1, /* context switch data */
+ write_backward : 1, /* Write ring buffer from end to beginning */
+ __reserved_1 : 36;
+
+ union {
+ __u32 wakeup_events; /* wakeup every n events */
+ __u32 wakeup_watermark; /* bytes before wakeup */
+ };
+
+ __u32 bp_type;
+ union {
+ __u64 bp_addr;
+ __u64 config1; /* extension of config */
+ };
+ union {
+ __u64 bp_len;
+ __u64 config2; /* extension of config1 */
+ };
+ __u64 branch_sample_type; /* enum perf_branch_sample_type */
+
+ /*
+ * Defines set of user regs to dump on samples.
+ * See asm/perf_regs.h for details.
+ */
+ __u64 sample_regs_user;
+
+ /*
+ * Defines size of the user stack to dump on samples.
+ */
+ __u32 sample_stack_user;
+
+ __s32 clockid;
+ /*
+ * Defines set of regs to dump for each sample
+ * state captured on:
+ * - precise = 0: PMU interrupt
+ * - precise > 0: sampled instruction
+ *
+ * See asm/perf_regs.h for details.
+ */
+ __u64 sample_regs_intr;
+
+ /*
+ * Wakeup watermark for AUX area
+ */
+ __u32 aux_watermark;
+ __u32 __reserved_2; /* align to __u64 */
+};
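
As a usage sketch (separate from this header), a counter described by this
structure is typically opened through the perf_event_open() system call,
which has no libc wrapper; the attribute choices below are illustrative.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Open a CPU-cycles counter for the calling process, any CPU. */
static int open_cycles_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);	/* whichever PERF_ATTR_SIZE_VERx was compiled against */
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;	/* count user space only */

	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}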
+
+#define perf_flags(attr) (*(&(attr)->read_format + 1))
+
+/*
+ * Ioctls that can be done on a perf event fd:
+ */
+#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
+#define PERF_EVENT_IOC_RESET _IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
+#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
+#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
+
+enum perf_event_ioc_flags {
+ PERF_IOC_FLAG_GROUP = 1U << 0,
+};
+
+/*
+ * Structure of the page that can be mapped via mmap
+ */
+struct perf_event_mmap_page {
+ __u32 version; /* version number of this structure */
+ __u32 compat_version; /* lowest version this is compat with */
+
+ /*
+ * Bits needed to read the hw events in user-space.
+ *
+ * u32 seq, time_mult, time_shift, index, width;
+ * u64 count, enabled, running;
+ * u64 cyc, time_offset;
+ * s64 pmc = 0;
+ *
+ * do {
+ * seq = pc->lock;
+ * barrier()
+ *
+ * enabled = pc->time_enabled;
+ * running = pc->time_running;
+ *
+ * if (pc->cap_usr_time && enabled != running) {
+ * cyc = rdtsc();
+ * time_offset = pc->time_offset;
+ * time_mult = pc->time_mult;
+ * time_shift = pc->time_shift;
+ * }
+ *
+ * index = pc->index;
+ * count = pc->offset;
+ * if (pc->cap_user_rdpmc && index) {
+ * width = pc->pmc_width;
+ * pmc = rdpmc(index - 1);
+ * }
+ *
+ * barrier();
+ * } while (pc->lock != seq);
+ *
+ * NOTE: for obvious reasons this only works on self-monitoring
+ * processes.
+ */
+ __u32 lock; /* seqlock for synchronization */
+ __u32 index; /* hardware event identifier */
+ __s64 offset; /* add to hardware event value */
+ __u64 time_enabled; /* time event active */
+ __u64 time_running; /* time event on cpu */
+ union {
+ __u64 capabilities;
+ struct {
+ __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
+ cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
+
+ cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
+ cap_user_time : 1, /* The time_* fields are used */
+ cap_user_time_zero : 1, /* The time_zero field is used */
+ cap_____res : 59;
+ };
+ };
+
+ /*
+ * If cap_user_rdpmc this field provides the bit-width of the value
+ * read using the rdpmc() or equivalent instruction. This can be used
+ * to sign extend the result like:
+ *
+ * pmc <<= 64 - width;
+ * pmc >>= 64 - width; // signed shift right
+ * count += pmc;
+ */
+ __u16 pmc_width;
+
+ /*
+ * If cap_usr_time the below fields can be used to compute the time
+ * delta since time_enabled (in ns) using rdtsc or similar.
+ *
+ * u64 quot, rem;
+ * u64 delta;
+ *
+ * quot = (cyc >> time_shift);
+ * rem = cyc & (((u64)1 << time_shift) - 1);
+ * delta = time_offset + quot * time_mult +
+ * ((rem * time_mult) >> time_shift);
+ *
+ * Where time_offset,time_mult,time_shift and cyc are read in the
+ * seqcount loop described above. This delta can then be added to
+ * enabled and possible running (if index), improving the scaling:
+ *
+ * enabled += delta;
+ * if (index)
+ * running += delta;
+ *
+ * quot = count / running;
+ * rem = count % running;
+ * count = quot * enabled + (rem * enabled) / running;
+ */
+ __u16 time_shift;
+ __u32 time_mult;
+ __u64 time_offset;
+ /*
+ * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
+ * from sample timestamps.
+ *
+ * time = timestamp - time_zero;
+ * quot = time / time_mult;
+ * rem = time % time_mult;
+ * cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
+ *
+ * And vice versa:
+ *
+ * quot = cyc >> time_shift;
+ * rem = cyc & (((u64)1 << time_shift) - 1);
+ * timestamp = time_zero + quot * time_mult +
+ * ((rem * time_mult) >> time_shift);
+ */
+ __u64 time_zero;
+ __u32 size; /* Header size up to __reserved[] fields. */
+
+ /*
+ * Hole for extension of the self monitor capabilities
+ */
+
+ __u8 __reserved[118*8+4]; /* align to 1k. */
+
+ /*
+ * Control data for the mmap() data buffer.
+ *
+ * User-space reading the @data_head value should issue an smp_rmb(),
+ * after reading this value.
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+ * written by userspace to reflect the last read data, after issuing
+ * an smp_mb() to separate the data read from the ->data_tail store.
+ * In this case the kernel will not over-write unread data.
+ *
+ * See perf_output_put_handle() for the data ordering.
+ *
+ * data_{offset,size} indicate the location and size of the perf record
+ * buffer within the mmapped area.
+ */
+ __u64 data_head; /* head in the data section */
+ __u64 data_tail; /* user-space written tail */
+ __u64 data_offset; /* where the buffer starts */
+ __u64 data_size; /* data buffer size */
+
+ /*
+ * AUX area is defined by aux_{offset,size} fields that should be set
+ * by the userspace, so that
+ *
+ * aux_offset >= data_offset + data_size
+ *
+ * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
+ *
+ * Ring buffer pointers aux_{head,tail} have the same semantics as
+ * data_{head,tail} and same ordering rules apply.
+ */
+ __u64 aux_head;
+ __u64 aux_tail;
+ __u64 aux_offset;
+ __u64 aux_size;
+};
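
The seqlock loop documented in the comment above can be written as real code;
the sketch below reads only time_enabled/time_running (skipping the rdpmc
fast path), with liburcu's cmm_barrier() standing in for the barrier() used
in the pseudocode.

#include <stdint.h>
#include <linux/perf_event.h>	/* struct perf_event_mmap_page */
#include <urcu/compiler.h>	/* cmm_barrier() */

/* pc points at the mmap()ed perf_event_mmap_page of a self-monitoring event. */
static void read_event_times(volatile struct perf_event_mmap_page *pc,
		uint64_t *enabled, uint64_t *running)
{
	uint32_t seq;

	do {
		seq = pc->lock;
		cmm_barrier();
		*enabled = pc->time_enabled;
		*running = pc->time_running;
		cmm_barrier();
	} while (pc->lock != seq);
}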
+
+#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
+#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
+#define PERF_RECORD_MISC_KERNEL (1 << 0)
+#define PERF_RECORD_MISC_USER (2 << 0)
+#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER (5 << 0)
+
+/*
+ * Indicates that /proc/PID/maps parsing was truncated by a timeout.
+ */
+#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
+/*
+ * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
+ * different events so can reuse the same bit position.
+ * Ditto PERF_RECORD_MISC_SWITCH_OUT.
+ */
+#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
+#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
+#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
+/*
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)
+
+struct perf_event_header {
+ __u32 type;
+ __u16 misc;
+ __u16 size;
+};
+
+enum perf_event_type {
+
+ /*
+ * If perf_event_attr.sample_id_all is set then all event types will
+ * have the sample_type selected fields related to where/when
+ * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
+ * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed
+ * just after the perf_event_header and the fields already present for
+ * the existing fields, i.e. at the end of the payload. That way a newer
+ * perf.data file will be supported by older perf tools, with these new
+ * optional fields being ignored.
+ *
+ * struct sample_id {
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 id; } && PERF_SAMPLE_ID
+ * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 id; } && PERF_SAMPLE_IDENTIFIER
+ * } && perf_event_attr::sample_id_all
+ *
+ * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
+ * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
+ * relative to header.size.
+ */
+
+ /*
+ * The MMAP events record the PROT_EXEC mappings so that we can
+ * correlate userspace IPs to code. They have the following structure:
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * u64 addr;
+ * u64 len;
+ * u64 pgoff;
+ * char filename[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_MMAP = 1,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 id;
+ * u64 lost;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_LOST = 2,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * char comm[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_COMM = 3,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, ppid;
+ * u32 tid, ptid;
+ * u64 time;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_EXIT = 4,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 time;
+ * u64 id;
+ * u64 stream_id;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_THROTTLE = 5,
+ PERF_RECORD_UNTHROTTLE = 6,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, ppid;
+ * u32 tid, ptid;
+ * u64 time;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_FORK = 7,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, tid;
+ *
+ * struct read_format values;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_READ = 8,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ *
+ * #
+ * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
+ * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
+ * # is fixed relative to header.
+ * #
+ *
+ * { u64 id; } && PERF_SAMPLE_IDENTIFIER
+ * { u64 ip; } && PERF_SAMPLE_IP
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 addr; } && PERF_SAMPLE_ADDR
+ * { u64 id; } && PERF_SAMPLE_ID
+ * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 period; } && PERF_SAMPLE_PERIOD
+ *
+ * { struct read_format values; } && PERF_SAMPLE_READ
+ *
+ * { u64 nr,
+ * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
+ *
+ * #
+ * # The RAW record below is opaque data wrt the ABI
+ * #
+ * # That is, the ABI doesn't make any promises wrt to
+ * # the stability of its content, it may vary depending
+ * # on event, hardware, kernel version and phase of
+ * # the moon.
+ * #
+ * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
+ * #
+ *
+ * { u32 size;
+ * char data[size];}&& PERF_SAMPLE_RAW
+ *
+ * { u64 nr;
+ * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+ *
+ * { u64 abi; # enum perf_sample_regs_abi
+ * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
+ *
+ * { u64 size;
+ * char data[size];
+ * u64 dyn_size; } && PERF_SAMPLE_STACK_USER
+ *
+ * { u64 weight; } && PERF_SAMPLE_WEIGHT
+ * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
+ * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
+ * { u64 abi; # enum perf_sample_regs_abi
+ * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
+ * };
+ */
+ PERF_RECORD_SAMPLE = 9,
+
+ /*
+ * The MMAP2 records are an augmented version of MMAP, they add
+ * maj, min, ino numbers to be used to uniquely identify each mapping
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * u64 addr;
+ * u64 len;
+ * u64 pgoff;
+ * u32 maj;
+ * u32 min;
+ * u64 ino;
+ * u64 ino_generation;
+ * u32 prot, flags;
+ * char filename[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_MMAP2 = 10,
+
+ /*
+ * Records that new data landed in the AUX buffer part.
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u64 aux_offset;
+ * u64 aux_size;
+ * u64 flags;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_AUX = 11,
+
+ /*
+ * Indicates that instruction trace has started
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid;
+ * u32 tid;
+ * };
+ */
+ PERF_RECORD_ITRACE_START = 12,
+
+ /*
+ * Records the dropped/lost sample number.
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u64 lost;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_LOST_SAMPLES = 13,
+
+ /*
+ * Records a context switch in or out (flagged by
+ * PERF_RECORD_MISC_SWITCH_OUT). See also
+ * PERF_RECORD_SWITCH_CPU_WIDE.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_SWITCH = 14,
+
+ /*
+ * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
+ * next_prev_tid that are the next (switching out) or previous
+ * (switching in) pid/tid.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u32 next_prev_pid;
+ * u32 next_prev_tid;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_SWITCH_CPU_WIDE = 15,
+
+ PERF_RECORD_MAX, /* non-ABI */
+};
+
+#define PERF_MAX_STACK_DEPTH 127
+#define PERF_MAX_CONTEXTS_PER_STACK 8
+
+enum perf_callchain_context {
+ PERF_CONTEXT_HV = (__u64)-32,
+ PERF_CONTEXT_KERNEL = (__u64)-128,
+ PERF_CONTEXT_USER = (__u64)-512,
+
+ PERF_CONTEXT_GUEST = (__u64)-2048,
+ PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
+ PERF_CONTEXT_GUEST_USER = (__u64)-2560,
+
+ PERF_CONTEXT_MAX = (__u64)-4095,
+};
+
+/**
+ * PERF_RECORD_AUX::flags bits
+ */
+#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
+
+#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
+#define PERF_FLAG_FD_OUTPUT (1UL << 1)
+#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
+#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
+
+union perf_mem_data_src {
+ __u64 val;
+ struct {
+ __u64 mem_op:5, /* type of opcode */
+ mem_lvl:14, /* memory hierarchy level */
+ mem_snoop:5, /* snoop mode */
+ mem_lock:2, /* lock instr */
+ mem_dtlb:7, /* tlb access */
+ mem_rsvd:31;
+ };
+};
+
+/* type of opcode (load/store/prefetch,code) */
+#define PERF_MEM_OP_NA 0x01 /* not available */
+#define PERF_MEM_OP_LOAD 0x02 /* load instruction */
+#define PERF_MEM_OP_STORE 0x04 /* store instruction */
+#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */
+#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */
+#define PERF_MEM_OP_SHIFT 0
+
+/* memory hierarchy (memory level, hit or miss) */
+#define PERF_MEM_LVL_NA 0x01 /* not available */
+#define PERF_MEM_LVL_HIT 0x02 /* hit level */
+#define PERF_MEM_LVL_MISS 0x04 /* miss level */
+#define PERF_MEM_LVL_L1 0x08 /* L1 */
+#define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */
+#define PERF_MEM_LVL_L2 0x20 /* L2 */
+#define PERF_MEM_LVL_L3 0x40 /* L3 */
+#define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */
+#define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */
+#define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */
+#define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */
+#define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */
+#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */
+#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */
+#define PERF_MEM_LVL_SHIFT 5
+
+/* snoop mode */
+#define PERF_MEM_SNOOP_NA 0x01 /* not available */
+#define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */
+#define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */
+#define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */
+#define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */
+#define PERF_MEM_SNOOP_SHIFT 19
+
+/* locked instruction */
+#define PERF_MEM_LOCK_NA 0x01 /* not available */
+#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */
+#define PERF_MEM_LOCK_SHIFT 24
+
+/* TLB access */
+#define PERF_MEM_TLB_NA 0x01 /* not available */
+#define PERF_MEM_TLB_HIT 0x02 /* hit level */
+#define PERF_MEM_TLB_MISS 0x04 /* miss level */
+#define PERF_MEM_TLB_L1 0x08 /* L1 */
+#define PERF_MEM_TLB_L2 0x10 /* L2 */
+#define PERF_MEM_TLB_WK 0x20 /* Hardware Walker*/
+#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */
+#define PERF_MEM_TLB_SHIFT 26
+
+#define PERF_MEM_S(a, s) \
+ (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
+
+/*
+ * single taken branch record layout:
+ *
+ * from: source instruction (may not always be a branch insn)
+ * to: branch target
+ * mispred: branch target was mispredicted
+ * predicted: branch target was predicted
+ *
+ * support for mispred, predicted is optional. In case it
+ * is not supported mispred = predicted = 0.
+ *
+ * in_tx: running in a hardware transaction
+ * abort: aborting a hardware transaction
+ * cycles: cycles from last branch (or 0 if not supported)
+ */
+struct perf_branch_entry {
+ __u64 from;
+ __u64 to;
+ __u64 mispred:1, /* target mispredicted */
+ predicted:1,/* target predicted */
+ in_tx:1, /* in transaction */
+ abort:1, /* transaction abort */
+ cycles:16, /* cycle count to last branch */
+ reserved:44;
+};
+
+#endif /* _UAPI_LINUX_PERF_EVENT_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * Internal header for Lock-Free RCU Hash Table
+ */
+
+#ifndef _LTTNG_UST_RCULFHASH_INTERNAL_H
+#define _LTTNG_UST_RCULFHASH_INTERNAL_H
+
+#include "rculfhash.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#ifdef DEBUG
+#define dbg_printf(fmt, args...) printf("[debug lttng-ust rculfhash] " fmt, ## args)
+#else
+#define dbg_printf(fmt, args...) \
+do { \
+ /* do nothing but check printf format */ \
+ if (0) \
+ printf("[debug lttng-ust rculfhash] " fmt, ## args); \
+} while (0)
+#endif
+
+#if (CAA_BITS_PER_LONG == 32)
+#define MAX_TABLE_ORDER 32
+#else
+#define MAX_TABLE_ORDER 64
+#endif
+
+#define MAX_CHUNK_TABLE (1UL << 10)
+
+#ifndef min
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
+#ifndef max
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#endif
+
+/*
+ * lttng_ust_lfht: Top-level data structure representing a lock-free hash
+ * table. Defined in the implementation file to make it an opaque
+ * cookie to users.
+ *
+ * The fields used in fast-paths are placed near the end of the
+ * structure, because we need to have a variable-sized union to contain
+ * the mm plugin fields, which are used in the fast path.
+ */
+struct lttng_ust_lfht {
+ /* Initial configuration items */
+ unsigned long max_nr_buckets;
+ const struct lttng_ust_lfht_mm_type *mm; /* memory management plugin */
+ const struct rcu_flavor_struct *flavor; /* RCU flavor */
+
+ /*
+ * We need to put the work threads offline (QSBR) when taking this
+ * mutex, because we use synchronize_rcu within this mutex critical
+ * section, which waits on read-side critical sections, and could
+ * therefore cause grace-period deadlock if we hold off RCU G.P.
+ * completion.
+ */
+ pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
+ unsigned int in_progress_destroy;
+ unsigned long resize_target;
+ int resize_initiated;
+
+ /*
+ * Variables needed for add and remove fast-paths.
+ */
+ int flags;
+ unsigned long min_alloc_buckets_order;
+ unsigned long min_nr_alloc_buckets;
+
+ /*
+ * Variables needed for the lookup, add and remove fast-paths.
+ */
+ unsigned long size; /* always a power of 2, shared (RCU) */
+ /*
+ * bucket_at pointer is kept here to skip the extra level of
+ * dereference needed to get to "mm" (this is a fast-path).
+ */
+ struct lttng_ust_lfht_node *(*bucket_at)(struct lttng_ust_lfht *ht,
+ unsigned long index);
+ /*
+ * Dynamic length "tbl_chunk" needs to be at the end of
+ * lttng_ust_lfht.
+ */
+ union {
+ /*
+ * Contains the per order-index-level bucket node table.
+ * The size of each bucket node table is half the number
+ * of hashes contained in this order (except for order 0).
+ * The minimum allocation buckets size parameter allows
+ * combining the bucket node arrays of the lowermost
+ * levels to improve cache locality for small index orders.
+ */
+ struct lttng_ust_lfht_node *tbl_order[MAX_TABLE_ORDER];
+
+ /*
+ * Contains the bucket node chunks. The size of each
+ * bucket node chunk is ->min_alloc_size (we avoid allocating
+ * chunks of different sizes). Chunks improve
+ * cache locality for small index orders, and are more
+ * friendly with environments where allocation of large
+ * contiguous memory areas is challenging due to memory
+ * fragmentation concerns or inability to use virtual
+ * memory addressing.
+ */
+ struct lttng_ust_lfht_node *tbl_chunk[0];
+
+ /*
+ * Memory mapping with room for all possible buckets.
+ * Their memory is allocated when needed.
+ */
+ struct lttng_ust_lfht_node *tbl_mmap;
+ };
+ /*
+ * End of variables needed for the lookup, add and remove
+ * fast-paths.
+ */
+};
+
+extern unsigned int lttng_ust_lfht_fls_ulong(unsigned long x)
+ __attribute__((visibility("hidden")));
+
+extern int lttng_ust_lfht_get_count_order_u32(uint32_t x)
+ __attribute__((visibility("hidden")));
+
+extern int lttng_ust_lfht_get_count_order_ulong(unsigned long x)
+ __attribute__((visibility("hidden")));
+
+#ifdef POISON_FREE
+#define poison_free(ptr) \
+ do { \
+ if (ptr) { \
+ memset(ptr, 0x42, sizeof(*(ptr))); \
+ free(ptr); \
+ } \
+ } while (0)
+#else
+#define poison_free(ptr) free(ptr)
+#endif
+
+static inline
+struct lttng_ust_lfht *__default_alloc_lttng_ust_lfht(
+ const struct lttng_ust_lfht_mm_type *mm,
+ unsigned long lttng_ust_lfht_size,
+ unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets)
+{
+ struct lttng_ust_lfht *ht;
+
+ ht = calloc(1, lttng_ust_lfht_size);
+ assert(ht);
+
+ ht->mm = mm;
+ ht->bucket_at = mm->bucket_at;
+ ht->min_nr_alloc_buckets = min_nr_alloc_buckets;
+ ht->min_alloc_buckets_order =
+ lttng_ust_lfht_get_count_order_ulong(min_nr_alloc_buckets);
+ ht->max_nr_buckets = max_nr_buckets;
+
+ return ht;
+}
+
+#endif /* _LTTNG_UST_RCULFHASH_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * Chunk based memory management for Lock-Free RCU Hash Table
+ */
+
+#include <stddef.h>
+#include "rculfhash-internal.h"
+
+static
+void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ if (order == 0) {
+ ht->tbl_chunk[0] = calloc(ht->min_nr_alloc_buckets,
+ sizeof(struct lttng_ust_lfht_node));
+ assert(ht->tbl_chunk[0]);
+ } else if (order > ht->min_alloc_buckets_order) {
+ unsigned long i, len = 1UL << (order - 1 - ht->min_alloc_buckets_order);
+
+ for (i = len; i < 2 * len; i++) {
+ ht->tbl_chunk[i] = calloc(ht->min_nr_alloc_buckets,
+ sizeof(struct lttng_ust_lfht_node));
+ assert(ht->tbl_chunk[i]);
+ }
+ }
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+}
+
+/*
+ * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
+ * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
+ * lfht is destroyed.
+ */
+static
+void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ if (order == 0)
+ poison_free(ht->tbl_chunk[0]);
+ else if (order > ht->min_alloc_buckets_order) {
+ unsigned long i, len = 1UL << (order - 1 - ht->min_alloc_buckets_order);
+
+ for (i = len; i < 2 * len; i++)
+ poison_free(ht->tbl_chunk[i]);
+ }
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+}
+
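/*
 * Illustrative worked example for the mapping below: with
 * min_nr_alloc_buckets = 4 (min_alloc_buckets_order = 2), bucket index 11
 * maps to chunk 11 >> 2 = 2 at offset 11 & 3 = 3.
 */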
+static
+struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
+{
+ unsigned long chunk, offset;
+
+ chunk = index >> ht->min_alloc_buckets_order;
+ offset = index & (ht->min_nr_alloc_buckets - 1);
+ return &ht->tbl_chunk[chunk][offset];
+}
+
+static
+struct lttng_ust_lfht *alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets)
+{
+ unsigned long nr_chunks, lttng_ust_lfht_size;
+
+ min_nr_alloc_buckets = max(min_nr_alloc_buckets,
+ max_nr_buckets / MAX_CHUNK_TABLE);
+ nr_chunks = max_nr_buckets / min_nr_alloc_buckets;
+ lttng_ust_lfht_size = offsetof(struct lttng_ust_lfht, tbl_chunk) +
+ sizeof(struct lttng_ust_lfht_node *) * nr_chunks;
+ lttng_ust_lfht_size = max(lttng_ust_lfht_size, sizeof(struct lttng_ust_lfht));
+
+ return __default_alloc_lttng_ust_lfht(
+ &lttng_ust_lfht_mm_chunk, lttng_ust_lfht_size,
+ min_nr_alloc_buckets, max_nr_buckets);
+}
+
+const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_chunk = {
+ .alloc_lttng_ust_lfht = alloc_lttng_ust_lfht,
+ .alloc_bucket_table = lttng_ust_lfht_alloc_bucket_table,
+ .free_bucket_table = lttng_ust_lfht_free_bucket_table,
+ .bucket_at = bucket_at,
+};
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * mmap/reservation based memory management for Lock-Free RCU Hash Table
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include "rculfhash-internal.h"
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+/*
+ * The allocation scheme used by the mmap-based RCU hash table is to make a
+ * large inaccessible mapping to reserve memory without allocating it.
+ * Smaller chunks are then allocated by overlapping read/write mappings which
+ * do allocate memory. Deallocation is done by an overlapping inaccessible
+ * mapping.
+ *
+ * This scheme was tested on Linux, macOS and Solaris. However, on Cygwin the
+ * mmap wrapper is based on the Windows NtMapViewOfSection API which doesn't
+ * support overlapping mappings.
+ *
+ * An alternative to the overlapping mappings is to use mprotect to change the
+ * protection on chunks of the large mapping, read/write to allocate and none
+ * to deallocate. This works perfectly on Cygwin and Solaris, but on Linux a
+ * call to madvise is also required to deallocate, and it just doesn't work on
+ * macOS.
+ *
+ * For this reason, we keep the original scheme on all platforms except Cygwin.
+ */
+
+
+/* Reserve inaccessible memory space without allocating it */
+static
+void *memory_map(size_t length)
+{
+ void *ret;
+
+ ret = mmap(NULL, length, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (ret == MAP_FAILED) {
+ perror("mmap");
+ abort();
+ }
+ return ret;
+}
+
+static
+void memory_unmap(void *ptr, size_t length)
+{
+ if (munmap(ptr, length)) {
+ perror("munmap");
+ abort();
+ }
+}
+
+#ifdef __CYGWIN__
+/* Set protection to read/write to allocate a memory chunk */
+static
+void memory_populate(void *ptr, size_t length)
+{
+ if (mprotect(ptr, length, PROT_READ | PROT_WRITE)) {
+ perror("mprotect");
+ abort();
+ }
+}
+
+/* Set protection to none to deallocate a memory chunk */
+static
+void memory_discard(void *ptr, size_t length)
+{
+ if (mprotect(ptr, length, PROT_NONE)) {
+ perror("mprotect");
+ abort();
+ }
+}
+
+#else /* __CYGWIN__ */
+
+static
+void memory_populate(void *ptr, size_t length)
+{
+ if (mmap(ptr, length, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
+ -1, 0) != ptr) {
+ perror("mmap");
+ abort();
+ }
+}
+
+/*
+ * Discard garbage memory so the system does not try to save it when swapping
+ * it out, while keeping the range reserved and inaccessible.
+ */
+static
+void memory_discard(void *ptr, size_t length)
+{
+ if (mmap(ptr, length, PROT_NONE,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
+ -1, 0) != ptr) {
+ perror("mmap");
+ abort();
+ }
+}
+#endif /* __CYGWIN__ */
+
+static
+void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ if (order == 0) {
+ if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
+ /* small table */
+ ht->tbl_mmap = calloc(ht->max_nr_buckets,
+ sizeof(*ht->tbl_mmap));
+ assert(ht->tbl_mmap);
+ return;
+ }
+ /* large table */
+ ht->tbl_mmap = memory_map(ht->max_nr_buckets
+ * sizeof(*ht->tbl_mmap));
+ memory_populate(ht->tbl_mmap,
+ ht->min_nr_alloc_buckets * sizeof(*ht->tbl_mmap));
+ } else if (order > ht->min_alloc_buckets_order) {
+ /* large table */
+ unsigned long len = 1UL << (order - 1);
+
+ assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
+ memory_populate(ht->tbl_mmap + len,
+ len * sizeof(*ht->tbl_mmap));
+ }
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+}
+
+/*
+ * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
+ * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
+ * lfht is destroyed.
+ */
+static
+void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ if (order == 0) {
+ if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
+ /* small table */
+ poison_free(ht->tbl_mmap);
+ return;
+ }
+ /* large table */
+ memory_unmap(ht->tbl_mmap,
+ ht->max_nr_buckets * sizeof(*ht->tbl_mmap));
+ } else if (order > ht->min_alloc_buckets_order) {
+ /* large table */
+ unsigned long len = 1UL << (order - 1);
+
+ assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
+ memory_discard(ht->tbl_mmap + len, len * sizeof(*ht->tbl_mmap));
+ }
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+}
+
+static
+struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
+{
+ return &ht->tbl_mmap[index];
+}
+
+static
+struct lttng_ust_lfht *alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets)
+{
+ unsigned long page_bucket_size;
+
+ page_bucket_size = getpagesize() / sizeof(struct lttng_ust_lfht_node);
+ if (max_nr_buckets <= page_bucket_size) {
+ /* small table */
+ min_nr_alloc_buckets = max_nr_buckets;
+ } else {
+ /* large table */
+ min_nr_alloc_buckets = max(min_nr_alloc_buckets,
+ page_bucket_size);
+ }
+
+ return __default_alloc_lttng_ust_lfht(
+ &lttng_ust_lfht_mm_mmap, sizeof(struct lttng_ust_lfht),
+ min_nr_alloc_buckets, max_nr_buckets);
+}
+
+const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_mmap = {
+ .alloc_lttng_ust_lfht = alloc_lttng_ust_lfht,
+ .alloc_bucket_table = lttng_ust_lfht_alloc_bucket_table,
+ .free_bucket_table = lttng_ust_lfht_free_bucket_table,
+ .bucket_at = bucket_at,
+};
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * Order based memory management for Lock-Free RCU Hash Table
+ */
+
+#include "rculfhash-internal.h"
+
+static
+void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ if (order == 0) {
+ ht->tbl_order[0] = calloc(ht->min_nr_alloc_buckets,
+ sizeof(struct lttng_ust_lfht_node));
+ assert(ht->tbl_order[0]);
+ } else if (order > ht->min_alloc_buckets_order) {
+ ht->tbl_order[order] = calloc(1UL << (order -1),
+ sizeof(struct lttng_ust_lfht_node));
+ assert(ht->tbl_order[order]);
+ }
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+}
+
+/*
+ * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
+ * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
+ * lfht is destroyed.
+ */
+static
+void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ if (order == 0)
+ poison_free(ht->tbl_order[0]);
+ else if (order > ht->min_alloc_buckets_order)
+ poison_free(ht->tbl_order[order]);
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+}
+
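/*
 * Illustrative worked example for the mapping below: with
 * min_nr_alloc_buckets = 4, index 5 has fls(5) = 3, so it lives in
 * tbl_order[3] (which holds buckets 4..7) at offset 5 & ((1 << 2) - 1) = 1.
 */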
+static
+struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
+{
+ unsigned long order;
+
+ if (index < ht->min_nr_alloc_buckets) {
+ dbg_printf("bucket index %lu order 0 aridx 0\n", index);
+ return &ht->tbl_order[0][index];
+ }
+ /*
+ * equivalent to lttng_ust_lfht_get_count_order_ulong(index + 1), but
+ * optimizes away the non-existing 0 special-case for
+ * lttng_ust_lfht_get_count_order_ulong.
+ */
+ order = lttng_ust_lfht_fls_ulong(index);
+ dbg_printf("bucket index %lu order %lu aridx %lu\n",
+ index, order, index & ((1UL << (order - 1)) - 1));
+ return &ht->tbl_order[order][index & ((1UL << (order - 1)) - 1)];
+}
+
+static
+struct lttng_ust_lfht *alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets)
+{
+ return __default_alloc_lttng_ust_lfht(
+ &lttng_ust_lfht_mm_order, sizeof(struct lttng_ust_lfht),
+ min_nr_alloc_buckets, max_nr_buckets);
+}
+
+const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_order = {
+ .alloc_lttng_ust_lfht = alloc_lttng_ust_lfht,
+ .alloc_bucket_table = lttng_ust_lfht_alloc_bucket_table,
+ .free_bucket_table = lttng_ust_lfht_free_bucket_table,
+ .bucket_at = bucket_at,
+};
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * Userspace RCU library - Lock-Free Resizable RCU Hash Table
+ */
+
+/*
+ * Based on the following articles:
+ * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
+ * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
+ * - Michael, M. M. High performance dynamic lock-free hash tables
+ * and list-based sets. In Proceedings of the fourteenth annual ACM
+ * symposium on Parallel algorithms and architectures, ACM Press,
+ * (2002), 73-82.
+ *
+ * Some specificities of this Lock-Free Resizable RCU Hash Table
+ * implementation:
+ *
+ * - RCU read-side critical section allows readers to perform hash
+ * table lookups, as well as traversals, and use the returned objects
+ * safely by allowing memory reclaim to take place only after a grace
+ * period.
+ * - Add and remove operations are lock-free, and do not need to
+ * allocate memory. They need to be executed within RCU read-side
+ * critical section to ensure the objects they read are valid and to
+ * deal with the cmpxchg ABA problem.
+ * - add and add_unique operations are supported. add_unique checks if
+ * the node key already exists in the hash table. It ensures not to
+ * populate a duplicate key if the node key already exists in the hash
+ * table.
+ * - The resize operation executes concurrently with
+ * add/add_unique/add_replace/remove/lookup/traversal.
+ * - Hash table nodes are contained within a split-ordered list. This
+ * list is ordered by incrementing reversed-bits-hash value.
+ * - An index of bucket nodes is kept. These bucket nodes are the hash
+ * table "buckets". These buckets are internal nodes that allow to
+ * perform a fast hash lookup, similarly to a skip list. These
+ * buckets are chained together in the split-ordered list, which
+ * allows recursive expansion by inserting new buckets between the
+ * existing buckets. The split-ordered list allows adding new buckets
+ * between existing buckets as the table needs to grow.
+ * - The resize operation for small tables only allows expanding the
+ * hash table. It is triggered automatically by detecting long chains
+ * in the add operation.
+ * - The resize operation for larger tables (and available through an
+ * API) allows both expanding and shrinking the hash table.
+ * - Split-counters are used to keep track of the number of
+ * nodes within the hash table for automatic resize triggering.
+ * - Resize operation initiated by long chain detection is executed by a
+ * worker thread, which keeps lock-freedom of add and remove.
+ * - Resize operations are protected by a mutex.
+ * - The removal operation is split in two parts: first, a "removed"
+ * flag is set in the next pointer within the node to remove. Then,
+ * a "garbage collection" is performed in the bucket containing the
+ * removed node (from the start of the bucket up to the removed node).
+ * All encountered nodes with "removed" flag set in their next
+ * pointers are removed from the linked-list. If the cmpxchg used for
+ * removal fails (due to concurrent garbage-collection or concurrent
+ * add), we retry from the beginning of the bucket. This ensures that
+ * the node with "removed" flag set is removed from the hash table
+ * (not visible to lookups anymore) before the RCU read-side critical
+ * section held across removal ends. Furthermore, this ensures that
+ * the node with "removed" flag set is removed from the linked-list
+ * before its memory is reclaimed. After setting the "removal" flag,
+ * only the thread that is first to set the "removal owner" flag (with
+ * an xchg) in the node's next pointer is considered to have succeeded
+ * in its removal (and thus owns the node to reclaim).
+ * Because we garbage-collect starting from an invariant node (the
+ * start-of-bucket bucket node) up to the "removed" node (or find a
+ * reverse-hash that is higher), we are sure that a successful
+ * traversal of the chain leads to a chain that is present in the
+ * linked-list (the start node is never removed) and that it does not
+ * contain the "removed" node anymore, even if concurrent delete/add
+ * operations are changing the structure of the list concurrently.
+ * - The add operations perform garbage collection of buckets if they
+ * encounter nodes with removed flag set in the bucket where they want
+ * to add their new node. This ensures lock-freedom of add operation by
+ * helping the remover unlink nodes from the list rather than waiting
+ * for it to do so.
+ * - There are three memory backends for the hash table buckets: the
+ * "order table", the "chunks", and the "mmap".
+ * - These bucket containers contain a compact version of the hash table
+ * nodes.
+ * - The RCU "order table":
+ * - has a first level table indexed by log2(hash index) which is
+ * copied and expanded by the resize operation. This order table
+ * allows finding the "bucket node" tables.
+ * - There is one bucket node table per hash index order. The size of
+ * each bucket node table is half the number of hashes contained in
+ * this order (except for order 0).
+ * - The RCU "chunks" is best suited for close interaction with a page
+ * allocator. It uses a linear array as index to "chunks" containing
+ * each the same number of buckets.
+ * - The RCU "mmap" memory backend uses a single memory map to hold
+ * all buckets.
+ * - synchronize_rcu is used to garbage-collect the old bucket node table.
+ *
+ * Ordering Guarantees:
+ *
+ * To discuss these guarantees, we first define "read" operation as any
+ * of the basic lttng_ust_lfht_lookup, lttng_ust_lfht_next_duplicate,
+ * lttng_ust_lfht_first, lttng_ust_lfht_next operation, as well as
+ * lttng_ust_lfht_add_unique (failure).
+ *
+ * We define "read traversal" operation as any of the following
+ * group of operations
+ * - lttng_ust_lfht_lookup followed by iteration with lttng_ust_lfht_next_duplicate
+ * (and/or lttng_ust_lfht_next, although less common).
+ * - lttng_ust_lfht_add_unique (failure) followed by iteration with
+ * lttng_ust_lfht_next_duplicate (and/or lttng_ust_lfht_next, although less
+ * common).
+ * - lttng_ust_lfht_first followed by iteration with lttng_ust_lfht_next (and/or
+ * lttng_ust_lfht_next_duplicate, although less common).
+ *
+ * We define "write" operations as any of lttng_ust_lfht_add, lttng_ust_lfht_replace,
+ * lttng_ust_lfht_add_unique (success), lttng_ust_lfht_add_replace, lttng_ust_lfht_del.
+ *
+ * When lttng_ust_lfht_add_unique succeeds (returns the node passed as
+ * parameter), it acts as a "write" operation. When lttng_ust_lfht_add_unique
+ * fails (returns a node different from the one passed as parameter), it
+ * acts as a "read" operation. A lttng_ust_lfht_add_unique failure is a
+ * lttng_ust_lfht_lookup "read" operation, therefore, any ordering guarantee
+ * referring to "lookup" imply any of "lookup" or lttng_ust_lfht_add_unique
+ * (failure).
+ *
+ * We define "prior" and "later" node as nodes observable by reads and
+ * read traversals respectively before and after a write or sequence of
+ * write operations.
+ *
+ * Hash-table operations are often cascaded, for example, the pointer
+ * returned by a lttng_ust_lfht_lookup() might be passed to a lttng_ust_lfht_next(),
+ * whose return value might in turn be passed to another hash-table
+ * operation. This entire cascaded series of operations must be enclosed
+ * by a pair of matching rcu_read_lock() and rcu_read_unlock()
+ * operations.
+ *
+ * The following ordering guarantees are offered by this hash table:
+ *
+ * A.1) "read" after "write": if there is ordering between a write and a
+ * later read, then the read is guaranteed to see the write or some
+ * later write.
+ * A.2) "read traversal" after "write": given that there is dependency
+ * ordering between reads in a "read traversal", if there is
+ * ordering between a write and the first read of the traversal,
+ * then the "read traversal" is guaranteed to see the write or
+ * some later write.
+ * B.1) "write" after "read": if there is ordering between a read and a
+ * later write, then the read will never see the write.
+ * B.2) "write" after "read traversal": given that there is dependency
+ * ordering between reads in a "read traversal", if there is
+ * ordering between the last read of the traversal and a later
+ * write, then the "read traversal" will never see the write.
+ * C) "write" while "read traversal": if a write occurs during a "read
+ * traversal", the traversal may, or may not, see the write.
+ * D.1) "write" after "write": if there is ordering between a write and
+ * a later write, then the later write is guaranteed to see the
+ * effects of the first write.
+ * D.2) Concurrent "write" pairs: The system will assign an arbitrary
+ * order to any pair of concurrent conflicting writes.
+ * Non-conflicting writes (for example, to different keys) are
+ * unordered.
+ * E) If a grace period separates a "del" or "replace" operation
+ * and a subsequent operation, then that subsequent operation is
+ * guaranteed not to see the removed item.
+ * F) Uniqueness guarantee: given a hash table that does not contain
+ * duplicate items for a given key, there will only be one item in
+ * the hash table after an arbitrary sequence of add_unique and/or
+ * add_replace operations. Note, however, that a pair of
+ * concurrent read operations might well access two different items
+ * with that key.
+ * G.1) If a pair of lookups for a given key are ordered (e.g. by a
+ * memory barrier), then the second lookup will return the same
+ * node as the previous lookup, or some later node.
+ * G.2) A "read traversal" that starts after the end of a prior "read
+ * traversal" (ordered by memory barriers) is guaranteed to see the
+ * same nodes as the previous traversal, or some later nodes.
+ * G.3) Concurrent "read" pairs: concurrent reads are unordered. For
+ * example, if a pair of reads to the same key run concurrently
+ * with an insertion of that same key, the reads remain unordered
+ * regardless of their return values. In other words, you cannot
+ * rely on the values returned by the reads to deduce ordering.
+ *
+ * Progress guarantees:
+ *
+ * * Reads are wait-free. These operations always move forward in the
+ * hash table linked list, and this list has no loop.
+ * * Writes are lock-free. Any retry loop performed by a write operation
+ * is triggered by progress made within another update operation.
+ *
+ * Bucket node tables:
+ *
+ * hash table hash table the last all bucket node tables
+ * order size bucket node 0 1 2 3 4 5 6(index)
+ * table size
+ * 0 1 1 1
+ * 1 2 1 1 1
+ * 2 4 2 1 1 2
+ * 3 8 4 1 1 2 4
+ * 4 16 8 1 1 2 4 8
+ * 5 32 16 1 1 2 4 8 16
+ * 6 64 32 1 1 2 4 8 16 32
+ *
+ * When growing/shrinking, we only focus on the last bucket node table,
+ * whose size is (!order ? 1 : (1 << (order - 1))).
+ *
+ * Example for growing/shrinking:
+ * grow hash table from order 5 to 6: init the index=6 bucket node table
+ * shrink hash table from order 6 to 5: fini the index=6 bucket node table
+ *
+ * A bit of ascii art explanation:
+ *
+ * The order index is off by one compared to the actual power of 2,
+ * because we use index 0 to deal with the order-0 special case.
+ *
+ * This shows the nodes for a small table ordered by reversed bits:
+ *
+ * bits reverse
+ * 0 000 000
+ * 4 100 001
+ * 2 010 010
+ * 6 110 011
+ * 1 001 100
+ * 5 101 101
+ * 3 011 110
+ * 7 111 111
+ *
+ * This shows the nodes in order of non-reversed bits, linked by
+ * reversed-bit order.
+ *
+ * order bits reverse
+ * 0 0 000 000
+ * 1 | 1 001 100 <-
+ * 2 | | 2 010 010 <- |
+ * | | | 3 011 110 | <- |
+ * 3 -> | | | 4 100 001 | |
+ * -> | | 5 101 101 |
+ * -> | 6 110 011
+ * -> 7 111 111
+ */
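+
+/*
+ * Minimal read-side usage sketch. Assumptions (not part of this file):
+ * the caller embeds the node in its own structure, provides the match
+ * function and hash value, and holds the urcu flavour's read-side lock,
+ * written generically below as rcu_read_lock()/rcu_read_unlock():
+ *
+ *	struct lttng_ust_lfht_iter iter;
+ *	struct lttng_ust_lfht_node *ht_node;
+ *
+ *	rcu_read_lock();
+ *	lttng_ust_lfht_lookup(ht, hash, match, key, &iter);
+ *	ht_node = lttng_ust_lfht_iter_get_node(&iter);
+ *	while (ht_node) {
+ *		// e.g. caa_container_of(ht_node, struct mynode, node)
+ *		lttng_ust_lfht_next_duplicate(ht, match, key, &iter);
+ *		ht_node = lttng_ust_lfht_iter_get_node(&iter);
+ *	}
+ *	rcu_read_unlock();
+ */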
+
+/*
+ * Note on port to lttng-ust: auto-resize and accounting features are
+ * removed.
+ */
+
+#define _LGPL_SOURCE
+#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <sched.h>
+#include <unistd.h>
+
+#include <lttng/ust-arch.h>
+#include <lttng/urcu/pointer.h>
+#include <urcu/arch.h>
+#include <urcu/uatomic.h>
+#include <urcu/compiler.h>
+#include "rculfhash.h"
+#include "rculfhash-internal.h"
+#include <pthread.h>
+#include <signal.h>
+
+/*
+ * Split-counters lazily update the global counter every 1024
+ * additions/removals. They automatically keep track of when a resize is
+ * required. We use the bucket length as an indicator of the need to
+ * expand, for small tables and machines lacking per-cpu data support.
+ */
+#define COUNT_COMMIT_ORDER 10
+
+/*
+ * Define the minimum table size.
+ */
+#define MIN_TABLE_ORDER 0
+#define MIN_TABLE_SIZE (1UL << MIN_TABLE_ORDER)
+
+/*
+ * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
+ */
+#define MIN_PARTITION_PER_THREAD_ORDER 12
+#define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER)
+
+/*
+ * The removed flag needs to be updated atomically with the pointer.
+ * It indicates that no node must attach to the node scheduled for
+ * removal, and that node garbage collection must be performed.
+ * The bucket flag does not need to be updated atomically with the
+ * pointer, but it is added as a pointer low-bit flag to save space.
+ * The "removal owner" flag is used to detect which of the "del"
+ * operations that set the "removed" flag gets to return the removed
+ * node to its caller. Note that the replace operation does not need to
+ * interact with the "removal owner" flag, because it validates that
+ * the "removed" flag is not set before performing its cmpxchg.
+ */
+#define REMOVED_FLAG (1UL << 0)
+#define BUCKET_FLAG (1UL << 1)
+#define REMOVAL_OWNER_FLAG (1UL << 2)
+#define FLAGS_MASK ((1UL << 3) - 1)
+
+/* Value of the end pointer. Should not interact with flags. */
+#define END_VALUE NULL
+
+/*
+ * ht_items_count: Split-counters counting the number of node addition
+ * and removal in the table. Only used if the LTTNG_UST_LFHT_ACCOUNTING flag
+ * is set at hash table creation.
+ *
+ * These are free-running counters, never reset to zero. They count the
+ * number of add/remove, and trigger every (1 << COUNT_COMMIT_ORDER)
+ * operations to update the global counter. We choose a power-of-2 value
+ * for the trigger to deal with 32 or 64-bit overflow of the counter.
+ */
+struct ht_items_count {
+ unsigned long add, del;
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+#ifdef CONFIG_LTTNG_UST_LFHT_ITER_DEBUG
+
+static
+void lttng_ust_lfht_iter_debug_set_ht(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
+{
+ iter->lfht = ht;
+}
+
+#define lttng_ust_lfht_iter_debug_assert(...) assert(__VA_ARGS__)
+
+#else
+
+static
+void lttng_ust_lfht_iter_debug_set_ht(struct lttng_ust_lfht *ht __attribute__((unused)),
+ struct lttng_ust_lfht_iter *iter __attribute__((unused)))
+{
+}
+
+#define lttng_ust_lfht_iter_debug_assert(...)
+
+#endif
+
+/*
+ * Algorithm to reverse bits in a word by lookup table, extended to
+ * 64-bit words.
+ * Source:
+ * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
+ * Originally in the public domain.
+ */
+
+static const uint8_t BitReverseTable256[256] =
+{
+#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
+#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
+#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
+ R6(0), R6(2), R6(1), R6(3)
+};
+#undef R2
+#undef R4
+#undef R6
+
+static
+uint8_t bit_reverse_u8(uint8_t v)
+{
+ return BitReverseTable256[v];
+}
+
+#if (CAA_BITS_PER_LONG == 32)
+static
+uint32_t bit_reverse_u32(uint32_t v)
+{
+ return ((uint32_t) bit_reverse_u8(v) << 24) |
+ ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
+ ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
+ ((uint32_t) bit_reverse_u8(v >> 24));
+}
+#else
+static
+uint64_t bit_reverse_u64(uint64_t v)
+{
+ return ((uint64_t) bit_reverse_u8(v) << 56) |
+ ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
+ ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
+ ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
+ ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
+ ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
+ ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
+ ((uint64_t) bit_reverse_u8(v >> 56));
+}
+#endif
+
+static
+unsigned long bit_reverse_ulong(unsigned long v)
+{
+#if (CAA_BITS_PER_LONG == 32)
+ return bit_reverse_u32(v);
+#else
+ return bit_reverse_u64(v);
+#endif
+}
+
+/*
+ * fls: returns the position of the most significant bit.
+ * Returns 0 if no bit is set, else returns the position of the most
+ * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
+ */
+#if defined(LTTNG_UST_ARCH_X86)
+static inline
+unsigned int fls_u32(uint32_t x)
+{
+ int r;
+
+ __asm__ ("bsrl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n\t"
+ "1:\n\t"
+ : "=r" (r) : "rm" (x));
+ return r + 1;
+}
+#define HAS_FLS_U32
+#endif
+
+#if defined(LTTNG_UST_ARCH_AMD64)
+static inline
+unsigned int fls_u64(uint64_t x)
+{
+ long r;
+
+ __asm__ ("bsrq %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movq $-1,%0\n\t"
+ "1:\n\t"
+ : "=r" (r) : "rm" (x));
+ return r + 1;
+}
+#define HAS_FLS_U64
+#endif
+
+#ifndef HAS_FLS_U64
+static
+unsigned int fls_u64(uint64_t x)
+ __attribute__((unused));
+static
+unsigned int fls_u64(uint64_t x)
+{
+ unsigned int r = 64;
+
+ if (!x)
+ return 0;
+
+ if (!(x & 0xFFFFFFFF00000000ULL)) {
+ x <<= 32;
+ r -= 32;
+ }
+ if (!(x & 0xFFFF000000000000ULL)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xFF00000000000000ULL)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xF000000000000000ULL)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xC000000000000000ULL)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x8000000000000000ULL)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+#endif
+
+#ifndef HAS_FLS_U32
+static
+unsigned int fls_u32(uint32_t x)
+ __attribute__((unused));
+static
+unsigned int fls_u32(uint32_t x)
+{
+ unsigned int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xFFFF0000U)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xFF000000U)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xF0000000U)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xC0000000U)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000U)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+#endif
+
+unsigned int lttng_ust_lfht_fls_ulong(unsigned long x)
+{
+#if (CAA_BITS_PER_LONG == 32)
+ return fls_u32(x);
+#else
+ return fls_u64(x);
+#endif
+}
+
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
+int lttng_ust_lfht_get_count_order_u32(uint32_t x)
+{
+ if (!x)
+ return -1;
+
+ return fls_u32(x - 1);
+}
+
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
+int lttng_ust_lfht_get_count_order_ulong(unsigned long x)
+{
+ if (!x)
+ return -1;
+
+ return lttng_ust_lfht_fls_ulong(x - 1);
+}
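+
+/*
+ * For instance, lttng_ust_lfht_get_count_order_ulong(8) returns 3
+ * (8 <= 1UL << 3), while lttng_ust_lfht_get_count_order_ulong(9)
+ * returns 4.
+ */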
+
+static
+struct lttng_ust_lfht_node *clear_flag(struct lttng_ust_lfht_node *node)
+{
+ return (struct lttng_ust_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
+}
+
+static
+int is_removed(const struct lttng_ust_lfht_node *node)
+{
+ return ((unsigned long) node) & REMOVED_FLAG;
+}
+
+static
+int is_bucket(struct lttng_ust_lfht_node *node)
+{
+ return ((unsigned long) node) & BUCKET_FLAG;
+}
+
+static
+struct lttng_ust_lfht_node *flag_bucket(struct lttng_ust_lfht_node *node)
+{
+ return (struct lttng_ust_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
+}
+
+static
+int is_removal_owner(struct lttng_ust_lfht_node *node)
+{
+ return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
+}
+
+static
+struct lttng_ust_lfht_node *flag_removal_owner(struct lttng_ust_lfht_node *node)
+{
+ return (struct lttng_ust_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
+}
+
+static
+struct lttng_ust_lfht_node *flag_removed_or_removal_owner(struct lttng_ust_lfht_node *node)
+{
+ return (struct lttng_ust_lfht_node *) (((unsigned long) node) | REMOVED_FLAG | REMOVAL_OWNER_FLAG);
+}
+
+static
+struct lttng_ust_lfht_node *get_end(void)
+{
+ return (struct lttng_ust_lfht_node *) END_VALUE;
+}
+
+static
+int is_end(struct lttng_ust_lfht_node *node)
+{
+ return clear_flag(node) == (struct lttng_ust_lfht_node *) END_VALUE;
+}
+
+static
+void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ return ht->mm->alloc_bucket_table(ht, order);
+}
+
+/*
+ * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
+ * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
+ * lfht is destroyed.
+ */
+static
+void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ return ht->mm->free_bucket_table(ht, order);
+}
+
+static inline
+struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
+{
+ return ht->bucket_at(ht, index);
+}
+
+static inline
+struct lttng_ust_lfht_node *lookup_bucket(struct lttng_ust_lfht *ht, unsigned long size,
+ unsigned long hash)
+{
+ assert(size > 0);
+ return bucket_at(ht, hash & (size - 1));
+}
+
+/*
+ * Remove all logically deleted nodes from a bucket up to a certain node key.
+ */
+static
+void _lttng_ust_lfht_gc_bucket(struct lttng_ust_lfht_node *bucket, struct lttng_ust_lfht_node *node)
+{
+ struct lttng_ust_lfht_node *iter_prev, *iter, *next, *new_next;
+
+ assert(!is_bucket(bucket));
+ assert(!is_removed(bucket));
+ assert(!is_removal_owner(bucket));
+ assert(!is_bucket(node));
+ assert(!is_removed(node));
+ assert(!is_removal_owner(node));
+ for (;;) {
+ iter_prev = bucket;
+ /* We can always skip the bucket node initially */
+ iter = lttng_ust_rcu_dereference(iter_prev->next);
+ assert(!is_removed(iter));
+ assert(!is_removal_owner(iter));
+ assert(iter_prev->reverse_hash <= node->reverse_hash);
+ /*
+ * We should never be called with bucket (start of chain)
+ * and logically removed node (end of path compression
+ * marker) being the actual same node. This would be a
+ * bug in the algorithm implementation.
+ */
+ assert(bucket != node);
+ for (;;) {
+ if (caa_unlikely(is_end(iter)))
+ return;
+ if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
+ return;
+ next = lttng_ust_rcu_dereference(clear_flag(iter)->next);
+ if (caa_likely(is_removed(next)))
+ break;
+ iter_prev = clear_flag(iter);
+ iter = next;
+ }
+ assert(!is_removed(iter));
+ assert(!is_removal_owner(iter));
+ if (is_bucket(iter))
+ new_next = flag_bucket(clear_flag(next));
+ else
+ new_next = clear_flag(next);
+ (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
+ }
+}
+
+static
+int _lttng_ust_lfht_replace(struct lttng_ust_lfht *ht, unsigned long size,
+ struct lttng_ust_lfht_node *old_node,
+ struct lttng_ust_lfht_node *old_next,
+ struct lttng_ust_lfht_node *new_node)
+{
+ struct lttng_ust_lfht_node *bucket, *ret_next;
+
+ if (!old_node) /* Return -ENOENT if asked to replace NULL node */
+ return -ENOENT;
+
+ assert(!is_removed(old_node));
+ assert(!is_removal_owner(old_node));
+ assert(!is_bucket(old_node));
+ assert(!is_removed(new_node));
+ assert(!is_removal_owner(new_node));
+ assert(!is_bucket(new_node));
+ assert(new_node != old_node);
+ for (;;) {
+ /* Insert after node to be replaced */
+ if (is_removed(old_next)) {
+ /*
+ * Too late, the old node has been removed under us
+ * between lookup and replace. Fail.
+ */
+ return -ENOENT;
+ }
+ assert(old_next == clear_flag(old_next));
+ assert(new_node != old_next);
+ /*
+ * REMOVAL_OWNER flag is _NEVER_ set before the REMOVED
+ * flag. It is either set atomically at the same time
+ * (replace) or after (del).
+ */
+ assert(!is_removal_owner(old_next));
+ new_node->next = old_next;
+ /*
+ * Here is the whole trick for lock-free replace: we add
+ * the replacement node _after_ the node we want to
+ * replace by atomically setting its next pointer at the
+ * same time we set its removal flag. Given that
+ * the lookups/get next use an iterator aware of the
+ * next pointer, they will either skip the old node due
+ * to the removal flag and see the new node, or use
+ * the old node, but will not see the new one.
+ * This is a replacement of a node with another node
+ * that has the same value: we are therefore not
+ * removing a value from the hash table. We set both the
+ * REMOVED and REMOVAL_OWNER flags atomically so we own
+ * the node after successful cmpxchg.
+ */
+ ret_next = uatomic_cmpxchg(&old_node->next,
+ old_next, flag_removed_or_removal_owner(new_node));
+ if (ret_next == old_next)
+ break; /* We performed the replacement. */
+ old_next = ret_next;
+ }
+
+ /*
+ * Ensure that the old node is not visible to readers anymore:
+ * lookup for the node, and remove it (along with any other
+ * logically removed node) if found.
+ */
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
+ _lttng_ust_lfht_gc_bucket(bucket, new_node);
+
+ assert(is_removed(CMM_LOAD_SHARED(old_node->next)));
+ return 0;
+}
+
+/*
+ * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
+ * mode. A NULL unique_ret allows creation of duplicate keys.
+ */
+static
+void _lttng_ust_lfht_add(struct lttng_ust_lfht *ht,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ unsigned long size,
+ struct lttng_ust_lfht_node *node,
+ struct lttng_ust_lfht_iter *unique_ret,
+ int bucket_flag)
+{
+ struct lttng_ust_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
+ *return_node;
+ struct lttng_ust_lfht_node *bucket;
+
+ assert(!is_bucket(node));
+ assert(!is_removed(node));
+ assert(!is_removal_owner(node));
+ bucket = lookup_bucket(ht, size, hash);
+ for (;;) {
+ /*
+ * iter_prev points to the non-removed node prior to the
+ * insert location.
+ */
+ iter_prev = bucket;
+ /* We can always skip the bucket node initially */
+ iter = lttng_ust_rcu_dereference(iter_prev->next);
+ assert(iter_prev->reverse_hash <= node->reverse_hash);
+ for (;;) {
+ if (caa_unlikely(is_end(iter)))
+ goto insert;
+ if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
+ goto insert;
+
+ /* bucket node is the first node of the identical-hash-value chain */
+ if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
+ goto insert;
+
+ next = lttng_ust_rcu_dereference(clear_flag(iter)->next);
+ if (caa_unlikely(is_removed(next)))
+ goto gc_node;
+
+ /* uniquely add */
+ if (unique_ret
+ && !is_bucket(next)
+ && clear_flag(iter)->reverse_hash == node->reverse_hash) {
+ struct lttng_ust_lfht_iter d_iter = {
+ .node = node,
+ .next = iter,
+#ifdef CONFIG_LTTNG_UST_LFHT_ITER_DEBUG
+ .lfht = ht,
+#endif
+ };
+
+ /*
+ * uniquely adding inserts the node as the first
+ * node of the identical-hash-value node chain.
+ *
+				 * This semantic ensures that no duplicated keys
+				 * should ever be observable in the table
+				 * (including when traversing the table node by
+				 * node with forward iterations).
+ */
+ lttng_ust_lfht_next_duplicate(ht, match, key, &d_iter);
+ if (!d_iter.node)
+ goto insert;
+
+ *unique_ret = d_iter;
+ return;
+ }
+
+ iter_prev = clear_flag(iter);
+ iter = next;
+ }
+
+ insert:
+ assert(node != clear_flag(iter));
+ assert(!is_removed(iter_prev));
+ assert(!is_removal_owner(iter_prev));
+ assert(!is_removed(iter));
+ assert(!is_removal_owner(iter));
+ assert(iter_prev != node);
+ if (!bucket_flag)
+ node->next = clear_flag(iter);
+ else
+ node->next = flag_bucket(clear_flag(iter));
+ if (is_bucket(iter))
+ new_node = flag_bucket(node);
+ else
+ new_node = node;
+ if (uatomic_cmpxchg(&iter_prev->next, iter,
+ new_node) != iter) {
+ continue; /* retry */
+ } else {
+ return_node = node;
+ goto end;
+ }
+
+ gc_node:
+ assert(!is_removed(iter));
+ assert(!is_removal_owner(iter));
+ if (is_bucket(iter))
+ new_next = flag_bucket(clear_flag(next));
+ else
+ new_next = clear_flag(next);
+ (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
+ /* retry */
+ }
+end:
+ if (unique_ret) {
+ unique_ret->node = return_node;
+ /* unique_ret->next left unset, never used. */
+ }
+}
+
+static
+int _lttng_ust_lfht_del(struct lttng_ust_lfht *ht, unsigned long size,
+ struct lttng_ust_lfht_node *node)
+{
+ struct lttng_ust_lfht_node *bucket, *next;
+
+ if (!node) /* Return -ENOENT if asked to delete NULL node */
+ return -ENOENT;
+
+ /* logically delete the node */
+ assert(!is_bucket(node));
+ assert(!is_removed(node));
+ assert(!is_removal_owner(node));
+
+ /*
+ * We are first checking if the node had previously been
+ * logically removed (this check is not atomic with setting the
+ * logical removal flag). Return -ENOENT if the node had
+ * previously been removed.
+ */
+ next = CMM_LOAD_SHARED(node->next); /* next is not dereferenced */
+ if (caa_unlikely(is_removed(next)))
+ return -ENOENT;
+ assert(!is_bucket(next));
+ /*
+ * The del operation semantic guarantees a full memory barrier
+ * before the uatomic_or atomic commit of the deletion flag.
+ */
+ cmm_smp_mb__before_uatomic_or();
+ /*
+ * We set the REMOVED_FLAG unconditionally. Note that there may
+ * be more than one concurrent thread setting this flag.
+	 * Which one wins the race will be known after the garbage
+	 * collection phase; stay tuned!
+ */
+ uatomic_or(&node->next, REMOVED_FLAG);
+ /* We performed the (logical) deletion. */
+
+ /*
+ * Ensure that the node is not visible to readers anymore: lookup for
+ * the node, and remove it (along with any other logically removed node)
+ * if found.
+ */
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
+ _lttng_ust_lfht_gc_bucket(bucket, node);
+
+ assert(is_removed(CMM_LOAD_SHARED(node->next)));
+ /*
+ * Last phase: atomically exchange node->next with a version
+ * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
+ * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
+ * the node and win the removal race.
+	 * It is interesting to note that all "add" paths are forbidden
+	 * to change the next pointer starting from the point where the
+	 * REMOVED_FLAG is set, so here a read followed by an xchg()
+	 * suffices to guarantee that the xchg() will only ever set the
+	 * "REMOVAL_OWNER_FLAG" (or change nothing if the flag was
+	 * already set).
+ */
+ if (!is_removal_owner(uatomic_xchg(&node->next,
+ flag_removal_owner(node->next))))
+ return 0;
+ else
+ return -ENOENT;
+}
+
+/*
+ * Never called with size < 1.
+ */
+static
+void lttng_ust_lfht_create_bucket(struct lttng_ust_lfht *ht, unsigned long size)
+{
+ struct lttng_ust_lfht_node *prev, *node;
+ unsigned long order, len, i;
+ int bucket_order;
+
+ lttng_ust_lfht_alloc_bucket_table(ht, 0);
+
+ dbg_printf("create bucket: order 0 index 0 hash 0\n");
+ node = bucket_at(ht, 0);
+ node->next = flag_bucket(get_end());
+ node->reverse_hash = 0;
+
+ bucket_order = lttng_ust_lfht_get_count_order_ulong(size);
+ assert(bucket_order >= 0);
+
+ for (order = 1; order < (unsigned long) bucket_order + 1; order++) {
+ len = 1UL << (order - 1);
+ lttng_ust_lfht_alloc_bucket_table(ht, order);
+
+ for (i = 0; i < len; i++) {
+ /*
+			 * We are initializing the node with hash=(len+i)
+			 * (which is also the bucket with index=(len+i)) and
+			 * inserting it into the hash table, so it has to be
+			 * inserted after the bucket with
+			 * index=(len+i)&(len-1)=i. Because no other
+			 * non-bucket node nor bucket node with a larger
+			 * index/hash has been inserted yet, the bucket node
+			 * being inserted is linked directly after the bucket
+			 * node with index=i.
+ */
+ prev = bucket_at(ht, i);
+ node = bucket_at(ht, len + i);
+
+ dbg_printf("create bucket: order %lu index %lu hash %lu\n",
+ order, len + i, len + i);
+ node->reverse_hash = bit_reverse_ulong(len + i);
+
+ /* insert after prev */
+ assert(is_bucket(prev->next));
+ node->next = prev->next;
+ prev->next = flag_bucket(node);
+ }
+ }
+}
+
+#if (CAA_BITS_PER_LONG > 32)
+/*
+ * For 64-bit architectures, with max number of buckets small enough not to
+ * use the entire 64-bit memory mapping space (and allowing a fair number of
+ * hash table instances), use the mmap allocator, which is faster. Otherwise,
+ * fall back to the order allocator.
+ */
+static
+const struct lttng_ust_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+ if (max_nr_buckets && max_nr_buckets <= (1ULL << 32))
+ return <tng_ust_lfht_mm_mmap;
+ else
+ return <tng_ust_lfht_mm_order;
+}
+#else
+/*
+ * For 32-bit architectures, use the order allocator.
+ */
+static
+const struct lttng_ust_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+ return <tng_ust_lfht_mm_order;
+}
+#endif
+
+struct lttng_ust_lfht *lttng_ust_lfht_new(unsigned long init_size,
+ unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets,
+ int flags,
+ const struct lttng_ust_lfht_mm_type *mm)
+{
+ struct lttng_ust_lfht *ht;
+ unsigned long order;
+
+ /* min_nr_alloc_buckets must be power of two */
+ if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
+ return NULL;
+
+ /* init_size must be power of two */
+ if (!init_size || (init_size & (init_size - 1)))
+ return NULL;
+
+ /*
+ * Memory management plugin default.
+ */
+ if (!mm)
+ mm = get_mm_type(max_nr_buckets);
+
+ /* max_nr_buckets == 0 for order based mm means infinite */
+ if (mm == <tng_ust_lfht_mm_order && !max_nr_buckets)
+ max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);
+
+ /* max_nr_buckets must be power of two */
+ if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
+ return NULL;
+
+ if (flags & LTTNG_UST_LFHT_AUTO_RESIZE)
+ return NULL;
+
+ min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
+ init_size = max(init_size, MIN_TABLE_SIZE);
+ max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
+ init_size = min(init_size, max_nr_buckets);
+
+ ht = mm->alloc_lttng_ust_lfht(min_nr_alloc_buckets, max_nr_buckets);
+ assert(ht);
+ assert(ht->mm == mm);
+ assert(ht->bucket_at == mm->bucket_at);
+
+ ht->flags = flags;
+ /* this mutex should not nest in read-side C.S. */
+ pthread_mutex_init(&ht->resize_mutex, NULL);
+ order = lttng_ust_lfht_get_count_order_ulong(init_size);
+ ht->resize_target = 1UL << order;
+ lttng_ust_lfht_create_bucket(ht, 1UL << order);
+ ht->size = 1UL << order;
+ return ht;
+}
+
+void lttng_ust_lfht_lookup(struct lttng_ust_lfht *ht, unsigned long hash,
+ lttng_ust_lfht_match_fct match, const void *key,
+ struct lttng_ust_lfht_iter *iter)
+{
+ struct lttng_ust_lfht_node *node, *next, *bucket;
+ unsigned long reverse_hash, size;
+
+ lttng_ust_lfht_iter_debug_set_ht(ht, iter);
+
+ reverse_hash = bit_reverse_ulong(hash);
+
+ size = lttng_ust_rcu_dereference(ht->size);
+ bucket = lookup_bucket(ht, size, hash);
+ /* We can always skip the bucket node initially */
+ node = lttng_ust_rcu_dereference(bucket->next);
+ node = clear_flag(node);
+ for (;;) {
+ if (caa_unlikely(is_end(node))) {
+ node = next = NULL;
+ break;
+ }
+ if (caa_unlikely(node->reverse_hash > reverse_hash)) {
+ node = next = NULL;
+ break;
+ }
+ next = lttng_ust_rcu_dereference(node->next);
+ assert(node == clear_flag(node));
+ if (caa_likely(!is_removed(next))
+ && !is_bucket(next)
+ && node->reverse_hash == reverse_hash
+ && caa_likely(match(node, key))) {
+ break;
+ }
+ node = clear_flag(next);
+ }
+ assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
+ iter->node = node;
+ iter->next = next;
+}
+
+void lttng_ust_lfht_next_duplicate(struct lttng_ust_lfht *ht __attribute__((unused)),
+ lttng_ust_lfht_match_fct match,
+ const void *key, struct lttng_ust_lfht_iter *iter)
+{
+ struct lttng_ust_lfht_node *node, *next;
+ unsigned long reverse_hash;
+
+ lttng_ust_lfht_iter_debug_assert(ht == iter->lfht);
+ node = iter->node;
+ reverse_hash = node->reverse_hash;
+ next = iter->next;
+ node = clear_flag(next);
+
+ for (;;) {
+ if (caa_unlikely(is_end(node))) {
+ node = next = NULL;
+ break;
+ }
+ if (caa_unlikely(node->reverse_hash > reverse_hash)) {
+ node = next = NULL;
+ break;
+ }
+ next = lttng_ust_rcu_dereference(node->next);
+ if (caa_likely(!is_removed(next))
+ && !is_bucket(next)
+ && caa_likely(match(node, key))) {
+ break;
+ }
+ node = clear_flag(next);
+ }
+ assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
+ iter->node = node;
+ iter->next = next;
+}
+
+void lttng_ust_lfht_next(struct lttng_ust_lfht *ht __attribute__((unused)),
+ struct lttng_ust_lfht_iter *iter)
+{
+ struct lttng_ust_lfht_node *node, *next;
+
+ lttng_ust_lfht_iter_debug_assert(ht == iter->lfht);
+ node = clear_flag(iter->next);
+ for (;;) {
+ if (caa_unlikely(is_end(node))) {
+ node = next = NULL;
+ break;
+ }
+ next = lttng_ust_rcu_dereference(node->next);
+ if (caa_likely(!is_removed(next))
+ && !is_bucket(next)) {
+ break;
+ }
+ node = clear_flag(next);
+ }
+ assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
+ iter->node = node;
+ iter->next = next;
+}
+
+void lttng_ust_lfht_first(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
+{
+ lttng_ust_lfht_iter_debug_set_ht(ht, iter);
+ /*
+ * Get next after first bucket node. The first bucket node is the
+ * first node of the linked list.
+ */
+ iter->next = bucket_at(ht, 0)->next;
+ lttng_ust_lfht_next(ht, iter);
+}
+
+void lttng_ust_lfht_add(struct lttng_ust_lfht *ht, unsigned long hash,
+ struct lttng_ust_lfht_node *node)
+{
+ unsigned long size;
+
+ node->reverse_hash = bit_reverse_ulong(hash);
+ size = lttng_ust_rcu_dereference(ht->size);
+ _lttng_ust_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
+}
+
+struct lttng_ust_lfht_node *lttng_ust_lfht_add_unique(struct lttng_ust_lfht *ht,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ struct lttng_ust_lfht_node *node)
+{
+ unsigned long size;
+ struct lttng_ust_lfht_iter iter;
+
+ node->reverse_hash = bit_reverse_ulong(hash);
+ size = lttng_ust_rcu_dereference(ht->size);
+ _lttng_ust_lfht_add(ht, hash, match, key, size, node, &iter, 0);
+ return iter.node;
+}
+
+struct lttng_ust_lfht_node *lttng_ust_lfht_add_replace(struct lttng_ust_lfht *ht,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ struct lttng_ust_lfht_node *node)
+{
+ unsigned long size;
+ struct lttng_ust_lfht_iter iter;
+
+ node->reverse_hash = bit_reverse_ulong(hash);
+ size = lttng_ust_rcu_dereference(ht->size);
+ for (;;) {
+ _lttng_ust_lfht_add(ht, hash, match, key, size, node, &iter, 0);
+ if (iter.node == node) {
+ return NULL;
+ }
+
+ if (!_lttng_ust_lfht_replace(ht, size, iter.node, iter.next, node))
+ return iter.node;
+ }
+}
+
+int lttng_ust_lfht_replace(struct lttng_ust_lfht *ht,
+ struct lttng_ust_lfht_iter *old_iter,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ struct lttng_ust_lfht_node *new_node)
+{
+ unsigned long size;
+
+ new_node->reverse_hash = bit_reverse_ulong(hash);
+ if (!old_iter->node)
+ return -ENOENT;
+ if (caa_unlikely(old_iter->node->reverse_hash != new_node->reverse_hash))
+ return -EINVAL;
+ if (caa_unlikely(!match(old_iter->node, key)))
+ return -EINVAL;
+ size = lttng_ust_rcu_dereference(ht->size);
+ return _lttng_ust_lfht_replace(ht, size, old_iter->node, old_iter->next,
+ new_node);
+}
+
+int lttng_ust_lfht_del(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_node *node)
+{
+ unsigned long size;
+
+ size = lttng_ust_rcu_dereference(ht->size);
+ return _lttng_ust_lfht_del(ht, size, node);
+}
+
+int lttng_ust_lfht_is_node_deleted(const struct lttng_ust_lfht_node *node)
+{
+ return is_removed(CMM_LOAD_SHARED(node->next));
+}
+
+static
+int lttng_ust_lfht_delete_bucket(struct lttng_ust_lfht *ht)
+{
+ struct lttng_ust_lfht_node *node;
+ unsigned long order, i, size;
+
+ /* Check that the table is empty */
+ node = bucket_at(ht, 0);
+ do {
+ node = clear_flag(node)->next;
+ if (!is_bucket(node))
+ return -EPERM;
+ assert(!is_removed(node));
+ assert(!is_removal_owner(node));
+ } while (!is_end(node));
+ /*
+ * size accessed without lttng_ust_rcu_dereference because hash table is
+ * being destroyed.
+ */
+ size = ht->size;
+ /* Internal sanity check: all nodes left should be buckets */
+ for (i = 0; i < size; i++) {
+ node = bucket_at(ht, i);
+ dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
+ i, i, bit_reverse_ulong(node->reverse_hash));
+ assert(is_bucket(node->next));
+ }
+
+ for (order = lttng_ust_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
+ lttng_ust_lfht_free_bucket_table(ht, order);
+
+ return 0;
+}
+
+/*
+ * Should only be called when no more concurrent readers nor writers can
+ * possibly access the table.
+ */
+int lttng_ust_lfht_destroy(struct lttng_ust_lfht *ht)
+{
+ int ret;
+
+ ret = lttng_ust_lfht_delete_bucket(ht);
+ if (ret)
+ return ret;
+ ret = pthread_mutex_destroy(&ht->resize_mutex);
+ if (ret)
+ ret = -EBUSY;
+ poison_free(ht);
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * Userspace RCU library - Lock-Free RCU Hash Table
+ */
+
+#ifndef _LTTNG_UST_RCULFHASH_H
+#define _LTTNG_UST_RCULFHASH_H
+
+#include <stdint.h>
+#include <pthread.h>
+#include <urcu/compiler.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct lttng_ust_lfht;
+
+/*
+ * lttng_ust_lfht_node: Contains the next pointers and reverse-hash
+ * value required for lookup and traversal of the hash table.
+ *
+ * struct lttng_ust_lfht_node should be aligned on 8-byte boundaries because
+ * the three lower bits are used as flags. It is worth noting that the
+ * information contained within these three bits could be represented in
+ * two bits by re-using the same bit for REMOVAL_OWNER_FLAG and
+ * BUCKET_FLAG. This can be done if we ensure that no iterator nor
+ * updater checks the BUCKET_FLAG after it detects that the REMOVED_FLAG
+ * is set. Given that the minimum size of struct lttng_ust_lfht_node is 8
+ * bytes on 32-bit architectures, we choose to go for simplicity and
+ * reserve three bits.
+ *
+ * struct lttng_ust_lfht_node can be embedded into a structure (as a field).
+ * caa_container_of() can be used to get the structure from the struct
+ * lttng_ust_lfht_node after a lookup.
+ *
+ * The structure which embeds it typically holds the key (or key-value
+ * pair) of the object. The caller code is responsible for calculation
+ * of the hash value for lttng_ust_lfht APIs.
+ */
+struct lttng_ust_lfht_node {
+ struct lttng_ust_lfht_node *next; /* ptr | REMOVAL_OWNER_FLAG | BUCKET_FLAG | REMOVED_FLAG */
+ unsigned long reverse_hash;
+} __attribute__((aligned(8)));
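+
+/*
+ * Embedding example (a sketch; "struct mynode", its "key" field and the
+ * match function below are hypothetical caller code, not part of this
+ * API):
+ *
+ *	struct mynode {
+ *		int key;
+ *		struct lttng_ust_lfht_node node;
+ *	};
+ *
+ *	static int mynode_match(struct lttng_ust_lfht_node *ht_node,
+ *			const void *key)
+ *	{
+ *		struct mynode *n = caa_container_of(ht_node,
+ *				struct mynode, node);
+ *
+ *		return n->key == *(const int *) key;
+ *	}
+ */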
+
+/* lttng_ust_lfht_iter: Used to track state while traversing a hash chain. */
+struct lttng_ust_lfht_iter {
+ struct lttng_ust_lfht_node *node, *next;
+};
+
+static inline
+struct lttng_ust_lfht_node *lttng_ust_lfht_iter_get_node(struct lttng_ust_lfht_iter *iter)
+{
+ return iter->node;
+}
+
+struct rcu_flavor_struct;
+
+/*
+ * Caution!
+ * Ensure reader and writer threads are registered as urcu readers.
+ */
+
+typedef int (*lttng_ust_lfht_match_fct)(struct lttng_ust_lfht_node *node, const void *key);
+
+/*
+ * lttng_ust_lfht_node_init - initialize a hash table node
+ * @node: the node to initialize.
+ *
+ * This function is kept to be eventually used for debugging purposes
+ * (detection of memory corruption).
+ */
+static inline
+void lttng_ust_lfht_node_init(struct lttng_ust_lfht_node *node __attribute__((unused)))
+{
+}
+
+/*
+ * Hash table creation flags.
+ */
+enum {
+ LTTNG_UST_LFHT_AUTO_RESIZE = (1U << 0),
+ LTTNG_UST_LFHT_ACCOUNTING = (1U << 1),
+};
+
+struct lttng_ust_lfht_mm_type {
+ struct lttng_ust_lfht *(*alloc_lttng_ust_lfht)(unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets);
+ void (*alloc_bucket_table)(struct lttng_ust_lfht *ht, unsigned long order);
+ void (*free_bucket_table)(struct lttng_ust_lfht *ht, unsigned long order);
+ struct lttng_ust_lfht_node *(*bucket_at)(struct lttng_ust_lfht *ht,
+ unsigned long index);
+};
+
+extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_order
+ __attribute__((visibility("hidden")));
+
+extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_chunk
+ __attribute__((visibility("hidden")));
+
+extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_mmap
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_new - allocate a hash table.
+ * @init_size: number of buckets to allocate initially. Must be power of two.
+ * @min_nr_alloc_buckets: the minimum number of allocated buckets.
+ * (must be power of two)
+ * @max_nr_buckets: the maximum number of hash table buckets allowed.
+ * (must be power of two, 0 is accepted, means
+ * "infinite")
+ * @flags: hash table creation flags (can be combined with bitwise or: '|').
+ * 0: no flags.
+ *   LTTNG_UST_LFHT_AUTO_RESIZE: automatically resize hash table
+ *       (not supported by this lttng-ust port: lttng_ust_lfht_new
+ *       returns NULL if this flag is set).
+ *   LTTNG_UST_LFHT_ACCOUNTING: count the number of node additions
+ *       and removals in the table.
+ *
+ * Return NULL on error.
+ * Note: the RCU flavor must be already included before the hash table header.
+ */
+extern struct lttng_ust_lfht *lttng_ust_lfht_new(unsigned long init_size,
+ unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets,
+ int flags,
+ const struct lttng_ust_lfht_mm_type *mm)
+ __attribute__((visibility("hidden")));
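+
+/*
+ * For instance, a small table using the default memory backend could be
+ * created as follows (sketch; error handling is up to the caller):
+ *
+ *	struct lttng_ust_lfht *ht;
+ *
+ *	ht = lttng_ust_lfht_new(1, 1, 0, 0, NULL);
+ *	if (!ht)
+ *		abort();
+ */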
+
+/*
+ * lttng_ust_lfht_destroy - destroy a hash table.
+ * @ht: the hash table to destroy.
+ *
+ * Return 0 on success, negative error value on error.
+ *
+ * Prior to liburcu 0.10:
+ * - Threads calling this API need to be registered RCU read-side
+ * threads.
+ * - lttng_ust_lfht_destroy should *not* be called from an RCU read-side
+ *   critical section. It should *not* be called from a call_rcu thread
+ *   context either.
+ *
+ * Starting from liburcu 0.10, rculfhash implements its own worker
+ * thread to handle resize operations, which removes RCU requirements on
+ * lttng_ust_lfht_destroy.
+ */
+extern int lttng_ust_lfht_destroy(struct lttng_ust_lfht *ht)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_count_nodes - count the number of nodes in the hash table.
+ * @ht: the hash table.
+ * @split_count_before: sample the node count split-counter before traversal.
+ * @count: traverse the hash table, count the number of nodes observed.
+ * @split_count_after: sample the node count split-counter after traversal.
+ *
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ */
+extern void lttng_ust_lfht_count_nodes(struct lttng_ust_lfht *ht,
+ long *split_count_before,
+ unsigned long *count,
+ long *split_count_after)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_lookup - lookup a node by key.
+ * @ht: the hash table.
+ * @hash: the key hash.
+ * @match: the key match function.
+ * @key: the current node key.
+ * @iter: node, if found (output). *iter->node set to NULL if not found.
+ *
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function acts as a rcu_dereference() to read the node pointer.
+ */
+extern void lttng_ust_lfht_lookup(struct lttng_ust_lfht *ht, unsigned long hash,
+ lttng_ust_lfht_match_fct match, const void *key,
+ struct lttng_ust_lfht_iter *iter)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_next_duplicate - get the next item with same key, after iterator.
+ * @ht: the hash table.
+ * @match: the key match function.
+ * @key: the current node key.
+ * @iter: input: current iterator.
+ * output: node, if found. *iter->node set to NULL if not found.
+ *
+ * Uses an iterator initialized by a lookup or traversal. Important: the
+ * iterator _needs_ to be initialized before calling
+ * lttng_ust_lfht_next_duplicate.
+ * Sets *iter->node to the following node with the same key.
+ * Sets *iter->node to NULL if no following node exists with the same key.
+ * RCU read-side lock must be held across lttng_ust_lfht_lookup and
+ * lttng_ust_lfht_next calls, and also between lttng_ust_lfht_next calls using the
+ * node returned by a previous lttng_ust_lfht_next.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function acts as a rcu_dereference() to read the node pointer.
+ */
+extern void lttng_ust_lfht_next_duplicate(struct lttng_ust_lfht *ht,
+ lttng_ust_lfht_match_fct match, const void *key,
+ struct lttng_ust_lfht_iter *iter)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_first - get the first node in the table.
+ * @ht: the hash table.
+ * @iter: First node, if exists (output). *iter->node set to NULL if not found.
+ *
+ * Output in "*iter". *iter->node set to NULL if table is empty.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function acts as a rcu_dereference() to read the node pointer.
+ */
+extern void lttng_ust_lfht_first(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_next - get the next node in the table.
+ * @ht: the hash table.
+ * @iter: input: current iterator.
+ * output: next node, if exists. *iter->node set to NULL if not found.
+ *
+ * Input/Output in "*iter". *iter->node set to NULL if *iter was
+ * pointing to the last table node.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function acts as a rcu_dereference() to read the node pointer.
+ */
+extern void lttng_ust_lfht_next(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_add - add a node to the hash table.
+ * @ht: the hash table.
+ * @hash: the key hash.
+ * @node: the node to add.
+ *
+ * This function supports adding redundant keys into the table.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function issues a full memory barrier before and after its
+ * atomic commit.
+ */
+extern void lttng_ust_lfht_add(struct lttng_ust_lfht *ht, unsigned long hash,
+ struct lttng_ust_lfht_node *node)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_add_unique - add a node to hash table, if key is not present.
+ * @ht: the hash table.
+ * @hash: the node's hash.
+ * @match: the key match function.
+ * @key: the node's key.
+ * @node: the node to try adding.
+ *
+ * Return the node added upon success.
+ * Return the unique node already present upon failure. If
+ * lttng_ust_lfht_add_unique fails, the node passed as parameter should be
+ * freed by the caller. In this case, the caller does NOT need to wait
+ * for a grace period before freeing or re-using the node.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ *
+ * The semantic of this function is that if only this function is used
+ * to add keys into the table, no duplicated keys should ever be
+ * observable in the table. The same guarantee applies to combinations of
+ * add_unique and add_replace (see below).
+ *
+ * Upon success, this function issues a full memory barrier before and
+ * after its atomic commit. Upon failure, this function acts like a
+ * simple lookup operation: it acts as a rcu_dereference() to read the
+ * node pointer. The failure case does not guarantee any other memory
+ * barrier.
+ */
+extern struct lttng_ust_lfht_node *lttng_ust_lfht_add_unique(struct lttng_ust_lfht *ht,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ struct lttng_ust_lfht_node *node)
+ __attribute__((visibility("hidden")));
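+
+/*
+ * Typical use (sketch, reusing the hypothetical "struct mynode" and
+ * "mynode_match" from the embedding example above):
+ *
+ *	struct lttng_ust_lfht_node *ret_node;
+ *
+ *	rcu_read_lock();
+ *	ret_node = lttng_ust_lfht_add_unique(ht, hash, mynode_match,
+ *			&item->key, &item->node);
+ *	rcu_read_unlock();
+ *	if (ret_node != &item->node) {
+ *		// the key was already present; "item" was not added and
+ *		// may be freed immediately by the caller
+ *	}
+ */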
+
+/*
+ * lttng_ust_lfht_add_replace - replace or add a node within hash table.
+ * @ht: the hash table.
+ * @hash: the node's hash.
+ * @match: the key match function.
+ * @key: the node's key.
+ * @node: the node to add.
+ *
+ * Return the node replaced upon success. If no node matching the key
+ * was present, return NULL, which also means the operation succeeded.
+ * This replacement operation should never fail.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * After successful replacement, a grace period must be waited for before
+ * freeing or re-using the memory reserved for the returned node.
+ *
+ * The semantic of replacement vs lookups and traversals is the
+ * following: if lookups and traversals are performed between a key
+ * unique insertion and its removal, we guarantee that the lookups and
+ * traversals will always find exactly one instance of the key if it is
+ * replaced concurrently with the lookups.
+ *
+ * Providing this semantic allows us to ensure that replacement-only
+ * schemes will never generate duplicated keys. It also allows us to
+ * guarantee that a combination of add_replace and add_unique updates
+ * will never generate duplicated keys.
+ *
+ * This function issues a full memory barrier before and after its
+ * atomic commit.
+ */
+extern struct lttng_ust_lfht_node *lttng_ust_lfht_add_replace(struct lttng_ust_lfht *ht,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ struct lttng_ust_lfht_node *node)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_replace - replace a node pointed to by iter within hash table.
+ * @ht: the hash table.
+ * @old_iter: the iterator position of the node to replace.
+ * @hash: the node's hash.
+ * @match: the key match function.
+ * @key: the node's key.
+ * @new_node: the new node to use as replacement.
+ *
+ * Return 0 if replacement is successful, negative value otherwise.
+ * Replacing a NULL old node or an already removed node will fail with
+ * -ENOENT.
+ * If the hash or value of the node to replace and the new node differ,
+ * this function returns -EINVAL without proceeding to the replacement.
+ * Old node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next.
+ * RCU read-side lock must be held between lookup and replacement.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * After successful replacement, a grace period must be waited for before
+ * freeing or re-using the memory reserved for the old node (which can
+ * be accessed with lttng_ust_lfht_iter_get_node).
+ *
+ * The semantic of replacement vs lookups is the same as
+ * lttng_ust_lfht_add_replace().
+ *
+ * Upon success, this function issues a full memory barrier before and
+ * after its atomic commit. Upon failure, this function does not issue
+ * any memory barrier.
+ */
+extern int lttng_ust_lfht_replace(struct lttng_ust_lfht *ht,
+ struct lttng_ust_lfht_iter *old_iter,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ struct lttng_ust_lfht_node *new_node)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_del - remove node pointed to by iterator from hash table.
+ * @ht: the hash table.
+ * @node: the node to delete.
+ *
+ * Return 0 if the node is successfully removed, negative value
+ * otherwise.
+ * Deleting a NULL node or an already removed node will fail with a
+ * negative value.
+ * Node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next,
+ * followed by use of lttng_ust_lfht_iter_get_node.
+ * RCU read-side lock must be held between lookup and removal.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * After successful removal, a grace period must be waited for before
+ * freeing or re-using the memory reserved for old node (which can be
+ * accessed with lttng_ust_lfht_iter_get_node).
+ * Upon success, this function issues a full memory barrier before and
+ * after its atomic commit. Upon failure, this function does not issue
+ * any memory barrier.
+ */
+extern int lttng_ust_lfht_del(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_node *node)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_is_node_deleted - query whether a node is removed from hash table.
+ *
+ * Return non-zero if the node is deleted from the hash table, 0
+ * otherwise.
+ * Node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next,
+ * followed by use of lttng_ust_lfht_iter_get_node.
+ * RCU read-side lock must be held between lookup and call to this
+ * function.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function does not issue any memory barrier.
+ */
+extern int lttng_ust_lfht_is_node_deleted(const struct lttng_ust_lfht_node *node)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_resize - Force a hash table resize
+ * @ht: the hash table.
+ * @new_size: update to this hash table size.
+ *
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function does not (necessarily) issue memory barriers.
+ * lttng_ust_lfht_resize should *not* be called from a RCU read-side critical
+ * section.
+ */
+extern void lttng_ust_lfht_resize(struct lttng_ust_lfht *ht, unsigned long new_size)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Note: it is safe to perform element removal (del), replacement, or
+ * any hash table update operation during any of the following hash
+ * table traversals.
+ * These functions act as rcu_dereference() to read the node pointers.
+ */
+#define lttng_ust_lfht_for_each(ht, iter, node) \
+ for (lttng_ust_lfht_first(ht, iter), \
+ node = lttng_ust_lfht_iter_get_node(iter); \
+ node != NULL; \
+ lttng_ust_lfht_next(ht, iter), \
+ node = lttng_ust_lfht_iter_get_node(iter))
+
+#define lttng_ust_lfht_for_each_duplicate(ht, hash, match, key, iter, node) \
+ for (lttng_ust_lfht_lookup(ht, hash, match, key, iter), \
+ node = lttng_ust_lfht_iter_get_node(iter); \
+ node != NULL; \
+ lttng_ust_lfht_next_duplicate(ht, match, key, iter), \
+ node = lttng_ust_lfht_iter_get_node(iter))
+
+#define lttng_ust_lfht_for_each_entry(ht, iter, pos, member) \
+ for (lttng_ust_lfht_first(ht, iter), \
+ pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
+ __typeof__(*(pos)), member); \
+ lttng_ust_lfht_iter_get_node(iter) != NULL; \
+ lttng_ust_lfht_next(ht, iter), \
+ pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
+ __typeof__(*(pos)), member))
+
+#define lttng_ust_lfht_for_each_entry_duplicate(ht, hash, match, key, \
+ iter, pos, member) \
+ for (lttng_ust_lfht_lookup(ht, hash, match, key, iter), \
+ pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
+ __typeof__(*(pos)), member); \
+ lttng_ust_lfht_iter_get_node(iter) != NULL; \
+ lttng_ust_lfht_next_duplicate(ht, match, key, iter), \
+ pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
+ __typeof__(*(pos)), member))
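+
+/*
+ * For example, walking every entry that embeds the node as a "node"
+ * member (sketch; "struct mynode" is hypothetical caller code) could
+ * look like:
+ *
+ *	struct lttng_ust_lfht_iter iter;
+ *	struct mynode *pos;
+ *
+ *	rcu_read_lock();
+ *	lttng_ust_lfht_for_each_entry(ht, &iter, pos, node) {
+ *		// use pos->key
+ *	}
+ *	rcu_read_unlock();
+ */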
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LTTNG_UST_RCULFHASH_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2017 Philippe Proulx <pproulx@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <assert.h>
+
+#include "string-utils.h"
+
+enum star_glob_pattern_type_flags {
+ STAR_GLOB_PATTERN_TYPE_FLAG_NONE = 0,
+ STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN = 1,
+ STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY = 2,
+};
+
+static
+enum star_glob_pattern_type_flags strutils_test_glob_pattern(const char *pattern)
+{
+ enum star_glob_pattern_type_flags ret =
+ STAR_GLOB_PATTERN_TYPE_FLAG_NONE;
+ const char *p;
+
+ assert(pattern);
+
+ for (p = pattern; *p != '\0'; p++) {
+ switch (*p) {
+ case '*':
+ ret = STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN;
+
+ if (p[1] == '\0') {
+ ret |= STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY;
+ }
+
+ goto end;
+ case '\\':
+ p++;
+
+ if (*p == '\0') {
+ goto end;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+end:
+ return ret;
+}
+
+/*
+ * Returns true if `pattern` is a star-only globbing pattern, that is,
+ * it contains at least one non-escaped `*`.
+ */
+bool strutils_is_star_glob_pattern(const char *pattern)
+{
+ return strutils_test_glob_pattern(pattern) &
+ STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN;
+}
+
+/*
+ * Returns true if `pattern` is a globbing pattern with a globbing,
+ * non-escaped star only at its very end.
+ */
+bool strutils_is_star_at_the_end_only_glob_pattern(const char *pattern)
+{
+ return strutils_test_glob_pattern(pattern) &
+ STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY;
+}
+
+static inline
+bool at_end_of_pattern(const char *p, const char *pattern, size_t pattern_len)
+{
+ return (p - pattern) == pattern_len || *p == '\0';
+}
+
+/*
+ * Globbing matching function with the star feature only (`?` and
+ * character sets are not supported). This matches `candidate` (plain
+ * string) against `pattern`. A literal star can be escaped with `\` in
+ * `pattern`.
+ *
+ * `pattern_len` or `candidate_len` can be greater than the actual
+ * string length of `pattern` or `candidate` if the string is
+ * null-terminated.
+ */
+bool strutils_star_glob_match(const char *pattern, size_t pattern_len,
+		const char *candidate, size_t candidate_len)
+{
+ const char *retry_c = candidate, *retry_p = pattern, *c, *p;
+ bool got_a_star = false;
+
+retry:
+ c = retry_c;
+ p = retry_p;
+
+ /*
+ * The concept here is to retry a match in the specific case
+ * where we already got a star. The retry position for the
+ * pattern is just after the most recent star, and the retry
+ * position for the candidate is the character following the
+ * last try's first character.
+ *
+ * Example:
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^^
+ * pattern: hi*every*one
+ * ^^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^^
+ * pattern: hi*every*one
+ * ^^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^^
+ * pattern: hi*every*one
+ * ^^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^^
+ * pattern: hi*every*one
+ * ^^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^ SUCCESS
+ */
+ while ((c - candidate) < candidate_len && *c != '\0') {
+ assert(*c);
+
+ if (at_end_of_pattern(p, pattern, pattern_len)) {
+ goto end_of_pattern;
+ }
+
+ switch (*p) {
+ case '*':
+ got_a_star = true;
+
+ /*
+ * Our first try starts at the current candidate
+ * character and after the star in the pattern.
+ */
+ retry_c = c;
+ retry_p = p + 1;
+
+ if (at_end_of_pattern(retry_p, pattern, pattern_len)) {
+ /*
+ * Star at the end of the pattern at
+ * this point: automatic match.
+ */
+ return true;
+ }
+
+ goto retry;
+ case '\\':
+ /* Go to escaped character. */
+ p++; /* Fallthrough */
+
+ /*
+			 * Fall through to the default case, which will
+ * compare the escaped character now.
+ */
+ default:
+ if (at_end_of_pattern(p, pattern, pattern_len) ||
+ *c != *p) {
+end_of_pattern:
+ /* Character mismatch OR end of pattern. */
+ if (!got_a_star) {
+ /*
+ * We didn't get any star yet,
+ * so this first mismatch
+ * automatically makes the whole
+ * test fail.
+ */
+ return false;
+ }
+
+ /*
+ * Next try: next candidate character,
+ * original pattern character (following
+ * the most recent star).
+ */
+ retry_c++;
+ goto retry;
+ }
+ break;
+ }
+
+ /* Next pattern and candidate characters. */
+ c++;
+ p++;
+ }
+
+ /*
+ * We checked every candidate character and we're still in a
+ * success state: the only pattern character allowed to remain
+ * is a star.
+ */
+ if (at_end_of_pattern(p, pattern, pattern_len)) {
+ return true;
+ }
+
+ p++;
+ return p[-1] == '*' && at_end_of_pattern(p, pattern, pattern_len);
+}
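+
+/*
+ * A few illustrative calls (sketch). Passing a length larger than the
+ * actual string length is allowed for null-terminated strings, so
+ * SIZE_MAX is used here for brevity:
+ *
+ *	strutils_star_glob_match("hi*one", SIZE_MAX,
+ *		"hi ev every onyx one", SIZE_MAX);	// true
+ *	strutils_star_glob_match("hi*two", SIZE_MAX,
+ *		"hi ev every onyx one", SIZE_MAX);	// false
+ *	strutils_star_glob_match("\\*", SIZE_MAX, "*", SIZE_MAX);	// true
+ */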
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2017 Philippe Proulx <pproulx@efficios.com>
+ */
+
+#ifndef _STRING_UTILS_H
+#define _STRING_UTILS_H
+
+#include <stdbool.h>
+#include <stddef.h>
+
+bool strutils_is_star_glob_pattern(const char *pattern)
+ __attribute__((visibility("hidden")));
+
+bool strutils_is_star_at_the_end_only_glob_pattern(const char *pattern)
+ __attribute__((visibility("hidden")));
+
+bool strutils_star_glob_match(const char *pattern, size_t pattern_len,
+ const char *candidate, size_t candidate_len)
+ __attribute__((visibility("hidden")));
+
+#endif /* _STRING_UTILS_H */
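For context, a minimal sketch of how the matcher declared above can be exercised; the check() helper and its expected results are illustrative only, and the pattern/candidate lengths are simply taken from strlen():

#include <stdio.h>
#include <string.h>

#include "string-utils.h"

/* Hypothetical helper: print whether `candidate` matches the star-glob `pattern`. */
static void check(const char *pattern, const char *candidate)
{
	bool match = strutils_star_glob_match(pattern, strlen(pattern),
			candidate, strlen(candidate));

	printf("\"%s\" vs \"%s\": %s\n", candidate, pattern,
			match ? "match" : "no match");
}

/*
 * check("hi*every*one", "hi ev every onyx one");  -> match (the traced example)
 * check("hi\\*there", "hi*there");                -> match (escaped star)
 * check("hi*every*one", "hello");                 -> no match
 */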
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2013-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include "common/macros.h"
+
+#define TRACEPOINT_CREATE_PROBES
+#define TRACEPOINT_DEFINE
+#include "lttng-ust-tracef-provider.h"
+
+static inline
+void __lttng_ust_vtracef(const char *fmt, va_list ap)
+ __attribute__((always_inline, format(printf, 1, 0)));
+static inline
+void __lttng_ust_vtracef(const char *fmt, va_list ap)
+{
+ char *msg;
+ const int len = vasprintf(&msg, fmt, ap);
+
+ /* len does not include the final \0 */
+ if (len < 0)
+ goto end;
+ __tracepoint_cb_lttng_ust_tracef___event(msg, len,
+ LTTNG_UST_CALLER_IP());
+ free(msg);
+end:
+ return;
+}
+
+/*
+ * FIXME: We should include <lttng/tracef.h> for the declarations here, but
+ * doing so currently fails because of the tracepoint probe definition magic above.
+ */
+
+void _lttng_ust_vtracef(const char *fmt, va_list ap)
+ __attribute__((format(printf, 1, 0)));
+void _lttng_ust_vtracef(const char *fmt, va_list ap)
+{
+ __lttng_ust_vtracef(fmt, ap);
+}
+
+void _lttng_ust_tracef(const char *fmt, ...)
+ __attribute__((format(printf, 1, 2)));
+void _lttng_ust_tracef(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ __lttng_ust_vtracef(fmt, ap);
+ va_end(ap);
+}
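The symbols defined above back the public tracef() facility; below is a minimal, illustrative application-side sketch, assuming the usual <lttng/tracef.h> wrapper and linking against liblttng-ust (the build command, e.g. cc app.c -llttng-ust, is an assumption):

#include <lttng/tracef.h>

int main(void)
{
	int answer = 42;

	/* printf(3)-style formatting; recorded under the lttng_ust_tracef provider. */
	tracef("the answer is %d", answer);
	return 0;
}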
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2013-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include "common/macros.h"
+
+#define TRACEPOINT_CREATE_PROBES
+#define TRACEPOINT_DEFINE
+#include "lttng-ust-tracelog-provider.h"
+
+#define TRACELOG_CB(level) \
+ static inline \
+ void __lttng_ust_vtracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, va_list ap) \
+ __attribute__((always_inline, format(printf, 4, 0))); \
+ \
+ static inline \
+ void __lttng_ust_vtracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, va_list ap) \
+ { \
+ char *msg; \
+ const int len = vasprintf(&msg, fmt, ap); \
+ \
+ /* len does not include the final \0 */ \
+ if (len < 0) \
+ goto end; \
+ __tracepoint_cb_lttng_ust_tracelog___##level(file, \
+ line, func, msg, len, \
+ LTTNG_UST_CALLER_IP()); \
+ free(msg); \
+ end: \
+ return; \
+ } \
+ \
+ void _lttng_ust_vtracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, va_list ap) \
+ __attribute__ ((format(printf, 4, 0))); \
+ \
+ void _lttng_ust_vtracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, va_list ap); \
+ void _lttng_ust_vtracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, va_list ap) \
+ { \
+ __lttng_ust_vtracelog_##level(file, line, func, fmt, ap); \
+ } \
+ \
+ void _lttng_ust_tracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, ...) \
+ __attribute__ ((format(printf, 4, 5))); \
+ \
+ void _lttng_ust_tracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, ...); \
+ void _lttng_ust_tracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, ...) \
+ { \
+ va_list ap; \
+ \
+ va_start(ap, fmt); \
+ __lttng_ust_vtracelog_##level(file, line, func, fmt, ap); \
+ va_end(ap); \
+ }
+
+TRACELOG_CB(TRACE_EMERG)
+TRACELOG_CB(TRACE_ALERT)
+TRACELOG_CB(TRACE_CRIT)
+TRACELOG_CB(TRACE_ERR)
+TRACELOG_CB(TRACE_WARNING)
+TRACELOG_CB(TRACE_NOTICE)
+TRACELOG_CB(TRACE_INFO)
+TRACELOG_CB(TRACE_DEBUG_SYSTEM)
+TRACELOG_CB(TRACE_DEBUG_PROGRAM)
+TRACELOG_CB(TRACE_DEBUG_PROCESS)
+TRACELOG_CB(TRACE_DEBUG_MODULE)
+TRACELOG_CB(TRACE_DEBUG_UNIT)
+TRACELOG_CB(TRACE_DEBUG_FUNCTION)
+TRACELOG_CB(TRACE_DEBUG_LINE)
+TRACELOG_CB(TRACE_DEBUG)
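These expansions back the level-aware tracelog() facility; a minimal application-side sketch, assuming the public <lttng/tracelog.h> wrapper, which forwards the call site's file, line and function automatically:

#include <lttng/tracelog.h>

static void report_retry(int attempt)
{
	/* printf(3)-style formatting; the first argument selects the log level. */
	tracelog(TRACE_WARNING, "retrying connection, attempt %d", attempt);
}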
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_TRACEPOINT_INTERNAL_H
+#define _LTTNG_TRACEPOINT_INTERNAL_H
+
+#include <urcu/list.h>
+#include <lttng/tracepoint-types.h>
+#include <lttng/ust-events.h>
+
+#define TRACE_DEFAULT TRACE_DEBUG_LINE
+
+struct tracepoint_lib {
+ struct cds_list_head list; /* list of registered libs */
+ struct lttng_ust_tracepoint * const *tracepoints_start;
+ int tracepoints_count;
+ struct cds_list_head callsites;
+};
+
+int tracepoint_probe_register_noupdate(const char *name,
+ void (*callback)(void), void *priv,
+ const char *signature)
+ __attribute__((visibility("hidden")));
+
+int tracepoint_probe_unregister_noupdate(const char *name,
+ void (*callback)(void), void *priv)
+ __attribute__((visibility("hidden")));
+
+void tracepoint_probe_update_all(void)
+ __attribute__((visibility("hidden")));
+
+
+void *lttng_ust_tp_check_weak_hidden1(void)
+ __attribute__((visibility("hidden")));
+
+void *lttng_ust_tp_check_weak_hidden2(void)
+ __attribute__((visibility("hidden")));
+
+void *lttng_ust_tp_check_weak_hidden3(void)
+ __attribute__((visibility("hidden")));
+
+/*
+ * These symbols are ABI between liblttng-ust-tracepoint and liblttng-ust,
+ * which is why they are not hidden and not part of the public API.
+ */
+int lttng_ust_tp_probe_register_queue_release(const char *name,
+ void (*func)(void), void *data, const char *signature);
+int lttng_ust_tp_probe_unregister_queue_release(const char *name,
+ void (*func)(void), void *data);
+void lttng_ust_tp_probe_prune_release_queue(void);
+
+void lttng_ust_tp_init(void);
+void lttng_ust_tp_exit(void);
+
+
+#endif /* _LTTNG_TRACEPOINT_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include "tracepoint-internal.h"
+
+/* Test compiler support for weak symbols with hidden visibility. */
+int __tracepoint_test_symbol1 __attribute__((weak, visibility("hidden")));
+void *__tracepoint_test_symbol2 __attribute__((weak, visibility("hidden")));
+struct {
+ char a[24];
+} __tracepoint_test_symbol3 __attribute__((weak, visibility("hidden")));
+
+void *lttng_ust_tp_check_weak_hidden1(void)
+{
+ return &__tracepoint_test_symbol1;
+}
+
+void *lttng_ust_tp_check_weak_hidden2(void)
+{
+ return &__tracepoint_test_symbol2;
+}
+
+void *lttng_ust_tp_check_weak_hidden3(void)
+{
+ return &__tracepoint_test_symbol3;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2008-2011 Mathieu Desnoyers
+ * Copyright (C) 2009 Pierre-Marc Fournier
+ *
+ * Ported to userspace by Pierre-Marc Fournier.
+ */
+
+#define _LGPL_SOURCE
+#include <errno.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+
+#include <urcu/arch.h>
+#include <lttng/urcu/urcu-ust.h>
+#include <urcu/hlist.h>
+#include <urcu/uatomic.h>
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#include <lttng/tracepoint.h>
+#include <lttng/ust-abi.h> /* for LTTNG_UST_ABI_SYM_NAME_LEN */
+
+#include "common/logging.h"
+#include "common/macros.h"
+
+#include "tracepoint-internal.h"
+#include "lttng-tracer-core.h"
+#include "jhash.h"
+#include "error.h"
+
+/* Test compiler support for weak symbols with hidden visibility. */
+int __tracepoint_test_symbol1 __attribute__((weak, visibility("hidden")));
+void *__tracepoint_test_symbol2 __attribute__((weak, visibility("hidden")));
+struct {
+ char a[24];
+} __tracepoint_test_symbol3 __attribute__((weak, visibility("hidden")));
+
+/* Set to 1 to enable tracepoint debug output */
+static const int tracepoint_debug;
+static int initialized;
+
+/*
+ * If tracepoint_destructors_state is 1, tracepoint destructors are
+ * enabled; otherwise they are disabled.
+ */
+static int tracepoint_destructors_state = 1;
+
+static void (*new_tracepoint_cb)(struct lttng_ust_tracepoint *);
+
+/*
+ * tracepoint_mutex nests inside UST mutex.
+ *
+ * Note about interaction with fork/clone: UST does not hold the
+ * tracepoint mutex across fork/clone because it is either:
+ * - nested within the UST mutex, in which case holding the UST mutex across
+ *   fork/clone suffices,
+ * - taken by a library constructor, which should never race with a
+ * fork/clone if the application is expected to continue running with
+ * the same memory layout (no following exec()).
+ */
+static pthread_mutex_t tracepoint_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * libraries that contain tracepoints (struct tracepoint_lib).
+ * Protected by tracepoint mutex.
+ */
+static CDS_LIST_HEAD(libs);
+
+/*
+ * The tracepoint mutex protects the library tracepoints, the hash table, and
+ * the library list.
+ * All calls to the tracepoint API must be protected by the tracepoint mutex,
+ * except for calls to tracepoint_register_lib and
+ * tracepoint_unregister_lib, which take the tracepoint mutex themselves.
+ */
+
+/*
+ * Tracepoint hash table, containing the active tracepoints.
+ * Protected by tracepoint mutex.
+ */
+#define TRACEPOINT_HASH_BITS 12
+#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
+static struct cds_hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
+
+static CDS_LIST_HEAD(old_probes);
+static int need_update;
+
+static CDS_LIST_HEAD(release_queue);
+static int release_queue_need_update;
+
+/*
+ * Note about RCU:
+ * It is used to delay freeing the multiple-probes arrays until a quiescent
+ * state is reached.
+ * Modifications to tracepoint entries are protected by the tracepoint mutex.
+ */
+struct tracepoint_entry {
+ struct cds_hlist_node hlist;
+ struct lttng_ust_tracepoint_probe *probes;
+ int refcount; /* Number of times armed. 0 if disarmed. */
+ int callsite_refcount; /* how many libs use this tracepoint */
+ char *signature;
+ char *name;
+};
+
+struct tp_probes {
+ union {
+ struct cds_list_head list;
+ /* Field below only used for call_rcu scheme */
+ /* struct rcu_head head; */
+ } u;
+ struct lttng_ust_tracepoint_probe probes[0];
+};
+
+/*
+ * Callsite hash table, containing the tracepoint call sites.
+ * Protected by tracepoint mutex.
+ */
+#define CALLSITE_HASH_BITS 12
+#define CALLSITE_TABLE_SIZE (1 << CALLSITE_HASH_BITS)
+static struct cds_hlist_head callsite_table[CALLSITE_TABLE_SIZE];
+
+struct callsite_entry {
+ struct cds_hlist_node hlist; /* hash table node */
+ struct cds_list_head node; /* lib list of callsites node */
+ struct lttng_ust_tracepoint *tp;
+	bool tp_entry_callsite_ref; /* Whether a tp_entry has taken a ref on this callsite */
+};
+
+/* coverity[+alloc] */
+static void *allocate_probes(int count)
+{
+ struct tp_probes *p =
+ zmalloc(count * sizeof(struct lttng_ust_tracepoint_probe)
+ + sizeof(struct tp_probes));
+ return p == NULL ? NULL : p->probes;
+}
+
+/* coverity[+free : arg-0] */
+static void release_probes(void *old)
+{
+ if (old) {
+ struct tp_probes *tp_probes = caa_container_of(old,
+ struct tp_probes, probes[0]);
+ lttng_ust_urcu_synchronize_rcu();
+ free(tp_probes);
+ }
+}
+
+static void debug_print_probes(struct tracepoint_entry *entry)
+{
+ int i;
+
+ if (!tracepoint_debug || !entry->probes)
+ return;
+
+ for (i = 0; entry->probes[i].func; i++)
+ DBG("Probe %d : %p", i, entry->probes[i].func);
+}
+
+static void *
+tracepoint_entry_add_probe(struct tracepoint_entry *entry,
+ void (*probe)(void), void *data)
+{
+ int nr_probes = 0;
+ struct lttng_ust_tracepoint_probe *old, *new;
+
+ if (!probe) {
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+ }
+ debug_print_probes(entry);
+ old = entry->probes;
+ if (old) {
+ /* (N -> N+1), (N != 0, 1) probes */
+ for (nr_probes = 0; old[nr_probes].func; nr_probes++)
+ if (old[nr_probes].func == probe &&
+ old[nr_probes].data == data)
+ return ERR_PTR(-EEXIST);
+ }
+ /* + 2 : one for new probe, one for NULL func */
+ new = allocate_probes(nr_probes + 2);
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (old)
+ memcpy(new, old,
+ nr_probes * sizeof(struct lttng_ust_tracepoint_probe));
+ new[nr_probes].func = probe;
+ new[nr_probes].data = data;
+ new[nr_probes + 1].func = NULL;
+ entry->refcount = nr_probes + 1;
+ entry->probes = new;
+ debug_print_probes(entry);
+ return old;
+}
+
+static void *
+tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
+ void (*probe)(void), void *data)
+{
+ int nr_probes = 0, nr_del = 0, i;
+ struct lttng_ust_tracepoint_probe *old, *new;
+
+ old = entry->probes;
+
+ if (!old)
+ return ERR_PTR(-ENOENT);
+
+ debug_print_probes(entry);
+ /* (N -> M), (N > 1, M >= 0) probes */
+ if (probe) {
+ for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
+ if (old[nr_probes].func == probe &&
+ old[nr_probes].data == data)
+ nr_del++;
+ }
+ }
+
+ if (nr_probes - nr_del == 0) {
+ /* N -> 0, (N > 1) */
+ entry->probes = NULL;
+ entry->refcount = 0;
+ debug_print_probes(entry);
+ return old;
+ } else {
+ int j = 0;
+ /* N -> M, (N > 1, M > 0) */
+ /* + 1 for NULL */
+ new = allocate_probes(nr_probes - nr_del + 1);
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ for (i = 0; old[i].func; i++)
+ if (old[i].func != probe || old[i].data != data)
+ new[j++] = old[i];
+ new[nr_probes - nr_del].func = NULL;
+ entry->refcount = nr_probes - nr_del;
+ entry->probes = new;
+ }
+ debug_print_probes(entry);
+ return old;
+}
+
+/*
+ * Get tracepoint if the tracepoint is present in the tracepoint hash table.
+ * Must be called with tracepoint mutex held.
+ * Returns NULL if not present.
+ */
+static struct tracepoint_entry *get_tracepoint(const char *name)
+{
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ struct tracepoint_entry *e;
+ size_t name_len = strlen(name);
+ uint32_t hash;
+
+ if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
+ WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
+ }
+ hash = jhash(name, name_len, 0);
+ head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
+ cds_hlist_for_each_entry(e, node, head, hlist) {
+ if (!strncmp(name, e->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1))
+ return e;
+ }
+ return NULL;
+}
+
+/*
+ * Add the tracepoint to the tracepoint hash table. Must be called with
+ * tracepoint mutex held.
+ */
+static struct tracepoint_entry *add_tracepoint(const char *name,
+ const char *signature)
+{
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ struct tracepoint_entry *e;
+ size_t name_len = strlen(name);
+ size_t sig_len = strlen(signature);
+ size_t sig_off, name_off;
+ uint32_t hash;
+
+ if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
+ WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
+ }
+ hash = jhash(name, name_len, 0);
+ head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
+ cds_hlist_for_each_entry(e, node, head, hlist) {
+ if (!strncmp(name, e->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1)) {
+ DBG("tracepoint %s busy", name);
+ return ERR_PTR(-EEXIST); /* Already there */
+ }
+ }
+
+ /*
+	 * Using zmalloc here to allocate variable-length elements: name and
+	 * signature. Could cause some memory fragmentation if overused.
+ */
+ name_off = sizeof(struct tracepoint_entry);
+ sig_off = name_off + name_len + 1;
+
+ e = zmalloc(sizeof(struct tracepoint_entry) + name_len + 1 + sig_len + 1);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+ e->name = (char *) e + name_off;
+ memcpy(e->name, name, name_len + 1);
+ e->name[name_len] = '\0';
+
+ e->signature = (char *) e + sig_off;
+ memcpy(e->signature, signature, sig_len + 1);
+ e->signature[sig_len] = '\0';
+
+ e->probes = NULL;
+ e->refcount = 0;
+ e->callsite_refcount = 0;
+
+ cds_hlist_add_head(&e->hlist, head);
+ return e;
+}
+
+/*
+ * Remove the tracepoint from the tracepoint hash table. Must be called with
+ * tracepoint mutex held.
+ */
+static void remove_tracepoint(struct tracepoint_entry *e)
+{
+ cds_hlist_del(&e->hlist);
+ free(e);
+}
+
+/*
+ * Sets the probe callback corresponding to one tracepoint.
+ */
+static void set_tracepoint(struct tracepoint_entry **entry,
+ struct lttng_ust_tracepoint *elem, int active)
+{
+ WARN_ON(strncmp((*entry)->name, elem->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1) != 0);
+ /*
+ * Check that signatures match before connecting a probe to a
+ * tracepoint. Warn the user if they don't.
+ */
+ if (strcmp(elem->signature, (*entry)->signature) != 0) {
+ static int warned = 0;
+
+ /* Only print once, don't flood console. */
+ if (!warned) {
+ WARN("Tracepoint signature mismatch, not enabling one or more tracepoints. Ensure that the tracepoint probes prototypes match the application.");
+ WARN("Tracepoint \"%s\" signatures: call: \"%s\" vs probe: \"%s\".",
+ elem->name, elem->signature, (*entry)->signature);
+ warned = 1;
+ }
+ /* Don't accept connecting non-matching signatures. */
+ return;
+ }
+
+ /*
+ * rcu_assign_pointer has a cmm_smp_wmb() which makes sure that the new
+ * probe callbacks array is consistent before setting a pointer to it.
+ * This array is referenced by __DO_TRACE from
+ * include/linux/tracepoints.h. A matching cmm_smp_read_barrier_depends()
+ * is used.
+ */
+ lttng_ust_rcu_assign_pointer(elem->probes, (*entry)->probes);
+ CMM_STORE_SHARED(elem->state, active);
+}
+
+/*
+ * Disable a tracepoint and its probe callback.
+ * Note: only waiting for an RCU grace period after setting elem->call to the
+ * empty function ensures that the original callback is not used anymore. This
+ * is ensured by preempt_disable around the call site.
+ */
+static void disable_tracepoint(struct lttng_ust_tracepoint *elem)
+{
+ CMM_STORE_SHARED(elem->state, 0);
+ lttng_ust_rcu_assign_pointer(elem->probes, NULL);
+}
+
+/*
+ * Add the callsite to the callsite hash table. Must be called with
+ * tracepoint mutex held.
+ */
+static void add_callsite(struct tracepoint_lib * lib, struct lttng_ust_tracepoint *tp)
+{
+ struct cds_hlist_head *head;
+ struct callsite_entry *e;
+ const char *name = tp->name;
+ size_t name_len = strlen(name);
+ uint32_t hash;
+ struct tracepoint_entry *tp_entry;
+
+ if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
+ WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
+ }
+ hash = jhash(name, name_len, 0);
+ head = &callsite_table[hash & (CALLSITE_TABLE_SIZE - 1)];
+ e = zmalloc(sizeof(struct callsite_entry));
+ if (!e) {
+ PERROR("Unable to add callsite for tracepoint \"%s\"", name);
+ return;
+ }
+ cds_hlist_add_head(&e->hlist, head);
+ e->tp = tp;
+ cds_list_add(&e->node, &lib->callsites);
+
+ tp_entry = get_tracepoint(name);
+ if (!tp_entry)
+ return;
+ tp_entry->callsite_refcount++;
+ e->tp_entry_callsite_ref = true;
+}
+
+/*
+ * Remove the callsite from the callsite hash table and from lib
+ * callsite list. Must be called with tracepoint mutex held.
+ */
+static void remove_callsite(struct callsite_entry *e)
+{
+ struct tracepoint_entry *tp_entry;
+
+ tp_entry = get_tracepoint(e->tp->name);
+ if (tp_entry) {
+ if (e->tp_entry_callsite_ref)
+ tp_entry->callsite_refcount--;
+ if (tp_entry->callsite_refcount == 0)
+ disable_tracepoint(e->tp);
+ }
+ cds_hlist_del(&e->hlist);
+ cds_list_del(&e->node);
+ free(e);
+}
+
+/*
+ * Enable/disable all callsites based on the state of a specific
+ * tracepoint entry.
+ * Must be called with tracepoint mutex held.
+ */
+static void tracepoint_sync_callsites(const char *name)
+{
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ struct callsite_entry *e;
+ size_t name_len = strlen(name);
+ uint32_t hash;
+ struct tracepoint_entry *tp_entry;
+
+ tp_entry = get_tracepoint(name);
+ if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
+ WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
+ }
+ hash = jhash(name, name_len, 0);
+ head = &callsite_table[hash & (CALLSITE_TABLE_SIZE - 1)];
+ cds_hlist_for_each_entry(e, node, head, hlist) {
+ struct lttng_ust_tracepoint *tp = e->tp;
+
+ if (strncmp(name, tp->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1))
+ continue;
+ if (tp_entry) {
+ if (!e->tp_entry_callsite_ref) {
+ tp_entry->callsite_refcount++;
+ e->tp_entry_callsite_ref = true;
+ }
+ set_tracepoint(&tp_entry, tp,
+ !!tp_entry->refcount);
+ } else {
+ disable_tracepoint(tp);
+ e->tp_entry_callsite_ref = false;
+ }
+ }
+}
+
+/**
+ * tracepoint_update_probe_range - Update a probe range
+ * @begin: beginning of the range
+ * @end: end of the range
+ *
+ * Updates the probe callback corresponding to a range of tracepoints.
+ */
+static
+void tracepoint_update_probe_range(struct lttng_ust_tracepoint * const *begin,
+ struct lttng_ust_tracepoint * const *end)
+{
+ struct lttng_ust_tracepoint * const *iter;
+ struct tracepoint_entry *mark_entry;
+
+ for (iter = begin; iter < end; iter++) {
+ if (!*iter)
+ continue; /* skip dummy */
+ if (!(*iter)->name) {
+ disable_tracepoint(*iter);
+ continue;
+ }
+ mark_entry = get_tracepoint((*iter)->name);
+ if (mark_entry) {
+ set_tracepoint(&mark_entry, *iter,
+ !!mark_entry->refcount);
+ } else {
+ disable_tracepoint(*iter);
+ }
+ }
+}
+
+static void lib_update_tracepoints(struct tracepoint_lib *lib)
+{
+ tracepoint_update_probe_range(lib->tracepoints_start,
+ lib->tracepoints_start + lib->tracepoints_count);
+}
+
+static void lib_register_callsites(struct tracepoint_lib *lib)
+{
+ struct lttng_ust_tracepoint * const *begin;
+ struct lttng_ust_tracepoint * const *end;
+ struct lttng_ust_tracepoint * const *iter;
+
+ begin = lib->tracepoints_start;
+ end = lib->tracepoints_start + lib->tracepoints_count;
+
+ for (iter = begin; iter < end; iter++) {
+ if (!*iter)
+ continue; /* skip dummy */
+ if (!(*iter)->name) {
+ continue;
+ }
+ add_callsite(lib, *iter);
+ }
+}
+
+static void lib_unregister_callsites(struct tracepoint_lib *lib)
+{
+ struct callsite_entry *callsite, *tmp;
+
+ cds_list_for_each_entry_safe(callsite, tmp, &lib->callsites, node)
+ remove_callsite(callsite);
+}
+
+/*
+ * Update probes, removing the faulty probes.
+ */
+static void tracepoint_update_probes(void)
+{
+ struct tracepoint_lib *lib;
+
+ /* tracepoints registered from libraries and executable. */
+ cds_list_for_each_entry(lib, &libs, list)
+ lib_update_tracepoints(lib);
+}
+
+static struct lttng_ust_tracepoint_probe *
+tracepoint_add_probe(const char *name, void (*probe)(void), void *data,
+ const char *signature)
+{
+ struct tracepoint_entry *entry;
+ struct lttng_ust_tracepoint_probe *old;
+
+ entry = get_tracepoint(name);
+ if (entry) {
+ if (strcmp(entry->signature, signature) != 0) {
+ ERR("Tracepoint and probe signature do not match.");
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ entry = add_tracepoint(name, signature);
+ if (IS_ERR(entry))
+ return (struct lttng_ust_tracepoint_probe *)entry;
+ }
+ old = tracepoint_entry_add_probe(entry, probe, data);
+ if (IS_ERR(old) && !entry->refcount)
+ remove_tracepoint(entry);
+ return old;
+}
+
+static void tracepoint_release_queue_add_old_probes(void *old)
+{
+ release_queue_need_update = 1;
+ if (old) {
+ struct tp_probes *tp_probes = caa_container_of(old,
+ struct tp_probes, probes[0]);
+ cds_list_add(&tp_probes->u.list, &release_queue);
+ }
+}
+
+/**
+ * __tracepoint_probe_register - Connect a probe to a tracepoint
+ * @name: tracepoint name
+ * @probe: probe handler
+ *
+ * Returns 0 if OK, a negative error value on error.
+ * The probe address must at least be aligned on the architecture pointer size.
+ * Takes the tracepoint mutex internally.
+ */
+int __tracepoint_probe_register(const char *name, void (*probe)(void),
+ void *data, const char *signature)
+{
+ void *old;
+ int ret = 0;
+
+ DBG("Registering probe to tracepoint %s", name);
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ old = tracepoint_add_probe(name, probe, data, signature);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ goto end;
+ }
+
+ tracepoint_sync_callsites(name);
+ release_probes(old);
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return ret;
+}
+
+/*
+ * Caller needs to invoke lttng_ust_tp_probe_prune_release_queue() after
+ * calling lttng_ust_tp_probe_register_queue_release() one or multiple
+ * times to ensure it does not leak memory.
+ */
+int lttng_ust_tp_probe_register_queue_release(const char *name,
+ void (*probe)(void), void *data, const char *signature)
+{
+ void *old;
+ int ret = 0;
+
+ DBG("Registering probe to tracepoint %s. Queuing release.", name);
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ old = tracepoint_add_probe(name, probe, data, signature);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ goto end;
+ }
+
+ tracepoint_sync_callsites(name);
+ tracepoint_release_queue_add_old_probes(old);
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return ret;
+}
+
+static void *tracepoint_remove_probe(const char *name, void (*probe)(void),
+ void *data)
+{
+ struct tracepoint_entry *entry;
+ void *old;
+
+ entry = get_tracepoint(name);
+ if (!entry)
+ return ERR_PTR(-ENOENT);
+ old = tracepoint_entry_remove_probe(entry, probe, data);
+ if (IS_ERR(old))
+ return old;
+ if (!entry->refcount)
+ remove_tracepoint(entry);
+ return old;
+}
+
+/**
+ * __tracepoint_probe_unregister - Disconnect a probe from a tracepoint
+ * @name: tracepoint name
+ * @probe: probe function pointer
+ * @data: probe data pointer
+ */
+int __tracepoint_probe_unregister(const char *name, void (*probe)(void),
+ void *data)
+{
+ void *old;
+ int ret = 0;
+
+ DBG("Un-registering probe from tracepoint %s", name);
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ old = tracepoint_remove_probe(name, probe, data);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ goto end;
+ }
+ tracepoint_sync_callsites(name);
+ release_probes(old);
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return ret;
+}
+
+/*
+ * Caller needs to invoke lttng_ust_tp_probe_prune_release_queue() after
+ * calling lttng_ust_tp_probe_unregister_queue_release() one or multiple
+ * times to ensure it does not leak memory.
+ */
+int lttng_ust_tp_probe_unregister_queue_release(const char *name,
+ void (*probe)(void), void *data)
+{
+ void *old;
+ int ret = 0;
+
+ DBG("Un-registering probe from tracepoint %s. Queuing release.", name);
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ old = tracepoint_remove_probe(name, probe, data);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ goto end;
+ }
+ tracepoint_sync_callsites(name);
+ tracepoint_release_queue_add_old_probes(old);
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return ret;
+}
+
+void lttng_ust_tp_probe_prune_release_queue(void)
+{
+ CDS_LIST_HEAD(release_probes);
+ struct tp_probes *pos, *next;
+
+ DBG("Release queue of unregistered tracepoint probes.");
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ if (!release_queue_need_update)
+ goto end;
+ if (!cds_list_empty(&release_queue))
+ cds_list_replace_init(&release_queue, &release_probes);
+ release_queue_need_update = 0;
+
+ /* Wait for grace period between all sync_callsites and free. */
+ lttng_ust_urcu_synchronize_rcu();
+
+ cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
+ cds_list_del(&pos->u.list);
+ free(pos);
+ }
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+}
+
+static void tracepoint_add_old_probes(void *old)
+{
+ need_update = 1;
+ if (old) {
+ struct tp_probes *tp_probes = caa_container_of(old,
+ struct tp_probes, probes[0]);
+ cds_list_add(&tp_probes->u.list, &old_probes);
+ }
+}
+
+/**
+ * tracepoint_probe_register_noupdate - register a probe without connecting it
+ * @name: tracepoint name
+ * @probe: probe handler
+ *
+ * The caller must then call tracepoint_probe_update_all().
+ */
+int tracepoint_probe_register_noupdate(const char *name, void (*probe)(void),
+ void *data, const char *signature)
+{
+ void *old;
+ int ret = 0;
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ old = tracepoint_add_probe(name, probe, data, signature);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ goto end;
+ }
+ tracepoint_add_old_probes(old);
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return ret;
+}
+
+/**
+ * tracepoint_probe_unregister_noupdate - remove a probe without disconnecting it
+ * @name: tracepoint name
+ * @probe: probe function pointer
+ *
+ * The caller must then call tracepoint_probe_update_all().
+ * Takes the tracepoint mutex internally.
+ */
+int tracepoint_probe_unregister_noupdate(const char *name, void (*probe)(void),
+ void *data)
+{
+ void *old;
+ int ret = 0;
+
+ DBG("Un-registering probe from tracepoint %s", name);
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ old = tracepoint_remove_probe(name, probe, data);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ goto end;
+ }
+ tracepoint_add_old_probes(old);
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return ret;
+}
+
+/**
+ * tracepoint_probe_update_all - update tracepoints
+ */
+void tracepoint_probe_update_all(void)
+{
+ CDS_LIST_HEAD(release_probes);
+ struct tp_probes *pos, *next;
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ if (!need_update) {
+ goto end;
+ }
+ if (!cds_list_empty(&old_probes))
+ cds_list_replace_init(&old_probes, &release_probes);
+ need_update = 0;
+
+ tracepoint_update_probes();
+ /* Wait for grace period between update_probes and free. */
+ lttng_ust_urcu_synchronize_rcu();
+ cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
+ cds_list_del(&pos->u.list);
+ free(pos);
+ }
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+}
+
+static void new_tracepoints(struct lttng_ust_tracepoint * const *start,
+ struct lttng_ust_tracepoint * const *end)
+{
+ if (new_tracepoint_cb) {
+ struct lttng_ust_tracepoint * const *t;
+
+ for (t = start; t < end; t++) {
+ if (*t)
+ new_tracepoint_cb(*t);
+ }
+ }
+}
+
+/*
+ * tracepoint_{un,}register_lib is meant to be looked up by instrumented
+ * applications through dlsym(). If found, instrumented applications can
+ * register their tracepoints; otherwise those tracepoints will not be
+ * available for tracing. The number at the end of those symbols acts as a major
+ * version for tracepoints.
+ *
+ * Older instrumented applications should still work with newer
+ * liblttng-ust, but it is fine that instrumented applications compiled
+ * against recent liblttng-ust headers require a recent liblttng-ust
+ * runtime for those tracepoints to be taken into account.
+ */
+int tracepoint_register_lib(struct lttng_ust_tracepoint * const *tracepoints_start,
+ int tracepoints_count);
+int tracepoint_register_lib(struct lttng_ust_tracepoint * const *tracepoints_start,
+ int tracepoints_count)
+{
+ struct tracepoint_lib *pl, *iter;
+
+ lttng_ust_tp_init();
+
+ pl = (struct tracepoint_lib *) zmalloc(sizeof(struct tracepoint_lib));
+ if (!pl) {
+ PERROR("Unable to register tracepoint lib");
+ return -1;
+ }
+ pl->tracepoints_start = tracepoints_start;
+ pl->tracepoints_count = tracepoints_count;
+ CDS_INIT_LIST_HEAD(&pl->callsites);
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ /*
+ * We sort the libs by struct lib pointer address.
+ */
+ cds_list_for_each_entry_reverse(iter, &libs, list) {
+ BUG_ON(iter == pl); /* Should never be in the list twice */
+ if (iter < pl) {
+ /* We belong to the location right after iter. */
+ cds_list_add(&pl->list, &iter->list);
+ goto lib_added;
+ }
+ }
+ /* We should be added at the head of the list */
+ cds_list_add(&pl->list, &libs);
+lib_added:
+ new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);
+ lib_register_callsites(pl);
+ lib_update_tracepoints(pl);
+ pthread_mutex_unlock(&tracepoint_mutex);
+
+ DBG("just registered a tracepoints section from %p and having %d tracepoints",
+ tracepoints_start, tracepoints_count);
+ if (ust_err_debug_enabled()) {
+ int i;
+
+ for (i = 0; i < tracepoints_count; i++) {
+ DBG("registered tracepoint: %s", tracepoints_start[i]->name);
+ }
+ }
+
+ return 0;
+}
+
+int tracepoint_unregister_lib(struct lttng_ust_tracepoint * const *tracepoints_start);
+int tracepoint_unregister_lib(struct lttng_ust_tracepoint * const *tracepoints_start)
+{
+ struct tracepoint_lib *lib;
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ cds_list_for_each_entry(lib, &libs, list) {
+ if (lib->tracepoints_start != tracepoints_start)
+ continue;
+
+ cds_list_del(&lib->list);
+ /*
+ * Unregistering a callsite also decreases the
+ * callsite reference count of the corresponding
+ * tracepoint, and disables the tracepoint if
+ * the reference count drops to zero.
+ */
+ lib_unregister_callsites(lib);
+ DBG("just unregistered a tracepoints section from %p",
+ lib->tracepoints_start);
+ free(lib);
+ break;
+ }
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return 0;
+}
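The registration entry points above are meant to be resolved at run time rather than linked against directly; the following is an illustrative sketch of such a lookup (the symbol name comes from the definition above, everything else is an assumption, and real probe providers normally rely on the tracepoint.h machinery instead):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stddef.h>

struct lttng_ust_tracepoint;

typedef int (*tp_register_fn)(struct lttng_ust_tracepoint * const *, int);

/* Resolve tracepoint_register_lib if the tracepoint library is loaded,
 * and silently do nothing otherwise. */
static int maybe_register_tracepoints(struct lttng_ust_tracepoint * const *start,
		int count)
{
	tp_register_fn reg;

	reg = (tp_register_fn) dlsym(RTLD_DEFAULT, "tracepoint_register_lib");
	if (!reg)
		return 0;	/* Tracing support absent: not an error. */
	return reg(start, count);
}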
+
+/*
+ * Report in a debug message whether the compiler correctly supports weak
+ * hidden symbols. This test checks that the address associated with two
+ * weak symbols with hidden visibility is the same when they are declared
+ * within two compile units that are part of the same module.
+ */
+static void check_weak_hidden(void)
+{
+ DBG("Your compiler treats weak symbols with hidden visibility for integer objects as %s between compile units part of the same module.",
+ &__tracepoint_test_symbol1 == lttng_ust_tp_check_weak_hidden1() ?
+ "SAME address" :
+ "DIFFERENT addresses");
+ DBG("Your compiler treats weak symbols with hidden visibility for pointer objects as %s between compile units part of the same module.",
+ &__tracepoint_test_symbol2 == lttng_ust_tp_check_weak_hidden2() ?
+ "SAME address" :
+ "DIFFERENT addresses");
+ DBG("Your compiler treats weak symbols with hidden visibility for 24-byte structure objects as %s between compile units part of the same module.",
+ &__tracepoint_test_symbol3 == lttng_ust_tp_check_weak_hidden3() ?
+ "SAME address" :
+ "DIFFERENT addresses");
+}
+
+void lttng_ust_tp_init(void)
+{
+ if (uatomic_xchg(&initialized, 1) == 1)
+ return;
+ ust_err_init();
+ check_weak_hidden();
+}
+
+void lttng_ust_tp_exit(void)
+{
+ initialized = 0;
+}
+
+/*
+ * Create the wrapper symbols.
+ */
+#undef tp_rcu_read_lock
+#undef tp_rcu_read_unlock
+#undef tp_rcu_dereference
+
+void tp_rcu_read_lock(void);
+void tp_rcu_read_lock(void)
+{
+ lttng_ust_urcu_read_lock();
+}
+
+void tp_rcu_read_unlock(void);
+void tp_rcu_read_unlock(void)
+{
+ lttng_ust_urcu_read_unlock();
+}
+
+void *tp_rcu_dereference_sym(void *p);
+void *tp_rcu_dereference_sym(void *p)
+{
+ return lttng_ust_rcu_dereference(p);
+}
+
+/*
+ * Programs that have threads that survive after they exit, and therefore call
+ * library destructors, should disable the tracepoint destructors by calling
+ * tp_disable_destructors(). This will leak the tracepoint
+ * instrumentation library shared object, leaving its teardown to the operating
+ * system process teardown.
+ *
+ * To access and/or modify this value, users need to use a combination of
+ * dlopen(3) and dlsym(3) to get a handle on the
+ * tp_disable_destructors and tp_get_destructors_state symbols below.
+ */
+void tp_disable_destructors(void);
+void tp_disable_destructors(void)
+{
+ uatomic_set(&tracepoint_destructors_state, 0);
+}
+
+/*
+ * Returns 1 if the destructors are enabled and should be executed.
+ * Returns 0 if the destructors are disabled.
+ */
+int tp_get_destructors_state(void);
+int tp_get_destructors_state(void)
+{
+ return uatomic_read(&tracepoint_destructors_state);
+}
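As the comment above describes, these two entry points are reached through dlopen(3)/dlsym(3) rather than a public header; a minimal sketch of that lookup, assuming the tracepoint library is already loaded so RTLD_DEFAULT can find the symbols:

#define _GNU_SOURCE
#include <dlfcn.h>

/* Ask the tracepoint library to skip its destructors, e.g. from an
 * application whose threads outlive main(). */
static void request_no_tracepoint_destructors(void)
{
	void (*disable)(void);
	int (*state)(void);

	disable = (void (*)(void)) dlsym(RTLD_DEFAULT, "tp_disable_destructors");
	if (disable)
		disable();

	state = (int (*)(void)) dlsym(RTLD_DEFAULT, "tp_get_destructors_state");
	if (state)
		(void) state();	/* 1: destructors enabled, 0: disabled. */
}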
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2021 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#include "common/logging.h"
+#include "common/ust-fd.h"
+
+static
+void lttng_ust_common_init(void)
+ __attribute__((constructor));
+static
+void lttng_ust_common_init(void)
+{
+ /* Initialize logging for liblttng-ust-common */
+ ust_err_init();
+
+ /*
+	 * Initialize the fd tracker. Other libraries that use it should also
+	 * call this in their own constructors, in case theirs run before this one.
+ */
+ lttng_ust_init_fd_tracker();
+}
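A hedged sketch of the pattern the constructor comment above asks for: another library that relies on the fd tracker calls the same init from its own constructor, so the relative constructor order does not matter (the constructor name below is illustrative):

#include "common/ust-fd.h"

static void my_other_lib_init(void)
	__attribute__((constructor));
static void my_other_lib_init(void)
{
	/*
	 * Per the comment above, calling this again is expected to be safe
	 * even if liblttng-ust-common's constructor already ran.
	 */
	lttng_ust_init_fd_tracker();
}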
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <stdint.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "context-internal.h"
+#include "ust-events-internal.h"
+#include "common/logging.h"
+#include "lttng-tracer-core.h"
+#include "lttng-rb-clients.h"
+#include "lttng-counter-client.h"
+#include "jhash.h"
+
+static CDS_LIST_HEAD(lttng_transport_list);
+static CDS_LIST_HEAD(lttng_counter_transport_list);
+
+struct lttng_transport *lttng_ust_transport_find(const char *name)
+{
+ struct lttng_transport *transport;
+
+ cds_list_for_each_entry(transport, <tng_transport_list, node) {
+ if (!strcmp(transport->name, name))
+ return transport;
+ }
+ return NULL;
+}
+
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
+{
+ struct lttng_counter_transport *transport;
+
+ cds_list_for_each_entry(transport, <tng_counter_transport_list, node) {
+ if (!strcmp(transport->name, name))
+ return transport;
+ }
+ return NULL;
+}
+
+/**
+ * lttng_transport_register - LTT transport registration
+ * @transport: transport structure
+ *
+ * Registers a transport which can be used as output to extract the data out of
+ * LTTng. Called with ust_lock held.
+ */
+void lttng_transport_register(struct lttng_transport *transport)
+{
+ cds_list_add_tail(&transport->node, <tng_transport_list);
+}
+
+/**
+ * lttng_transport_unregister - LTT transport unregistration
+ * @transport: transport structure
+ * Called with ust_lock held.
+ */
+void lttng_transport_unregister(struct lttng_transport *transport)
+{
+ cds_list_del(&transport->node);
+}
+
+/**
+ * lttng_counter_transport_register - LTTng counter transport registration
+ * @transport: transport structure
+ *
+ * Registers a counter transport which can be used as output to extract
+ * the data out of LTTng. Called with ust_lock held.
+ */
+void lttng_counter_transport_register(struct lttng_counter_transport *transport)
+{
+ cds_list_add_tail(&transport->node, <tng_counter_transport_list);
+}
+
+/**
+ * lttng_counter_transport_unregister - LTTng counter transport unregistration
+ * @transport: transport structure
+ * Called with ust_lock held.
+ */
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
+{
+ cds_list_del(&transport->node);
+}
+
+/*
+ * Needed by comm layer.
+ */
+struct lttng_enum *lttng_ust_enum_get_from_desc(struct lttng_ust_session *session,
+ const struct lttng_ust_enum_desc *enum_desc)
+{
+ struct lttng_enum *_enum;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ size_t name_len = strlen(enum_desc->name);
+ uint32_t hash;
+
+ hash = jhash(enum_desc->name, name_len, 0);
+ head = &session->priv->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
+ cds_hlist_for_each_entry(_enum, node, head, hlist) {
+ assert(_enum->desc);
+ if (_enum->desc == enum_desc)
+ return _enum;
+ }
+ return NULL;
+}
+
+size_t lttng_ust_dummy_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
+ size += sizeof(char); /* tag */
+ return size;
+}
+
+void lttng_ust_dummy_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ char sel_char = (char) LTTNG_UST_DYNAMIC_TYPE_NONE;
+
+ chan->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(sel_char));
+}
+
+void lttng_ust_dummy_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->sel = LTTNG_UST_DYNAMIC_TYPE_NONE;
+}
+
+int lttng_context_is_app(const char *name)
+{
+ if (strncmp(name, "$app.", strlen("$app.")) != 0) {
+ return 0;
+ }
+ return 1;
+}
+
+struct lttng_ust_channel_buffer *lttng_ust_alloc_channel_buffer(void)
+{
+ struct lttng_ust_channel_buffer *lttng_chan_buf;
+ struct lttng_ust_channel_common *lttng_chan_common;
+ struct lttng_ust_channel_buffer_private *lttng_chan_buf_priv;
+
+ lttng_chan_buf = zmalloc(sizeof(struct lttng_ust_channel_buffer));
+ if (!lttng_chan_buf)
+ goto lttng_chan_buf_error;
+ lttng_chan_buf->struct_size = sizeof(struct lttng_ust_channel_buffer);
+ lttng_chan_common = zmalloc(sizeof(struct lttng_ust_channel_common));
+ if (!lttng_chan_common)
+ goto lttng_chan_common_error;
+ lttng_chan_common->struct_size = sizeof(struct lttng_ust_channel_common);
+ lttng_chan_buf_priv = zmalloc(sizeof(struct lttng_ust_channel_buffer_private));
+ if (!lttng_chan_buf_priv)
+ goto lttng_chan_buf_priv_error;
+ lttng_chan_buf->parent = lttng_chan_common;
+ lttng_chan_common->type = LTTNG_UST_CHANNEL_TYPE_BUFFER;
+ lttng_chan_common->child = lttng_chan_buf;
+ lttng_chan_buf->priv = lttng_chan_buf_priv;
+ lttng_chan_common->priv = <tng_chan_buf_priv->parent;
+ lttng_chan_buf_priv->pub = lttng_chan_buf;
+ lttng_chan_buf_priv->parent.pub = lttng_chan_common;
+
+ return lttng_chan_buf;
+
+lttng_chan_buf_priv_error:
+ free(lttng_chan_common);
+lttng_chan_common_error:
+ free(lttng_chan_buf);
+lttng_chan_buf_error:
+ return NULL;
+}
+
+void lttng_ust_free_channel_common(struct lttng_ust_channel_common *chan)
+{
+ switch (chan->type) {
+ case LTTNG_UST_CHANNEL_TYPE_BUFFER:
+ {
+ struct lttng_ust_channel_buffer *chan_buf;
+
+ chan_buf = (struct lttng_ust_channel_buffer *)chan->child;
+ free(chan_buf->parent);
+ free(chan_buf->priv);
+ free(chan_buf);
+ break;
+ }
+ default:
+ abort();
+ }
+}
+
+void lttng_ust_ring_buffer_clients_init(void)
+{
+ lttng_ring_buffer_metadata_client_init();
+ lttng_ring_buffer_client_overwrite_init();
+ lttng_ring_buffer_client_overwrite_rt_init();
+ lttng_ring_buffer_client_discard_init();
+ lttng_ring_buffer_client_discard_rt_init();
+}
+
+void lttng_ust_ring_buffer_clients_exit(void)
+{
+ lttng_ring_buffer_client_discard_rt_exit();
+ lttng_ring_buffer_client_discard_exit();
+ lttng_ring_buffer_client_overwrite_rt_exit();
+ lttng_ring_buffer_client_overwrite_exit();
+ lttng_ring_buffer_metadata_client_exit();
+}
+
+void lttng_ust_counter_clients_init(void)
+{
+ lttng_counter_client_percpu_64_modular_init();
+ lttng_counter_client_percpu_32_modular_init();
+}
+
+void lttng_ust_counter_clients_exit(void)
+{
+ lttng_counter_client_percpu_32_modular_exit();
+ lttng_counter_client_percpu_64_modular_exit();
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright 2019 (c) Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#ifndef _LTTNG_UST_EVENTS_INTERNAL_H
+#define _LTTNG_UST_EVENTS_INTERNAL_H
+
+#include <limits.h>
+#include <stdint.h>
+
+#include <urcu/list.h>
+#include <urcu/hlist.h>
+
+#include <lttng/ust-events.h>
+
+#include "common/macros.h"
+#include "common/ust-context-provider.h"
+
+struct lttng_ust_abi_obj;
+struct lttng_event_notifier_group;
+
+union lttng_ust_abi_args {
+ struct {
+ void *chan_data;
+ int wakeup_fd;
+ } channel;
+ struct {
+ int shm_fd;
+ int wakeup_fd;
+ } stream;
+ struct {
+ struct lttng_ust_abi_field_iter entry;
+ } field_list;
+ struct {
+ char *ctxname;
+ } app_context;
+ struct {
+ int event_notifier_notif_fd;
+ } event_notifier_handle;
+ struct {
+ void *counter_data;
+ } counter;
+ struct {
+ int shm_fd;
+ } counter_shm;
+};
+
+struct lttng_ust_abi_objd_ops {
+ long (*cmd)(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *args, void *owner);
+ int (*release)(int objd);
+};
+
+enum lttng_enabler_format_type {
+ LTTNG_ENABLER_FORMAT_STAR_GLOB,
+ LTTNG_ENABLER_FORMAT_EVENT,
+};
+
+/*
+ * Enabler field, within whatever object is enabling an event. Target of
+ * backward reference.
+ */
+struct lttng_enabler {
+ enum lttng_enabler_format_type format_type;
+
+ /* head list of struct lttng_ust_filter_bytecode_node */
+ struct cds_list_head filter_bytecode_head;
+ /* head list of struct lttng_ust_excluder_node */
+ struct cds_list_head excluder_head;
+
+ struct lttng_ust_abi_event event_param;
+ unsigned int enabled:1;
+};
+
+struct lttng_event_enabler {
+ struct lttng_enabler base;
+ struct cds_list_head node; /* per-session list of enablers */
+ struct lttng_ust_channel_buffer *chan;
+ /*
+ * Unused, but kept around to make it explicit that the tracer can do
+ * it.
+ */
+ struct lttng_ust_ctx *ctx;
+};
+
+struct lttng_event_notifier_enabler {
+ struct lttng_enabler base;
+ uint64_t error_counter_index;
+ struct cds_list_head node; /* per-app list of event_notifier enablers */
+ struct cds_list_head capture_bytecode_head;
+ struct lttng_event_notifier_group *group; /* weak ref */
+ uint64_t user_token; /* User-provided token */
+ uint64_t num_captures;
+};
+
+enum lttng_ust_bytecode_type {
+ LTTNG_UST_BYTECODE_TYPE_FILTER,
+ LTTNG_UST_BYTECODE_TYPE_CAPTURE,
+};
+
+struct lttng_ust_bytecode_node {
+ enum lttng_ust_bytecode_type type;
+ struct cds_list_head node;
+ struct lttng_enabler *enabler;
+ struct {
+ uint32_t len;
+ uint32_t reloc_offset;
+ uint64_t seqnum;
+ char data[];
+ } bc;
+};
+
+/*
+ * Bytecode interpreter return value.
+ */
+enum lttng_ust_bytecode_interpreter_ret {
+ LTTNG_UST_BYTECODE_INTERPRETER_ERROR = -1,
+ LTTNG_UST_BYTECODE_INTERPRETER_OK = 0,
+};
+
+struct lttng_interpreter_output;
+struct lttng_ust_bytecode_runtime_private;
+
+enum lttng_ust_bytecode_filter_result {
+ LTTNG_UST_BYTECODE_FILTER_ACCEPT = 0,
+ LTTNG_UST_BYTECODE_FILTER_REJECT = 1,
+};
+
+struct lttng_ust_bytecode_filter_ctx {
+ enum lttng_ust_bytecode_filter_result result;
+};
+
+struct lttng_ust_excluder_node {
+ struct cds_list_head node;
+ struct lttng_enabler *enabler;
+ /*
+ * struct lttng_ust_event_exclusion had variable sized array,
+ * must be last field.
+ */
+ struct lttng_ust_abi_event_exclusion excluder;
+};
+
+/* Data structures used by the tracer. */
+
+struct tp_list_entry {
+ struct lttng_ust_abi_tracepoint_iter tp;
+ struct cds_list_head head;
+};
+
+struct lttng_ust_tracepoint_list {
+ struct tp_list_entry *iter;
+ struct cds_list_head head;
+};
+
+struct tp_field_list_entry {
+ struct lttng_ust_abi_field_iter field;
+ struct cds_list_head head;
+};
+
+struct lttng_ust_field_list {
+ struct tp_field_list_entry *iter;
+ struct cds_list_head head;
+};
+
+/*
+ * Objects in a linked-list of enablers, owned by an event or event_notifier.
+ * This is used because an event (or an event_notifier) can be enabled by more
+ * than one enabler and we want a quick way to iterate over all enablers of an
+ * object.
+ *
+ * For example, event rules "my_app:a*" and "my_app:ab*" will both match the
+ * event with the name "my_app:abc".
+ */
+struct lttng_enabler_ref {
+ struct cds_list_head node; /* enabler ref list */
+ struct lttng_enabler *ref; /* backward ref */
+};
+
+#define LTTNG_COUNTER_DIMENSION_MAX 8
+struct lttng_counter_dimension {
+ uint64_t size;
+ uint64_t underflow_index;
+ uint64_t overflow_index;
+ uint8_t has_underflow;
+ uint8_t has_overflow;
+};
+
+struct lttng_counter_ops {
+ struct lib_counter *(*counter_create)(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon);
+ void (*counter_destroy)(struct lib_counter *counter);
+ int (*counter_add)(struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v);
+ int (*counter_read)(struct lib_counter *counter,
+ const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow);
+ int (*counter_aggregate)(struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t *value,
+ bool *overflow, bool *underflow);
+ int (*counter_clear)(struct lib_counter *counter, const size_t *dimension_indexes);
+};
+
+struct lttng_counter {
+ int objd;
+ struct lttng_event_notifier_group *event_notifier_group; /* owner */
+ struct lttng_counter_transport *transport;
+ struct lib_counter *counter;
+ struct lttng_counter_ops *ops;
+};
+
+#define LTTNG_UST_EVENT_HT_BITS 12
+#define LTTNG_UST_EVENT_HT_SIZE (1U << LTTNG_UST_EVENT_HT_BITS)
+
+struct lttng_ust_event_ht {
+ struct cds_hlist_head table[LTTNG_UST_EVENT_HT_SIZE];
+};
+
+#define LTTNG_UST_EVENT_NOTIFIER_HT_BITS 12
+#define LTTNG_UST_EVENT_NOTIFIER_HT_SIZE (1U << LTTNG_UST_EVENT_NOTIFIER_HT_BITS)
+struct lttng_ust_event_notifier_ht {
+ struct cds_hlist_head table[LTTNG_UST_EVENT_NOTIFIER_HT_SIZE];
+};
+
+#define LTTNG_UST_ENUM_HT_BITS 12
+#define LTTNG_UST_ENUM_HT_SIZE (1U << LTTNG_UST_ENUM_HT_BITS)
+
+struct lttng_ust_enum_ht {
+ struct cds_hlist_head table[LTTNG_UST_ENUM_HT_SIZE];
+};
+
+struct lttng_event_notifier_group {
+ int objd;
+ void *owner;
+ int notification_fd;
+ struct cds_list_head node; /* Event notifier group handle list */
+ struct cds_list_head enablers_head;
+ struct cds_list_head event_notifiers_head; /* list of event_notifiers */
+ struct lttng_ust_event_notifier_ht event_notifiers_ht; /* hashtable of event_notifiers */
+ struct lttng_ust_ctx *ctx; /* contexts for filters. */
+
+ struct lttng_counter *error_counter;
+ size_t error_counter_len;
+};
+
+struct lttng_transport {
+ const char *name;
+ struct cds_list_head node;
+ struct lttng_ust_channel_buffer_ops ops;
+ const struct lttng_ust_lib_ring_buffer_config *client_config;
+};
+
+struct lttng_counter_transport {
+ const char *name;
+ struct cds_list_head node;
+ struct lttng_counter_ops ops;
+ const struct lib_counter_config *client_config;
+};
+
+struct lttng_ust_event_common_private {
+ struct lttng_ust_event_common *pub; /* Public event interface */
+
+ const struct lttng_ust_event_desc *desc;
+ /* Backward references: list of lttng_enabler_ref (ref to enablers) */
+ struct cds_list_head enablers_ref_head;
+ int registered; /* has reg'd tracepoint probe */
+ uint64_t user_token;
+
+ int has_enablers_without_filter_bytecode;
+ /* list of struct lttng_ust_bytecode_runtime, sorted by seqnum */
+ struct cds_list_head filter_bytecode_runtime_head;
+};
+
+struct lttng_ust_event_recorder_private {
+ struct lttng_ust_event_common_private parent;
+
+ struct lttng_ust_event_recorder *pub; /* Public event interface */
+ struct cds_list_head node; /* Event recorder list */
+ struct cds_hlist_node hlist; /* Hash table of event recorders */
+ struct lttng_ust_ctx *ctx;
+ unsigned int id;
+};
+
+struct lttng_ust_event_notifier_private {
+ struct lttng_ust_event_common_private parent;
+
+ struct lttng_ust_event_notifier *pub; /* Public event notifier interface */
+ struct lttng_event_notifier_group *group; /* weak ref */
+ size_t num_captures; /* Needed to allocate the msgpack array. */
+ uint64_t error_counter_index;
+ struct cds_list_head node; /* Event notifier list */
+ struct cds_hlist_node hlist; /* Hash table of event notifiers */
+ struct cds_list_head capture_bytecode_runtime_head;
+};
+
+struct lttng_ust_bytecode_runtime {
+ enum lttng_ust_bytecode_type type;
+ struct lttng_ust_bytecode_node *bc;
+ int link_failed;
+ int (*interpreter_func)(struct lttng_ust_bytecode_runtime *bytecode_runtime,
+ const char *interpreter_stack_data,
+ void *ctx);
+ struct cds_list_head node; /* list of bytecode runtime in event */
+ /*
+	 * Pointer to a URCU-protected pointer owned by a `struct
+	 * lttng_session` or a `struct lttng_event_notifier_group`.
+ */
+ struct lttng_ust_ctx **pctx;
+};
+
+struct lttng_ust_session_private {
+ struct lttng_ust_session *pub; /* Public session interface */
+
+	int been_active;			/* Been active? */
+ int objd; /* Object associated */
+ struct cds_list_head chan_head; /* Channel list head */
+ struct cds_list_head events_head; /* list of events */
+ struct cds_list_head node; /* Session list */
+
+ /* List of enablers */
+ struct cds_list_head enablers_head;
+ struct lttng_ust_event_ht events_ht; /* ht of events */
+ void *owner; /* object owner */
+ int tstate:1; /* Transient enable state */
+
+ int statedump_pending:1;
+
+ struct lttng_ust_enum_ht enums_ht; /* ht of enumerations */
+ struct cds_list_head enums_head;
+ struct lttng_ust_ctx *ctx; /* contexts for filters. */
+
+ unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
+	bool uuid_set;			/* Is uuid set? */
+};
+
+struct lttng_enum {
+ const struct lttng_ust_enum_desc *desc;
+ struct lttng_ust_session *session;
+ struct cds_list_head node; /* Enum list in session */
+ struct cds_hlist_node hlist; /* Session ht of enums */
+ uint64_t id; /* Enumeration ID in sessiond */
+};
+
+struct lttng_ust_shm_handle;
+
+struct lttng_ust_channel_buffer_ops_private {
+ struct lttng_ust_channel_buffer_ops *pub; /* Public channel buffer ops interface */
+
+ struct lttng_ust_channel_buffer *(*channel_create)(const char *name,
+ void *buf_addr,
+ size_t subbuf_size, size_t num_subbuf,
+ unsigned int switch_timer_interval,
+ unsigned int read_timer_interval,
+ unsigned char *uuid,
+ uint32_t chan_id,
+ const int *stream_fds, int nr_stream_fds,
+ int64_t blocking_timeout);
+ void (*channel_destroy)(struct lttng_ust_channel_buffer *chan);
+ /*
+ * packet_avail_size returns the available size in the current
+ * packet. Note that the size returned is only a hint, since it
+ * may change due to concurrent writes.
+ */
+ size_t (*packet_avail_size)(struct lttng_ust_channel_buffer *chan);
+ int (*is_finalized)(struct lttng_ust_channel_buffer *chan);
+ int (*is_disabled)(struct lttng_ust_channel_buffer *chan);
+ int (*flush_buffer)(struct lttng_ust_channel_buffer *chan);
+};
+
+struct lttng_ust_channel_common_private {
+ struct lttng_ust_channel_common *pub; /* Public channel interface */
+
+ int objd; /* Object associated with channel. */
+ int tstate:1; /* Transient enable state */
+};
+
+struct lttng_ust_channel_buffer_private {
+ struct lttng_ust_channel_common_private parent;
+
+ struct lttng_ust_channel_buffer *pub; /* Public channel buffer interface */
+ struct cds_list_head node; /* Channel list in session */
+ int header_type; /* 0: unset, 1: compact, 2: large */
+ unsigned int id; /* Channel ID */
+ enum lttng_ust_abi_chan_type type;
+ struct lttng_ust_ctx *ctx;
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan; /* Ring buffer channel */
+ unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
+};
+
+/*
+ * IMPORTANT: this structure is part of the ABI between the consumer
+ * daemon and the UST library within traced applications. Changing it
+ * breaks the UST communication protocol.
+ *
+ * TODO: remove unused fields on next UST communication protocol
+ * breaking update.
+ */
+struct lttng_ust_abi_channel_config {
+ void *unused1;
+ int unused2;
+ void *unused3;
+ void *unused4;
+ int unused5;
+ struct cds_list_head unused6;
+ void *unused7;
+ int unused8;
+ void *unused9;
+
+ /* Channel ID */
+ unsigned int id;
+ enum lttng_ust_abi_chan_type unused10;
+ unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
+ int unused11:1;
+};
+
+/* Global (filter), event and channel contexts. */
+struct lttng_ust_ctx {
+ struct lttng_ust_ctx_field *fields;
+ unsigned int nr_fields;
+ unsigned int allocated_fields;
+ unsigned int largest_align;
+};
+
+struct lttng_ust_registered_probe {
+ const struct lttng_ust_probe_desc *desc;
+
+ struct cds_list_head head; /* chain registered probes */
+ struct cds_list_head lazy_init_head;
+ int lazy; /* lazy registration */
+};
+
+/*
+ * Context field
+ */
+
+struct lttng_ust_ctx_field {
+ const struct lttng_ust_event_field *event_field;
+ size_t (*get_size)(void *priv, size_t offset);
+ void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan);
+ void (*get_value)(void *priv, struct lttng_ust_ctx_value *value);
+ void (*destroy)(void *priv);
+ void *priv;
+};
+
+static inline
+const struct lttng_ust_type_integer *lttng_ust_get_type_integer(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_integer)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_integer, parent);
+}
+
+static inline
+const struct lttng_ust_type_float *lttng_ust_get_type_float(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_float)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_float, parent);
+}
+
+static inline
+const struct lttng_ust_type_string *lttng_ust_get_type_string(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_string)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_string, parent);
+}
+
+static inline
+const struct lttng_ust_type_enum *lttng_ust_get_type_enum(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_enum)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_enum, parent);
+}
+
+static inline
+const struct lttng_ust_type_array *lttng_ust_get_type_array(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_array)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_array, parent);
+}
+
+static inline
+const struct lttng_ust_type_sequence *lttng_ust_get_type_sequence(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_sequence)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_sequence, parent);
+}
+
+static inline
+const struct lttng_ust_type_struct *lttng_ust_get_type_struct(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_struct)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_struct, parent);
+}
+
+#define lttng_ust_static_type_integer(_size, _alignment, _signedness, _byte_order, _base) \
+ ((const struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_type_integer, { \
+ .parent = { \
+ .type = lttng_ust_type_integer, \
+ }, \
+ .struct_size = sizeof(struct lttng_ust_type_integer), \
+ .size = (_size), \
+ .alignment = (_alignment), \
+ .signedness = (_signedness), \
+ .reverse_byte_order = (_byte_order) != BYTE_ORDER, \
+ .base = (_base), \
+ }))
+
+#define lttng_ust_static_type_array_text(_length) \
+ ((const struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_type_array, { \
+ .parent = { \
+ .type = lttng_ust_type_array, \
+ }, \
+ .struct_size = sizeof(struct lttng_ust_type_array), \
+ .length = (_length), \
+ .alignment = 0, \
+ .encoding = lttng_ust_string_encoding_UTF8, \
+ .elem_type = lttng_ust_static_type_integer(sizeof(char) * CHAR_BIT, \
+ lttng_ust_rb_alignof(char) * CHAR_BIT, lttng_ust_is_signed_type(char), \
+ BYTE_ORDER, 10), \
+ }))
+
+#define lttng_ust_static_event_field(_name, _type, _nowrite, _nofilter) \
+ __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, { \
+ .struct_size = sizeof(struct lttng_ust_event_field), \
+ .name = (_name), \
+ .type = (_type), \
+ .nowrite = (_nowrite), \
+ .nofilter = (_nofilter), \
+ })
+
+#define lttng_ust_static_ctx_field(_event_field, _get_size, _record, _get_value, _destroy, _priv) \
+ __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_ctx_field, { \
+ .event_field = (_event_field), \
+ .get_size = (_get_size), \
+ .record = (_record), \
+ .get_value = (_get_value), \
+ .destroy = (_destroy), \
+ .priv = (_priv), \
+ })
+
+static inline
+struct lttng_enabler *lttng_event_enabler_as_enabler(
+ struct lttng_event_enabler *event_enabler)
+{
+ return &event_enabler->base;
+}
+
+static inline
+struct lttng_enabler *lttng_event_notifier_enabler_as_enabler(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ return &event_notifier_enabler->base;
+}
+
+/*
+ * Allocate and initialize a `struct lttng_event_enabler` object.
+ *
+ * On success, returns a `struct lttng_event_enabler`,
+ * On memory error, returns NULL.
+ */
+struct lttng_event_enabler *lttng_event_enabler_create(
+ enum lttng_enabler_format_type format_type,
+ struct lttng_ust_abi_event *event_param,
+ struct lttng_ust_channel_buffer *chan)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Destroy a `struct lttng_event_enabler` object.
+ */
+void lttng_event_enabler_destroy(struct lttng_event_enabler *enabler)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Enable a `struct lttng_event_enabler` object and all events related to this
+ * enabler.
+ */
+int lttng_event_enabler_enable(struct lttng_event_enabler *enabler)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Disable a `struct lttng_event_enabler` object and all events related to this
+ * enabler.
+ */
+int lttng_event_enabler_disable(struct lttng_event_enabler *enabler)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Attach filter bytecode program to `struct lttng_event_enabler` and all
+ * events related to this enabler.
+ */
+int lttng_event_enabler_attach_filter_bytecode(
+ struct lttng_event_enabler *enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Attach an application context to an event enabler.
+ *
+ * Not implemented.
+ */
+int lttng_event_enabler_attach_context(struct lttng_event_enabler *enabler,
+ struct lttng_ust_abi_context *ctx)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Attach exclusion list to `struct lttng_event_enabler` and all
+ * events related to this enabler.
+ */
+int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *enabler,
+ struct lttng_ust_excluder_node **excluder)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Synchronize bytecodes for the enabler and the instance (event or
+ * event_notifier).
+ *
+ * This function goes over all bytecode programs of the enabler (event or
+ * event_notifier enabler) to ensure each is linked to the provided instance.
+ */
+void lttng_enabler_link_bytecode(const struct lttng_ust_event_desc *event_desc,
+ struct lttng_ust_ctx **ctx,
+ struct cds_list_head *instance_bytecode_runtime_head,
+ struct cds_list_head *enabler_bytecode_runtime_head)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Allocate and initialize a `struct lttng_event_notifier_group` object.
+ *
+ * On success, returns a `struct lttng_event_notifier_group`,
+ * on memory error, returns NULL.
+ */
+struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Destroy a `struct lttng_event_notifier_group` object.
+ */
+void lttng_event_notifier_group_destroy(
+ struct lttng_event_notifier_group *event_notifier_group)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Allocate and initialize a `struct lttng_event_notifier_enabler` object.
+ *
+ * On success, returns a `struct lttng_event_notifier_enabler`,
+ * On memory error, returns NULL.
+ */
+struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
+ struct lttng_event_notifier_group *event_notifier_group,
+ enum lttng_enabler_format_type format_type,
+ struct lttng_ust_abi_event_notifier *event_notifier_param)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Destroy a `struct lttng_event_notifier_enabler` object.
+ */
+void lttng_event_notifier_enabler_destroy(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Enable a `struct lttng_event_notifier_enabler` object and all event
+ * notifiers related to this enabler.
+ */
+int lttng_event_notifier_enabler_enable(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Disable a `struct lttng_event_notifier_enabler` object and all event
+ * notifiers related to this enabler.
+ */
+int lttng_event_notifier_enabler_disable(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Attach filter bytecode program to `struct lttng_event_notifier_enabler` and
+ * all event notifiers related to this enabler.
+ */
+int lttng_event_notifier_enabler_attach_filter_bytecode(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Attach capture bytecode program to `struct lttng_event_notifier_enabler` and
+ * all event_notifiers related to this enabler.
+ */
+int lttng_event_notifier_enabler_attach_capture_bytecode(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Attach exclusion list to `struct lttng_event_notifier_enabler` and all
+ * event notifiers related to this enabler.
+ */
+int lttng_event_notifier_enabler_attach_exclusion(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_excluder_node **excluder)
+ __attribute__((visibility("hidden")));
+
+void lttng_free_event_filter_runtime(struct lttng_ust_event_common *event)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Connect the probe on all enablers matching this event description.
+ * Called on library load.
+ */
+int lttng_fix_pending_event_notifiers(void)
+ __attribute__((visibility("hidden")));
+
+struct lttng_counter *lttng_ust_counter_create(
+ const char *counter_transport_name,
+ size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
+ __attribute__((visibility("hidden")));
+
+#ifdef HAVE_LINUX_PERF_EVENT_H
+
+int lttng_add_perf_counter_to_ctx(uint32_t type,
+ uint64_t config,
+ const char *name,
+ struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_perf_counter_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_perf_counter_exit(void)
+ __attribute__((visibility("hidden")));
+
+#else /* #ifdef HAVE_LINUX_PERF_EVENT_H */
+
+static inline
+int lttng_add_perf_counter_to_ctx(uint32_t type,
+ uint64_t config,
+ const char *name,
+ struct lttng_ust_ctx **ctx)
+{
+ return -ENOSYS;
+}
+static inline
+int lttng_perf_counter_init(void)
+{
+ return 0;
+}
+static inline
+void lttng_perf_counter_exit(void)
+{
+}
+#endif /* #else #ifdef HAVE_LINUX_PERF_EVENT_H */
+
+int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list)
+ __attribute__((visibility("hidden")));
+
+void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list)
+ __attribute__((visibility("hidden")));
+
+int lttng_probes_get_field_list(struct lttng_ust_field_list *list)
+ __attribute__((visibility("hidden")));
+
+void lttng_probes_prune_field_list(struct lttng_ust_field_list *list)
+ __attribute__((visibility("hidden")));
+
+struct lttng_ust_abi_tracepoint_iter *
+ lttng_ust_tracepoint_list_get_iter_next(struct lttng_ust_tracepoint_list *list)
+ __attribute__((visibility("hidden")));
+
+struct lttng_ust_abi_field_iter *
+ lttng_ust_field_list_get_iter_next(struct lttng_ust_field_list *list)
+ __attribute__((visibility("hidden")));
+
+struct lttng_ust_session *lttng_session_create(void)
+ __attribute__((visibility("hidden")));
+
+int lttng_session_enable(struct lttng_ust_session *session)
+ __attribute__((visibility("hidden")));
+
+int lttng_session_disable(struct lttng_ust_session *session)
+ __attribute__((visibility("hidden")));
+
+int lttng_session_statedump(struct lttng_ust_session *session)
+ __attribute__((visibility("hidden")));
+
+void lttng_session_destroy(struct lttng_ust_session *session)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Called with ust lock held.
+ */
+int lttng_session_active(void)
+ __attribute__((visibility("hidden")));
+
+struct cds_list_head *lttng_get_sessions(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_handle_pending_statedump(void *owner)
+ __attribute__((visibility("hidden")));
+
+int lttng_channel_enable(struct lttng_ust_channel_common *lttng_channel)
+ __attribute__((visibility("hidden")));
+
+int lttng_channel_disable(struct lttng_ust_channel_common *lttng_channel)
+ __attribute__((visibility("hidden")));
+
+void lttng_transport_register(struct lttng_transport *transport)
+ __attribute__((visibility("hidden")));
+
+void lttng_transport_unregister(struct lttng_transport *transport)
+ __attribute__((visibility("hidden")));
+
+/* This is ABI between liblttng-ust and liblttng-ust-ctl */
+struct lttng_transport *lttng_ust_transport_find(const char *name);
+
+/* This is ABI between liblttng-ust and liblttng-ust-dl */
+void lttng_ust_dl_update(void *ip);
+
+void lttng_probe_provider_unregister_events(const struct lttng_ust_probe_desc *desc)
+ __attribute__((visibility("hidden")));
+
+int lttng_fix_pending_events(void)
+ __attribute__((visibility("hidden")));
+
+struct cds_list_head *lttng_get_probe_list_head(void)
+ __attribute__((visibility("hidden")));
+
+struct lttng_enum *lttng_ust_enum_get_from_desc(struct lttng_ust_session *session,
+ const struct lttng_ust_enum_desc *enum_desc)
+ __attribute__((visibility("hidden")));
+
+int lttng_abi_create_root_handle(void)
+ __attribute__((visibility("hidden")));
+
+const struct lttng_ust_abi_objd_ops *lttng_ust_abi_objd_ops(int id)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_abi_objd_unref(int id, int is_owner)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_abi_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_abi_events_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_abi_objd_table_owner_cleanup(void *owner)
+ __attribute__((visibility("hidden")));
+
+struct lttng_ust_channel_buffer *lttng_ust_alloc_channel_buffer(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_free_channel_common(struct lttng_ust_channel_common *chan)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_interpret_event_filter(struct lttng_ust_event_common *event,
+ const char *interpreter_stack_data,
+ void *filter_ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_session_uuid_validate(struct lttng_ust_session *session,
+ unsigned char *uuid)
+ __attribute__((visibility("hidden")));
+
+bool lttng_ust_validate_event_name(const struct lttng_ust_event_desc *desc)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_format_event_name(const struct lttng_ust_event_desc *desc,
+ char *name)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_add_app_context_to_ctx_rcu(const char *name, struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_context_set_provider_rcu(struct lttng_ust_ctx **_ctx,
+ const char *name,
+ size_t (*get_size)(void *priv, size_t offset),
+ void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan),
+ void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
+ void *priv)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_context_set_session_provider(const char *name,
+ size_t (*get_size)(void *priv, size_t offset),
+ void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan),
+ void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
+ void *priv)
+ __attribute__((visibility("hidden")));
+
+#endif /* _LTTNG_UST_EVENTS_INTERNAL_H */
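A minimal usage sketch of the enabler entry points declared above, assuming `LTTNG_ENABLER_FORMAT_EVENT` as a value of `enum lttng_enabler_format_type` (the other identifiers come from this header); in-tree, enabler creation and teardown are driven from the ABI layer:

    /* Sketch: create and enable an event enabler on an existing channel. */
    static int enable_one_event(struct lttng_ust_channel_buffer *chan,
                    struct lttng_ust_abi_event *event_param)
    {
            struct lttng_event_enabler *enabler;

            enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_EVENT,        /* assumed constant */
                            event_param, chan);
            if (!enabler)
                    return -ENOMEM;
            /* Teardown (lttng_event_enabler_destroy) is left to the session layer. */
            return lttng_event_enabler_enable(enabler);
    }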
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#define TRACEPOINT_CREATE_PROBES
+#define TP_IP_PARAM ip
+#include "ust_lib.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_lib
+
+#if !defined(_TRACEPOINT_UST_LIB_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_UST_LIB_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define LTTNG_UST_LIB_PROVIDER
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT(lttng_ust_lib, load,
+ TP_ARGS(void *, ip, void *, baddr, const char*, path,
+ uint64_t, memsz, uint8_t, has_build_id,
+ uint8_t, has_debug_link),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_integer(uint64_t, memsz, memsz)
+ ctf_string(path, path)
+ ctf_integer(uint8_t, has_build_id, has_build_id)
+ ctf_integer(uint8_t, has_debug_link, has_debug_link)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_lib, build_id,
+ TP_ARGS(
+ void *, ip,
+ void *, baddr,
+ uint8_t *, build_id,
+ size_t, build_id_len
+ ),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_sequence_hex(uint8_t, build_id, build_id,
+ size_t, build_id_len)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_lib, debug_link,
+ TP_ARGS(
+ void *, ip,
+ void *, baddr,
+ char *, filename,
+ uint32_t, crc
+ ),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_integer(uint32_t, crc, crc)
+ ctf_string(filename, filename)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_lib, unload,
+ TP_ARGS(void *, ip, void *, baddr),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ )
+)
+
+#endif /* _TRACEPOINT_UST_LIB_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./ust_lib.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
+
+#ifdef __cplusplus
+}
+#endif
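For context, a translation unit that includes "ust_lib.h" would fire the load event defined above through the classic tracepoint() macro (a sketch; the argument names are placeholders, and the real call sites live in the dl/statedump instrumentation):

    tracepoint(lttng_ust_lib, load, ip, baddr, path, memsz,
                    has_build_id, has_debug_link);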
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _UST_WAIT_H
+#define _UST_WAIT_H
+
+#include <poll.h>
+
+/*
+ * Wait until "cond" gets true or timeout (in ms).
+ */
+#define wait_cond_interruptible_timeout(_cond, _timeout) \
+ ({ \
+ int __ret = 0, __pollret; \
+ int __timeout = _timeout; \
+ \
+ for (;;) { \
+ if (_cond) \
+ break; \
+ if (__timeout <= 0) { \
+ __ret = -ETIMEDOUT; \
+ break; \
+ } \
+ __pollret = poll(NULL, 0, 10); /* wait 10ms */ \
+ if (__pollret < 0) { \
+ __ret = -errno; \
+ break; \
+ } \
+ __timeout -= 10; \
+ } \
+ __ret; \
+ })
+
+
+#endif /* _UST_WAIT_H */
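A minimal sketch of the macro above in use, assuming a `ready_flag` condition published by another thread and an arbitrary 3000 ms budget; the expansion evaluates to 0 once the condition holds, -ETIMEDOUT on expiry, or a negative errno if poll() fails:

    ret = wait_cond_interruptible_timeout(CMM_LOAD_SHARED(ready_flag), 3000);
    if (ret == -ETIMEDOUT)
            DBG("timed out waiting for ready_flag");
    else if (ret < 0)
            DBG("poll() failed: %s", strerror(-ret));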
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
-
-noinst_LTLIBRARIES = liblttng-ust-runtime.la liblttng-ust-support.la
-
-lib_LTLIBRARIES = liblttng-ust-common.la liblttng-ust-tracepoint.la liblttng-ust.la
-
-# ust-common
-liblttng_ust_common_la_SOURCES = \
- fd-tracker.c \
- ust-common.c \
- lttng-ust-urcu.c \
- lttng-ust-urcu-pointer.c
-
-liblttng_ust_common_la_LIBADD = \
- $(top_builddir)/src/common/libcommon.la
-
-liblttng_ust_common_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
-
-liblttng_ust_tracepoint_la_SOURCES = \
- tracepoint.c \
- tracepoint-weak-test.c \
- tracepoint-internal.h \
- lttng-tracer-core.h \
- jhash.h \
- error.h
-
-liblttng_ust_tracepoint_la_LIBADD = \
- liblttng-ust-common.la \
- $(top_builddir)/src/common/libcommon.la \
- $(DL_LIBS)
-
-liblttng_ust_tracepoint_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
-liblttng_ust_tracepoint_la_CFLAGS = -DUST_COMPONENT="liblttng_ust_tracepoint" $(AM_CFLAGS)
-
-liblttng_ust_runtime_la_SOURCES = \
- bytecode.h \
- lttng-ust-comm.c \
- lttng-ust-abi.c \
- lttng-probes.c \
- lttng-bytecode.c \
- lttng-bytecode.h \
- lttng-bytecode-validator.c \
- lttng-bytecode-specialize.c \
- lttng-bytecode-interpreter.c \
- lttng-context-provider.c \
- lttng-context-vtid.c \
- lttng-context-vpid.c \
- lttng-context-pthread-id.c \
- lttng-context-procname.c \
- lttng-context-ip.c \
- lttng-context-cpu-id.c \
- lttng-context-cgroup-ns.c \
- lttng-context-ipc-ns.c \
- lttng-context-mnt-ns.c \
- lttng-context-net-ns.c \
- lttng-context-pid-ns.c \
- lttng-context-time-ns.c \
- lttng-context-user-ns.c \
- lttng-context-uts-ns.c \
- lttng-context-vuid.c \
- lttng-context-veuid.c \
- lttng-context-vsuid.c \
- lttng-context-vgid.c \
- lttng-context-vegid.c \
- lttng-context-vsgid.c \
- lttng-context.c \
- lttng-events.c \
- lttng-hash-helper.h \
- lttng-ust-elf.c \
- lttng-ust-elf.h \
- lttng-ust-statedump.c \
- lttng-ust-statedump.h \
- lttng-ust-statedump-provider.h \
- ust_lib.c \
- ust_lib.h \
- context-internal.h \
- context-provider-internal.h \
- tracepoint-internal.h \
- ust-events-internal.h \
- clock.h \
- wait.h \
- jhash.h \
- lttng-ust-uuid.h \
- error.h \
- tracef.c \
- lttng-ust-tracef-provider.h \
- tracelog.c \
- lttng-ust-tracelog-provider.h \
- getenv.h \
- string-utils.c \
- string-utils.h \
- event-notifier-notification.c \
- ns.h \
- creds.h \
- rculfhash.c \
- rculfhash.h \
- rculfhash-internal.h \
- rculfhash-mm-chunk.c \
- rculfhash-mm-mmap.c \
- rculfhash-mm-order.c \
- compat_futex.c \
- futex.h
-
-if HAVE_PERF_EVENT
-liblttng_ust_runtime_la_SOURCES += \
- lttng-context-perf-counters.c \
- perf_event.h
-endif
-
-liblttng_ust_support_la_SOURCES = \
- lttng-tracer.h \
- lttng-tracer-core.h \
- ust-core.c \
- getenv.h \
- getenv.c \
- lttng-ust-dynamic-type.c \
- lttng-rb-clients.h \
- lttng-ring-buffer-client-template.h \
- lttng-ring-buffer-client-discard.c \
- lttng-ring-buffer-client-discard-rt.c \
- lttng-ring-buffer-client-overwrite.c \
- lttng-ring-buffer-client-overwrite-rt.c \
- lttng-ring-buffer-metadata-client-template.h \
- lttng-ring-buffer-metadata-client.c \
- lttng-counter-client.h \
- lttng-counter-client-percpu-32-modular.c \
- lttng-counter-client-percpu-64-modular.c \
- lttng-clock.c lttng-getcpu.c
-
-liblttng_ust_la_SOURCES =
-
-liblttng_ust_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
-
-liblttng_ust_support_la_LIBADD = \
- $(top_builddir)/src/common/libringbuffer.la \
- $(top_builddir)/src/common/libcounter.la
-
-liblttng_ust_la_LIBADD = \
- -lrt \
- liblttng-ust-common.la \
- $(top_builddir)/src/common/libustcomm.la \
- $(top_builddir)/src/common/libcommon.la \
- liblttng-ust-tracepoint.la \
- liblttng-ust-runtime.la liblttng-ust-support.la \
- $(DL_LIBS)
-
-liblttng_ust_la_CFLAGS = -DUST_COMPONENT="liblttng_ust" $(AM_CFLAGS)
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright 2012-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _BYTECODE_H
-#define _BYTECODE_H
-
-#include <stdint.h>
-#include <lttng/ust-abi.h>
-
-/*
- * offsets are absolute from start of bytecode.
- */
-
-struct field_ref {
- /* Initially, symbol offset. After link, field offset. */
- uint16_t offset;
-} __attribute__((packed));
-
-struct get_symbol {
- /* Symbol offset. */
- uint16_t offset;
-} __attribute__((packed));
-
-struct get_index_u16 {
- uint16_t index;
-} __attribute__((packed));
-
-struct get_index_u64 {
- uint64_t index;
-} __attribute__((packed));
-
-struct literal_numeric {
- int64_t v;
-} __attribute__((packed));
-
-struct literal_double {
- double v;
-} __attribute__((packed));
-
-struct literal_string {
- char string[0];
-} __attribute__((packed));
-
-enum bytecode_op {
- BYTECODE_OP_UNKNOWN = 0,
-
- BYTECODE_OP_RETURN = 1,
-
- /* binary */
- BYTECODE_OP_MUL = 2,
- BYTECODE_OP_DIV = 3,
- BYTECODE_OP_MOD = 4,
- BYTECODE_OP_PLUS = 5,
- BYTECODE_OP_MINUS = 6,
- BYTECODE_OP_BIT_RSHIFT = 7,
- BYTECODE_OP_BIT_LSHIFT = 8,
- BYTECODE_OP_BIT_AND = 9,
- BYTECODE_OP_BIT_OR = 10,
- BYTECODE_OP_BIT_XOR = 11,
-
- /* binary comparators */
- BYTECODE_OP_EQ = 12,
- BYTECODE_OP_NE = 13,
- BYTECODE_OP_GT = 14,
- BYTECODE_OP_LT = 15,
- BYTECODE_OP_GE = 16,
- BYTECODE_OP_LE = 17,
-
- /* string binary comparator: apply to */
- BYTECODE_OP_EQ_STRING = 18,
- BYTECODE_OP_NE_STRING = 19,
- BYTECODE_OP_GT_STRING = 20,
- BYTECODE_OP_LT_STRING = 21,
- BYTECODE_OP_GE_STRING = 22,
- BYTECODE_OP_LE_STRING = 23,
-
- /* s64 binary comparator */
- BYTECODE_OP_EQ_S64 = 24,
- BYTECODE_OP_NE_S64 = 25,
- BYTECODE_OP_GT_S64 = 26,
- BYTECODE_OP_LT_S64 = 27,
- BYTECODE_OP_GE_S64 = 28,
- BYTECODE_OP_LE_S64 = 29,
-
- /* double binary comparator */
- BYTECODE_OP_EQ_DOUBLE = 30,
- BYTECODE_OP_NE_DOUBLE = 31,
- BYTECODE_OP_GT_DOUBLE = 32,
- BYTECODE_OP_LT_DOUBLE = 33,
- BYTECODE_OP_GE_DOUBLE = 34,
- BYTECODE_OP_LE_DOUBLE = 35,
-
- /* Mixed S64-double binary comparators */
- BYTECODE_OP_EQ_DOUBLE_S64 = 36,
- BYTECODE_OP_NE_DOUBLE_S64 = 37,
- BYTECODE_OP_GT_DOUBLE_S64 = 38,
- BYTECODE_OP_LT_DOUBLE_S64 = 39,
- BYTECODE_OP_GE_DOUBLE_S64 = 40,
- BYTECODE_OP_LE_DOUBLE_S64 = 41,
-
- BYTECODE_OP_EQ_S64_DOUBLE = 42,
- BYTECODE_OP_NE_S64_DOUBLE = 43,
- BYTECODE_OP_GT_S64_DOUBLE = 44,
- BYTECODE_OP_LT_S64_DOUBLE = 45,
- BYTECODE_OP_GE_S64_DOUBLE = 46,
- BYTECODE_OP_LE_S64_DOUBLE = 47,
-
- /* unary */
- BYTECODE_OP_UNARY_PLUS = 48,
- BYTECODE_OP_UNARY_MINUS = 49,
- BYTECODE_OP_UNARY_NOT = 50,
- BYTECODE_OP_UNARY_PLUS_S64 = 51,
- BYTECODE_OP_UNARY_MINUS_S64 = 52,
- BYTECODE_OP_UNARY_NOT_S64 = 53,
- BYTECODE_OP_UNARY_PLUS_DOUBLE = 54,
- BYTECODE_OP_UNARY_MINUS_DOUBLE = 55,
- BYTECODE_OP_UNARY_NOT_DOUBLE = 56,
-
- /* logical */
- BYTECODE_OP_AND = 57,
- BYTECODE_OP_OR = 58,
-
- /* load field ref */
- BYTECODE_OP_LOAD_FIELD_REF = 59,
- BYTECODE_OP_LOAD_FIELD_REF_STRING = 60,
- BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE = 61,
- BYTECODE_OP_LOAD_FIELD_REF_S64 = 62,
- BYTECODE_OP_LOAD_FIELD_REF_DOUBLE = 63,
-
- /* load immediate from operand */
- BYTECODE_OP_LOAD_STRING = 64,
- BYTECODE_OP_LOAD_S64 = 65,
- BYTECODE_OP_LOAD_DOUBLE = 66,
-
- /* cast */
- BYTECODE_OP_CAST_TO_S64 = 67,
- BYTECODE_OP_CAST_DOUBLE_TO_S64 = 68,
- BYTECODE_OP_CAST_NOP = 69,
-
- /* get context ref */
- BYTECODE_OP_GET_CONTEXT_REF = 70,
- BYTECODE_OP_GET_CONTEXT_REF_STRING = 71,
- BYTECODE_OP_GET_CONTEXT_REF_S64 = 72,
- BYTECODE_OP_GET_CONTEXT_REF_DOUBLE = 73,
-
- /* load userspace field ref */
- BYTECODE_OP_LOAD_FIELD_REF_USER_STRING = 74,
- BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75,
-
- /*
- * load immediate star globbing pattern (literal string)
- * from immediate
- */
- BYTECODE_OP_LOAD_STAR_GLOB_STRING = 76,
-
- /* globbing pattern binary operator: apply to */
- BYTECODE_OP_EQ_STAR_GLOB_STRING = 77,
- BYTECODE_OP_NE_STAR_GLOB_STRING = 78,
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- BYTECODE_OP_GET_CONTEXT_ROOT = 79,
- BYTECODE_OP_GET_APP_CONTEXT_ROOT = 80,
- BYTECODE_OP_GET_PAYLOAD_ROOT = 81,
-
- BYTECODE_OP_GET_SYMBOL = 82,
- BYTECODE_OP_GET_SYMBOL_FIELD = 83,
- BYTECODE_OP_GET_INDEX_U16 = 84,
- BYTECODE_OP_GET_INDEX_U64 = 85,
-
- BYTECODE_OP_LOAD_FIELD = 86,
- BYTECODE_OP_LOAD_FIELD_S8 = 87,
- BYTECODE_OP_LOAD_FIELD_S16 = 88,
- BYTECODE_OP_LOAD_FIELD_S32 = 89,
- BYTECODE_OP_LOAD_FIELD_S64 = 90,
- BYTECODE_OP_LOAD_FIELD_U8 = 91,
- BYTECODE_OP_LOAD_FIELD_U16 = 92,
- BYTECODE_OP_LOAD_FIELD_U32 = 93,
- BYTECODE_OP_LOAD_FIELD_U64 = 94,
- BYTECODE_OP_LOAD_FIELD_STRING = 95,
- BYTECODE_OP_LOAD_FIELD_SEQUENCE = 96,
- BYTECODE_OP_LOAD_FIELD_DOUBLE = 97,
-
- BYTECODE_OP_UNARY_BIT_NOT = 98,
-
- BYTECODE_OP_RETURN_S64 = 99,
-
- NR_BYTECODE_OPS,
-};
-
-typedef uint8_t bytecode_opcode_t;
-
-struct load_op {
- bytecode_opcode_t op;
- /*
- * data to load. Size known by enum bytecode_opcode and null-term char.
- */
- char data[0];
-} __attribute__((packed));
-
-struct binary_op {
- bytecode_opcode_t op;
-} __attribute__((packed));
-
-struct unary_op {
- bytecode_opcode_t op;
-} __attribute__((packed));
-
-/* skip_offset is absolute from start of bytecode */
-struct logical_op {
- bytecode_opcode_t op;
- uint16_t skip_offset; /* bytecode insn, if skip second test */
-} __attribute__((packed));
-
-struct cast_op {
- bytecode_opcode_t op;
-} __attribute__((packed));
-
-struct return_op {
- bytecode_opcode_t op;
-} __attribute__((packed));
-
-#endif /* _BYTECODE_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010 Pierre-Marc Fournier
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _UST_CLOCK_H
-#define _UST_CLOCK_H
-
-#include <time.h>
-#include <sys/time.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <urcu/system.h>
-#include <urcu/arch.h>
-#include <lttng/ust-clock.h>
-
-#include "lttng-ust-uuid.h"
-
-struct lttng_ust_trace_clock {
- uint64_t (*read64)(void);
- uint64_t (*freq)(void);
- int (*uuid)(char *uuid);
- const char *(*name)(void);
- const char *(*description)(void);
-};
-
-extern struct lttng_ust_trace_clock *lttng_ust_trace_clock
- __attribute__((visibility("hidden")));
-
-void lttng_ust_clock_init(void);
-
-/* Use the kernel MONOTONIC clock. */
-
-static __inline__
-uint64_t trace_clock_read64_monotonic(void)
-{
- struct timespec ts;
-
- if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts))) {
- ts.tv_sec = 0;
- ts.tv_nsec = 0;
- }
- return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
-}
-
-static __inline__
-uint64_t trace_clock_read64(void)
-{
- struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
-
- if (caa_likely(!ltc)) {
- return trace_clock_read64_monotonic();
- } else {
- cmm_read_barrier_depends(); /* load ltc before content */
- return ltc->read64();
- }
-}
-
-#endif /* _UST_CLOCK_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Userspace RCU library - sys_futex compatibility code
- */
-
-#include <stdio.h>
-#include <pthread.h>
-#include <signal.h>
-#include <assert.h>
-#include <errno.h>
-#include <poll.h>
-#include <stdint.h>
-
-#include <urcu/arch.h>
-#include <urcu/system.h>
-#include "futex.h"
-
-/*
- * Using attribute "weak" for __lttng_ust_compat_futex_lock and
- * __lttng_ust_compat_futex_cond. Those are globally visible by the entire
- * program, even though many shared objects may have their own version.
- * The first version that gets loaded will be used by the entire program
- * (executable and all shared objects).
- */
-
-__attribute__((weak))
-pthread_mutex_t __lttng_ust_compat_futex_lock = PTHREAD_MUTEX_INITIALIZER;
-__attribute__((weak))
-pthread_cond_t __lttng_ust_compat_futex_cond = PTHREAD_COND_INITIALIZER;
-
-/*
- * _NOT SIGNAL-SAFE_. pthread_cond is not signal-safe anyway. Though.
- * For now, timeout, uaddr2 and val3 are unused.
- * Waiter will relinquish the CPU until woken up.
- */
-
-int lttng_ust_compat_futex_noasync(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- int ret = 0, lockret;
-
- /*
- * Check if NULL. Don't let users expect that they are taken into
- * account.
- */
- assert(!timeout);
- assert(!uaddr2);
- assert(!val3);
-
- /*
- * memory barriers to serialize with the previous uaddr modification.
- */
- cmm_smp_mb();
-
- lockret = pthread_mutex_lock(&__lttng_ust_compat_futex_lock);
- if (lockret) {
- errno = lockret;
- ret = -1;
- goto end;
- }
- switch (op) {
- case FUTEX_WAIT:
- /*
- * Wait until *uaddr is changed to something else than "val".
- * Comparing *uaddr content against val figures out which
- * thread has been awakened.
- */
- while (CMM_LOAD_SHARED(*uaddr) == val)
- pthread_cond_wait(&__lttng_ust_compat_futex_cond,
- &__lttng_ust_compat_futex_lock);
- break;
- case FUTEX_WAKE:
- /*
- * Each wake is sending a broadcast, thus attempting wakeup of
- * all awaiting threads, independently of their respective
- * uaddr.
- */
- pthread_cond_broadcast(&__lttng_ust_compat_futex_cond);
- break;
- default:
- errno = EINVAL;
- ret = -1;
- }
- lockret = pthread_mutex_unlock(&__lttng_ust_compat_futex_lock);
- if (lockret) {
- errno = lockret;
- ret = -1;
- }
-end:
- return ret;
-}
-
-/*
- * _ASYNC SIGNAL-SAFE_.
- * For now, timeout, uaddr2 and val3 are unused.
- * Waiter will busy-loop trying to read the condition.
- * It is OK to use compat_futex_async() on a futex address on which
- * futex() WAKE operations are also performed.
- */
-
-int lttng_ust_compat_futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- int ret = 0;
-
- /*
- * Check if NULL. Don't let users expect that they are taken into
- * account.
- */
- assert(!timeout);
- assert(!uaddr2);
- assert(!val3);
-
- /*
- * Ensure previous memory operations on uaddr have completed.
- */
- cmm_smp_mb();
-
- switch (op) {
- case FUTEX_WAIT:
- while (CMM_LOAD_SHARED(*uaddr) == val) {
- if (poll(NULL, 0, 10) < 0) {
- ret = -1;
- /* Keep poll errno. Caller handles EINTR. */
- goto end;
- }
- }
- break;
- case FUTEX_WAKE:
- break;
- default:
- errno = EINVAL;
- ret = -1;
- }
-end:
- return ret;
-}
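To make the wait/wake pairing concrete, a minimal sketch assuming a shared `state` word (CMM_LOAD_SHARED and CMM_STORE_SHARED come from liburcu); the waiter uses the noasync flavour since it does not run in a signal handler:

    /* Waiter: block while the shared word is still 0. */
    while (CMM_LOAD_SHARED(state) == 0)
            lttng_ust_compat_futex_noasync(&state, FUTEX_WAIT, 0, NULL, NULL, 0);

    /* Waker: publish the new value, then wake (broadcast) waiters. */
    CMM_STORE_SHARED(state, 1);
    lttng_ust_compat_futex_noasync(&state, FUTEX_WAKE, 1, NULL, NULL, 0);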
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright 2020 (C) Francis Deslauriers <francis.deslauriers@efficios.com>
- */
-
-#ifndef _LTTNG_UST_CONTEXT_INTERNAL_H
-#define _LTTNG_UST_CONTEXT_INTERNAL_H
-
-#include <lttng/ust-events.h>
-#include "ust-events-internal.h"
-#include "common/ust-context-provider.h"
-
-int lttng_context_init_all(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_attach_context(struct lttng_ust_abi_context *context_param,
- union lttng_ust_abi_args *uargs,
- struct lttng_ust_ctx **ctx, struct lttng_ust_session *session)
- __attribute__((visibility("hidden")));
-
-int lttng_find_context(struct lttng_ust_ctx *ctx, const char *name)
- __attribute__((visibility("hidden")));
-
-int lttng_get_context_index(struct lttng_ust_ctx *ctx, const char *name)
- __attribute__((visibility("hidden")));
-
-void lttng_destroy_context(struct lttng_ust_ctx *ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_context_append_rcu(struct lttng_ust_ctx **ctx_p,
- const struct lttng_ust_ctx_field *f)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_context_append(struct lttng_ust_ctx **ctx_p,
- const struct lttng_ust_ctx_field *f)
- __attribute__((visibility("hidden")));
-
-int lttng_context_is_app(const char *name)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vtid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vpid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_cgroup_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_ipc_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_mnt_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_net_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_pid_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_user_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_uts_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_time_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vuid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_veuid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vsuid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vgid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vegid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vsgid_reset(void)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vtid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vpid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_pthread_id_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_procname_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_ip_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_cpu_id_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_dyntest_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_cgroup_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_ipc_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_mnt_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_net_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_pid_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_user_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_uts_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_time_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vuid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_veuid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vsuid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vgid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vegid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vsgid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_UST_CONTEXT_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright 2019 Francis Deslauriers <francis.deslauriers@efficios.com>
- */
-
-#ifndef _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H
-#define _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H
-
-#include <stddef.h>
-#include <lttng/ust-events.h>
-
-void lttng_ust_context_set_event_notifier_group_provider(const char *name,
- size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan),
- void (*get_value)(void *priv,
- struct lttng_ust_ctx_value *value),
- void *priv)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#ifndef _LTTNG_CREDS_H
-#define _LTTNG_CREDS_H
-
-/*
- * This is used in the kernel as an invalid value.
- */
-
-#define INVALID_UID (uid_t) -1
-#define INVALID_GID (gid_t) -1
-
-#endif /* _LTTNG_CREDS_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_ERROR_H
-#define _LTTNG_ERROR_H
-
-#include <urcu/compiler.h>
-#include <unistd.h>
-
-#define MAX_ERRNO 4095
-
-static inline
-int IS_ERR_VALUE(long value)
-{
- if (caa_unlikely((unsigned long) value >= (unsigned long) -MAX_ERRNO))
- return 1;
- else
- return 0;
-}
-
-static inline
-void *ERR_PTR(long error)
-{
- return (void *) error;
-}
-
-static inline
-long PTR_ERR(const void *ptr)
-{
- return (long) ptr;
-}
-
-static inline
-int IS_ERR(const void *ptr)
-{
- return IS_ERR_VALUE((long) ptr);
-}
-
-#endif /* _LTTNG_ERROR_H */
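A short sketch of how these kernel-style error-pointer helpers are used; `try_create()` is hypothetical, while `lttng_session_create()` is the allocator declared elsewhere in this series:

    static struct lttng_ust_session *try_create(void)
    {
            struct lttng_ust_session *session = lttng_session_create();

            if (!session)
                    return ERR_PTR(-ENOMEM);
            return session;
    }

    /* Caller side: */
    session = try_create();
    if (IS_ERR(session))
            return PTR_ERR(session);        /* recover the negative errno value */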
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-
-#include <assert.h>
-#include <errno.h>
-#include <limits.h>
-
-#include <lttng/ust-endian.h>
-#include "common/logging.h"
-#include <urcu/rculist.h>
-
-#include "lttng-tracer-core.h"
-#include "ust-events-internal.h"
-#include "common/msgpack/msgpack.h"
-#include "lttng-bytecode.h"
-#include "common/patient.h"
-
-/*
- * We want this write to be atomic AND non-blocking, meaning that we
- * want to write either everything OR nothing.
- * According to `pipe(7)`, writes that are less than `PIPE_BUF` bytes must be
- * atomic, so we bound the capture buffer size to the `PIPE_BUF` minus the size
- * of the notification struct we are sending alongside the capture buffer.
- */
-#define CAPTURE_BUFFER_SIZE \
- (PIPE_BUF - sizeof(struct lttng_ust_abi_event_notifier_notification) - 1)
-
-struct lttng_event_notifier_notification {
- int notification_fd;
- uint64_t event_notifier_token;
- uint8_t capture_buf[CAPTURE_BUFFER_SIZE];
- struct lttng_msgpack_writer writer;
- bool has_captures;
-};
-
-static
-void capture_enum(struct lttng_msgpack_writer *writer,
- struct lttng_interpreter_output *output)
-{
- lttng_msgpack_begin_map(writer, 2);
- lttng_msgpack_write_str(writer, "type");
- lttng_msgpack_write_str(writer, "enum");
-
- lttng_msgpack_write_str(writer, "value");
-
- switch (output->type) {
- case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
- lttng_msgpack_write_signed_integer(writer, output->u.s);
- break;
- case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
- lttng_msgpack_write_signed_integer(writer, output->u.u);
- break;
- default:
- abort();
- }
-
- lttng_msgpack_end_map(writer);
-}
-
-static
-int64_t capture_sequence_element_signed(uint8_t *ptr,
- const struct lttng_ust_type_integer *integer_type)
-{
- int64_t value;
- unsigned int size = integer_type->size;
- bool byte_order_reversed = integer_type->reverse_byte_order;
-
- switch (size) {
- case 8:
- value = *ptr;
- break;
- case 16:
- {
- int16_t tmp;
- tmp = *(int16_t *) ptr;
- if (byte_order_reversed)
- tmp = bswap_16(tmp);
-
- value = tmp;
- break;
- }
- case 32:
- {
- int32_t tmp;
- tmp = *(int32_t *) ptr;
- if (byte_order_reversed)
- tmp = bswap_32(tmp);
-
- value = tmp;
- break;
- }
- case 64:
- {
- int64_t tmp;
- tmp = *(int64_t *) ptr;
- if (byte_order_reversed)
- tmp = bswap_64(tmp);
-
- value = tmp;
- break;
- }
- default:
- abort();
- }
-
- return value;
-}
-
-static
-uint64_t capture_sequence_element_unsigned(uint8_t *ptr,
- const struct lttng_ust_type_integer *integer_type)
-{
- uint64_t value;
- unsigned int size = integer_type->size;
- bool byte_order_reversed = integer_type->reverse_byte_order;
-
- switch (size) {
- case 8:
- value = *ptr;
- break;
- case 16:
- {
- uint16_t tmp;
- tmp = *(uint16_t *) ptr;
- if (byte_order_reversed)
- tmp = bswap_16(tmp);
-
- value = tmp;
- break;
- }
- case 32:
- {
- uint32_t tmp;
- tmp = *(uint32_t *) ptr;
- if (byte_order_reversed)
- tmp = bswap_32(tmp);
-
- value = tmp;
- break;
- }
- case 64:
- {
- uint64_t tmp;
- tmp = *(uint64_t *) ptr;
- if (byte_order_reversed)
- tmp = bswap_64(tmp);
-
- value = tmp;
- break;
- }
- default:
- abort();
- }
-
- return value;
-}
-
-static
-void capture_sequence(struct lttng_msgpack_writer *writer,
- struct lttng_interpreter_output *output)
-{
- const struct lttng_ust_type_integer *integer_type;
- const struct lttng_ust_type_common *nested_type;
- uint8_t *ptr;
- bool signedness;
- int i;
-
- lttng_msgpack_begin_array(writer, output->u.sequence.nr_elem);
-
- ptr = (uint8_t *) output->u.sequence.ptr;
- nested_type = output->u.sequence.nested_type;
- switch (nested_type->type) {
- case lttng_ust_type_integer:
- integer_type = lttng_ust_get_type_integer(nested_type);
- break;
- case lttng_ust_type_enum:
- /* Treat enumeration as an integer. */
- integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_enum(nested_type)->container_type);
- break;
- default:
- /* Capture of array of non-integer are not supported. */
- abort();
- }
- signedness = integer_type->signedness;
- for (i = 0; i < output->u.sequence.nr_elem; i++) {
- if (signedness) {
- lttng_msgpack_write_signed_integer(writer,
- capture_sequence_element_signed(ptr, integer_type));
- } else {
- lttng_msgpack_write_unsigned_integer(writer,
- capture_sequence_element_unsigned(ptr, integer_type));
- }
-
- /*
- * We assume that alignment is smaller or equal to the size.
- * This currently holds true but if it changes in the future,
- * we will want to change the pointer arithmetics below to
- * take into account that the next element might be further
- * away.
- */
- assert(integer_type->alignment <= integer_type->size);
-
- /* Size is in number of bits. */
- ptr += (integer_type->size / CHAR_BIT) ;
- }
-
- lttng_msgpack_end_array(writer);
-}
-
-static
-void notification_init(struct lttng_event_notifier_notification *notif,
- struct lttng_ust_event_notifier *event_notifier)
-{
-	struct lttng_msgpack_writer *writer = &notif->writer;
-
- notif->event_notifier_token = event_notifier->priv->parent.user_token;
- notif->notification_fd = event_notifier->priv->group->notification_fd;
- notif->has_captures = false;
-
- if (event_notifier->priv->num_captures > 0) {
- lttng_msgpack_writer_init(writer, notif->capture_buf,
- CAPTURE_BUFFER_SIZE);
-
- lttng_msgpack_begin_array(writer, event_notifier->priv->num_captures);
- notif->has_captures = true;
- }
-}
-
-static
-void notification_append_capture(
- struct lttng_event_notifier_notification *notif,
- struct lttng_interpreter_output *output)
-{
-	struct lttng_msgpack_writer *writer = &notif->writer;
-
- switch (output->type) {
- case LTTNG_INTERPRETER_TYPE_S64:
- lttng_msgpack_write_signed_integer(writer, output->u.s);
- break;
- case LTTNG_INTERPRETER_TYPE_U64:
- lttng_msgpack_write_unsigned_integer(writer, output->u.u);
- break;
- case LTTNG_INTERPRETER_TYPE_DOUBLE:
- lttng_msgpack_write_double(writer, output->u.d);
- break;
- case LTTNG_INTERPRETER_TYPE_STRING:
- lttng_msgpack_write_str(writer, output->u.str.str);
- break;
- case LTTNG_INTERPRETER_TYPE_SEQUENCE:
- capture_sequence(writer, output);
- break;
- case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
- case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
- capture_enum(writer, output);
- break;
- default:
- abort();
- }
-}
-
-static
-void notification_append_empty_capture(
- struct lttng_event_notifier_notification *notif)
-{
-	lttng_msgpack_write_nil(&notif->writer);
-}
-
-static void record_error(struct lttng_ust_event_notifier *event_notifier)
-{
- struct lttng_event_notifier_group *event_notifier_group =
- event_notifier->priv->group;
- struct lttng_counter *error_counter;
- size_t dimension_index[1];
- int ret;
-
- error_counter = CMM_LOAD_SHARED(event_notifier_group->error_counter);
- /*
- * load-acquire paired with store-release orders creation of the
- * error counter and setting error_counter_len before the
- * error_counter is used.
- * Currently a full memory barrier is used, which could be
- * turned into acquire-release barriers.
- */
- cmm_smp_mb();
- /* This group may not have an error counter attached to it. */
- if (!error_counter)
- return;
-
- dimension_index[0] = event_notifier->priv->error_counter_index;
- ret = event_notifier_group->error_counter->ops->counter_add(
- error_counter->counter, dimension_index, 1);
- if (ret)
- WARN_ON_ONCE(1);
-}
-
-static
-void notification_send(struct lttng_event_notifier_notification *notif,
- struct lttng_ust_event_notifier *event_notifier)
-{
- ssize_t ret;
- size_t content_len;
- int iovec_count = 1;
- struct lttng_ust_abi_event_notifier_notification ust_notif = {0};
- struct iovec iov[2];
-
- assert(notif);
-
- ust_notif.token = event_notifier->priv->parent.user_token;
-
- /*
- * Prepare sending the notification from multiple buffers using an
- * array of `struct iovec`. The first buffer of the vector is
- * notification structure itself and is always present.
- */
- iov[0].iov_base = &ust_notif;
- iov[0].iov_len = sizeof(ust_notif);
-
- if (notif->has_captures) {
- /*
- * If captures were requested, the second buffer of the array
- * is the capture buffer.
- */
- assert(notif->writer.buffer);
- content_len = notif->writer.write_pos - notif->writer.buffer;
-
- assert(content_len > 0 && content_len <= CAPTURE_BUFFER_SIZE);
-
- iov[1].iov_base = notif->capture_buf;
- iov[1].iov_len = content_len;
-
- iovec_count++;
- } else {
- content_len = 0;
- }
-
- /*
- * Update the capture buffer size so that receiver of the buffer will
- * know how much to expect.
- */
- ust_notif.capture_buf_size = content_len;
-
- /* Send all the buffers. */
- ret = ust_patient_writev(notif->notification_fd, iov, iovec_count);
- if (ret == -1) {
- if (errno == EAGAIN) {
- record_error(event_notifier);
- DBG("Cannot send event_notifier notification without blocking: %s",
- strerror(errno));
- } else {
- DBG("Error to sending event notifier notification: %s",
- strerror(errno));
- abort();
- }
- }
-}
-
-void lttng_event_notifier_notification_send(
- struct lttng_ust_event_notifier *event_notifier,
- const char *stack_data,
- struct lttng_ust_notification_ctx *notif_ctx)
-{
- /*
- * This function is called from the probe, we must do dynamic
- * allocation in this context.
- */
- struct lttng_event_notifier_notification notif = {0};
-
-	notification_init(&notif, event_notifier);
-
- if (caa_unlikely(notif_ctx->eval_capture)) {
- struct lttng_ust_bytecode_runtime *capture_bc_runtime;
-
- /*
- * Iterate over all the capture bytecodes. If the interpreter
- * functions returns successfully, append the value of the
- * `output` parameter to the capture buffer. If the interpreter
- * fails, append an empty capture to the buffer.
- */
- cds_list_for_each_entry_rcu(capture_bc_runtime,
- &event_notifier->priv->capture_bytecode_runtime_head, node) {
- struct lttng_interpreter_output output;
-
- if (capture_bc_runtime->interpreter_func(capture_bc_runtime,
- stack_data, &output) == LTTNG_UST_BYTECODE_INTERPRETER_OK)
-				notification_append_capture(&notif, &output);
-			else
-				notification_append_empty_capture(&notif);
- }
- }
-
- /*
- * Send the notification (including the capture buffer) to the
- * sessiond.
- */
-	notification_send(&notif, event_notifier);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 Aravind HT <aravind.ht@gmail.com>
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <limits.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <assert.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/select.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <fcntl.h>
-#include <pthread.h>
-#include <signal.h>
-#include <stdbool.h>
-#include <urcu/compiler.h>
-#include <urcu/tls-compat.h>
-#include <urcu/system.h>
-
-#include "common/ust-fd.h"
-#include "common/macros.h"
-#include <lttng/ust-error.h>
-#include "common/logging.h"
-
-/* Operations on the fd set. */
-#define IS_FD_VALID(fd) ((fd) >= 0 && (fd) < lttng_ust_max_fd)
-#define GET_FD_SET_FOR_FD(fd, fd_sets) (&((fd_sets)[(fd) / FD_SETSIZE]))
-#define CALC_INDEX_TO_SET(fd) ((fd) % FD_SETSIZE)
-#define IS_FD_STD(fd) (IS_FD_VALID(fd) && (fd) <= STDERR_FILENO)
-
-/* Check fd validity before calling these. */
-#define ADD_FD_TO_SET(fd, fd_sets) \
- FD_SET(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
-#define IS_FD_SET(fd, fd_sets) \
- FD_ISSET(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
-#define DEL_FD_FROM_SET(fd, fd_sets) \
- FD_CLR(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
-
-/*
- * Protect the lttng_fd_set. Nests within the ust_lock, and therefore
- * within the libc dl lock. Therefore, we need to fixup the TLS before
- * nesting into this lock.
- *
- * The ust_safe_guard_fd_mutex nests within the ust_mutex. This mutex
- * is also held across fork.
- */
-static pthread_mutex_t ust_safe_guard_fd_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * Cancel state when grabbing the ust_safe_guard_fd_mutex. Saved when
- * locking, restored on unlock. Protected by ust_safe_guard_fd_mutex.
- */
-static int ust_safe_guard_saved_cancelstate;
-
-/*
- * Track whether we are within lttng-ust or application, for close
- * system call override by LD_PRELOAD library. This also tracks whether
- * we are invoking close() from a signal handler nested on an
- * application thread.
- */
-static DEFINE_URCU_TLS(int, ust_fd_mutex_nest);
-
-/* fd_set used to book keep fd being used by lttng-ust. */
-static fd_set *lttng_fd_set;
-static int lttng_ust_max_fd;
-static int num_fd_sets;
-static int init_done;
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_ust_fixup_fd_tracker_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(ust_fd_mutex_nest)));
-}
-
-/*
- * Allocate the fd set array based on the hard limit set for this
- * process. This will be called during the constructor execution
- * and will also be called in the child after fork via lttng_ust_init.
- */
-void lttng_ust_init_fd_tracker(void)
-{
- struct rlimit rlim;
- int i;
-
- if (CMM_LOAD_SHARED(init_done))
- return;
-
- memset(&rlim, 0, sizeof(rlim));
- /* Get the current possible max number of fd for this process. */
- if (getrlimit(RLIMIT_NOFILE, &rlim) < 0)
- abort();
- /*
- * FD set array size determined using the hard limit. Even if
- * the process wishes to increase its limit using setrlimit, it
- * can only do so with the softlimit which will be less than the
- * hard limit.
- */
- lttng_ust_max_fd = rlim.rlim_max;
- num_fd_sets = lttng_ust_max_fd / FD_SETSIZE;
- if (lttng_ust_max_fd % FD_SETSIZE)
- ++num_fd_sets;
- if (lttng_fd_set != NULL) {
- free(lttng_fd_set);
- lttng_fd_set = NULL;
- }
- lttng_fd_set = malloc(num_fd_sets * (sizeof(fd_set)));
- if (!lttng_fd_set)
- abort();
- for (i = 0; i < num_fd_sets; i++)
-		FD_ZERO((&lttng_fd_set[i]));
- CMM_STORE_SHARED(init_done, 1);
-}
-
-void lttng_ust_lock_fd_tracker(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
-
- ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (!URCU_TLS(ust_fd_mutex_nest)++) {
- /*
- * Ensure the compiler don't move the store after the close()
- * call in case close() would be marked as leaf.
- */
- cmm_barrier();
- pthread_mutex_lock(&ust_safe_guard_fd_mutex);
- ust_safe_guard_saved_cancelstate = oldstate;
- }
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
-}
-
-void lttng_ust_unlock_fd_tracker(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, newstate, oldstate;
- bool restore_cancel = false;
-
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- /*
- * Ensure the compiler don't move the store before the close()
- * call, in case close() would be marked as leaf.
- */
- cmm_barrier();
- if (!--URCU_TLS(ust_fd_mutex_nest)) {
- newstate = ust_safe_guard_saved_cancelstate;
- restore_cancel = true;
- pthread_mutex_unlock(&ust_safe_guard_fd_mutex);
- }
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (restore_cancel) {
- ret = pthread_setcancelstate(newstate, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- }
-}
-
-static int dup_std_fd(int fd)
-{
- int ret, i;
- int fd_to_close[STDERR_FILENO + 1];
- int fd_to_close_count = 0;
- int dup_cmd = F_DUPFD; /* Default command */
- int fd_valid = -1;
-
- if (!(IS_FD_STD(fd))) {
- /* Should not be here */
- ret = -1;
- goto error;
- }
-
- /* Check for FD_CLOEXEC flag */
- ret = fcntl(fd, F_GETFD);
- if (ret < 0) {
- PERROR("fcntl on f_getfd");
- ret = -1;
- goto error;
- }
-
- if (ret & FD_CLOEXEC) {
- dup_cmd = F_DUPFD_CLOEXEC;
- }
-
- /* Perform dup */
- for (i = 0; i < STDERR_FILENO + 1; i++) {
- ret = fcntl(fd, dup_cmd, 0);
- if (ret < 0) {
- PERROR("fcntl dup fd");
- goto error;
- }
-
- if (!(IS_FD_STD(ret))) {
- /* fd is outside of STD range, use it. */
- fd_valid = ret;
- /* Close fd received as argument. */
- fd_to_close[i] = fd;
- fd_to_close_count++;
- break;
- }
-
- fd_to_close[i] = ret;
- fd_to_close_count++;
- }
-
- /* Close intermediary fds */
- for (i = 0; i < fd_to_close_count; i++) {
- ret = close(fd_to_close[i]);
- if (ret) {
- PERROR("close on temporary fd: %d.", fd_to_close[i]);
- /*
- * Not using an abort here would yield a complicated
- * error handling for the caller. If a failure occurs
- * here, the system is already in a bad state.
- */
- abort();
- }
- }
-
- ret = fd_valid;
-error:
- return ret;
-}
-
-/*
- * Needs to be called with ust_safe_guard_fd_mutex held when opening the fd.
- * Has strict checking of fd validity.
- *
- * If fd <= 2, dup the fd until fd > 2. This enables us to bypass
- * problems that can be encountered if UST uses stdin, stdout, stderr
- * fds for internal use (daemon etc.). This can happen if the
- * application closes either of those file descriptors. Intermediary fds
- * are closed as needed.
- *
- * Return -1 on error.
- *
- */
-int lttng_ust_add_fd_to_tracker(int fd)
-{
- int ret;
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
- assert(URCU_TLS(ust_fd_mutex_nest));
-
- if (IS_FD_STD(fd)) {
- ret = dup_std_fd(fd);
- if (ret < 0) {
- goto error;
- }
- fd = ret;
- }
-
- /* Trying to add an fd which we can not accommodate. */
- assert(IS_FD_VALID(fd));
- /* Setting an fd thats already set. */
- assert(!IS_FD_SET(fd, lttng_fd_set));
-
- ADD_FD_TO_SET(fd, lttng_fd_set);
- return fd;
-error:
- return ret;
-}
-
-/*
- * Needs to be called with ust_safe_guard_fd_mutex held when opening the fd.
- * Has strict checking for fd validity.
- */
-void lttng_ust_delete_fd_from_tracker(int fd)
-{
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
-
- assert(URCU_TLS(ust_fd_mutex_nest));
- /* Not a valid fd. */
- assert(IS_FD_VALID(fd));
- /* Deleting an fd which was not set. */
- assert(IS_FD_SET(fd, lttng_fd_set));
-
- DEL_FD_FROM_SET(fd, lttng_fd_set);
-}
-
-/*
- * Interface allowing applications to close arbitrary file descriptors.
- * We check if it is owned by lttng-ust, and return -1, errno=EBADF
- * instead of closing it if it is the case.
- */
-int lttng_ust_safe_close_fd(int fd, int (*close_cb)(int fd))
-{
- int ret = 0;
-
- lttng_ust_fixup_fd_tracker_tls();
-
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
-
- /*
- * If called from lttng-ust, we directly call close without
- * validating whether the FD is part of the tracked set.
- */
- if (URCU_TLS(ust_fd_mutex_nest))
- return close_cb(fd);
-
- lttng_ust_lock_fd_tracker();
- if (IS_FD_VALID(fd) && IS_FD_SET(fd, lttng_fd_set)) {
- ret = -1;
- errno = EBADF;
- } else {
- ret = close_cb(fd);
- }
- lttng_ust_unlock_fd_tracker();
-
- return ret;
-}
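For reference, roughly how an LD_PRELOAD close() override can defer to this helper (a simplified sketch; the lazy dlsym(RTLD_NEXT) lookup stands in for however the in-tree liblttng-ust-fd wrapper resolves the real symbol):

    /* Requires _GNU_SOURCE for RTLD_NEXT. */
    #include <dlfcn.h>

    static int (*real_close)(int fd);

    int close(int fd)
    {
            if (!real_close)
                    real_close = (int (*)(int)) dlsym(RTLD_NEXT, "close");
            return lttng_ust_safe_close_fd(fd, real_close);
    }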
-
-/*
- * Interface allowing applications to close arbitrary streams.
- * We check if it is owned by lttng-ust, and return -1, errno=EBADF
- * instead of closing it if it is the case.
- */
-int lttng_ust_safe_fclose_stream(FILE *stream, int (*fclose_cb)(FILE *stream))
-{
- int ret = 0, fd;
-
- lttng_ust_fixup_fd_tracker_tls();
-
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
-
- /*
- * If called from lttng-ust, we directly call fclose without
- * validating whether the FD is part of the tracked set.
- */
- if (URCU_TLS(ust_fd_mutex_nest))
- return fclose_cb(stream);
-
- fd = fileno(stream);
-
- lttng_ust_lock_fd_tracker();
- if (IS_FD_VALID(fd) && IS_FD_SET(fd, lttng_fd_set)) {
- ret = -1;
- errno = EBADF;
- } else {
- ret = fclose_cb(stream);
- }
- lttng_ust_unlock_fd_tracker();
-
- return ret;
-}
-
-#ifdef __OpenBSD__
-static void set_close_success(int *p)
-{
- *p = 1;
-}
-static int test_close_success(const int *p)
-{
- return *p;
-}
-#else
-static void set_close_success(int *p __attribute__((unused)))
-{
-}
-static int test_close_success(const int *p __attribute__((unused)))
-{
- return 1;
-}
-#endif
-
-/*
- * Implement helper for closefrom() override.
- */
-int lttng_ust_safe_closefrom_fd(int lowfd, int (*close_cb)(int fd))
-{
- int ret = 0, close_success = 0, i;
-
- lttng_ust_fixup_fd_tracker_tls();
-
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
-
- if (lowfd < 0) {
- /*
- * NetBSD return EBADF if fd is invalid.
- */
- errno = EBADF;
- ret = -1;
- goto end;
- }
- /*
- * If called from lttng-ust, we directly call close without
- * validating whether the FD is part of the tracked set.
- */
- if (URCU_TLS(ust_fd_mutex_nest)) {
- for (i = lowfd; i < lttng_ust_max_fd; i++) {
- if (close_cb(i) < 0) {
- switch (errno) {
- case EBADF:
- continue;
- case EINTR:
- default:
- ret = -1;
- goto end;
- }
- }
- set_close_success(&close_success);
- }
- } else {
- lttng_ust_lock_fd_tracker();
- for (i = lowfd; i < lttng_ust_max_fd; i++) {
- if (IS_FD_VALID(i) && IS_FD_SET(i, lttng_fd_set))
- continue;
- if (close_cb(i) < 0) {
- switch (errno) {
- case EBADF:
- continue;
- case EINTR:
- default:
- ret = -1;
- lttng_ust_unlock_fd_tracker();
- goto end;
- }
- }
- set_close_success(&close_success);
- }
- lttng_ust_unlock_fd_tracker();
- }
- if (!test_close_success(&close_success)) {
- /*
- * OpenBSD return EBADF if fd is greater than all open
- * file descriptors.
- */
- ret = -1;
- errno = EBADF;
- }
-end:
- return ret;
-}
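
To illustrate the interface deleted above: a minimal sketch of how a close(2) override (in the spirit of liblttng-ust-fd) could defer to this helper. The include path and the dlsym-based libc lookup are assumptions, not part of this patch.

#define _GNU_SOURCE
#include <dlfcn.h>	/* dlsym(), RTLD_NEXT */
#include <unistd.h>

#include "common/ust-fd.h"	/* assumed path; declares lttng_ust_safe_close_fd() */

static int (*plibc_close)(int fd);

int close(int fd)
{
	if (!plibc_close) {
		plibc_close = (int (*)(int)) dlsym(RTLD_NEXT, "close");
		if (!plibc_close)
			return -1;	/* cannot resolve the real libc close() */
	}
	/*
	 * Returns -1 with errno = EBADF if the fd is owned by lttng-ust,
	 * otherwise forwards to the real libc close().
	 */
	return lttng_ust_safe_close_fd(fd, plibc_close);
}
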
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Userspace RCU - sys_futex/compat_futex header.
- */
-
-#ifndef _LTTNG_UST_FUTEX_H
-#define _LTTNG_UST_FUTEX_H
-
-#include <errno.h>
-#include <stdint.h>
-#include <time.h>
-#include <sys/syscall.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-/*
- * sys_futex compatibility header.
- * Use *only* *either of* futex_noasync OR futex_async on a given address.
- *
- * futex_noasync cannot be executed in signal handlers, but ensures that
- * it will be put in a wait queue even in compatibility mode.
- *
- * futex_async is signal-handler safe for the wakeup. It uses polling
- * on the wait-side in compatibility mode.
- *
- * BEWARE: sys_futex() FUTEX_WAIT may return early if interrupted
- * (returns EINTR).
- */
-
-extern int lttng_ust_compat_futex_noasync(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
- __attribute__((visibility("hidden")));
-
-extern int lttng_ust_compat_futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
- __attribute__((visibility("hidden")));
-
-#if (defined(__linux__) && defined(__NR_futex))
-
-#include <unistd.h>
-#include <errno.h>
-#include <urcu/compiler.h>
-#include <urcu/arch.h>
-
-static inline int lttng_ust_futex(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- return syscall(__NR_futex, uaddr, op, val, timeout,
- uaddr2, val3);
-}
-
-static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- int ret;
-
- ret = lttng_ust_futex(uaddr, op, val, timeout, uaddr2, val3);
- if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
- /*
- * The fallback on ENOSYS is the async-safe version of
- * the compat futex implementation, because the
- * async-safe compat implementation allows being used
- * concurrently with calls to futex(). Indeed, sys_futex
- * FUTEX_WAIT, on some architectures (mips and parisc),
- * within a given process, spuriously return ENOSYS due
- * to signal restart bugs on some kernel versions.
- */
- return lttng_ust_compat_futex_async(uaddr, op, val, timeout,
- uaddr2, val3);
- }
- return ret;
-
-}
-
-static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- int ret;
-
- ret = lttng_ust_futex(uaddr, op, val, timeout, uaddr2, val3);
- if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
- return lttng_ust_compat_futex_async(uaddr, op, val, timeout,
- uaddr2, val3);
- }
- return ret;
-}
-
-#elif defined(__FreeBSD__)
-
-#include <sys/types.h>
-#include <sys/umtx.h>
-
-static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- int umtx_op;
- void *umtx_uaddr = NULL, *umtx_uaddr2 = NULL;
- struct _umtx_time umtx_timeout = {
- ._flags = UMTX_ABSTIME,
- ._clockid = CLOCK_MONOTONIC,
- };
-
- switch (op) {
- case FUTEX_WAIT:
- /* On FreeBSD, a "u_int" is a 32-bit integer. */
- umtx_op = UMTX_OP_WAIT_UINT;
- if (timeout != NULL) {
- umtx_timeout._timeout = *timeout;
- umtx_uaddr = (void *) sizeof(umtx_timeout);
- umtx_uaddr2 = (void *) &umtx_timeout;
- }
- break;
- case FUTEX_WAKE:
- umtx_op = UMTX_OP_WAKE;
- break;
- default:
- errno = EINVAL;
- return -1;
- }
-
- return _umtx_op(uaddr, umtx_op, (uint32_t) val, umtx_uaddr,
- umtx_uaddr2);
-}
-
-static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- return lttng_ust_futex_async(uaddr, op, val, timeout, uaddr2, val3);
-}
-
-#elif defined(__CYGWIN__)
-
-/*
- * The futex_noasync compat code uses a weak symbol to share state across
- * different shared object which is not possible on Windows with the
- * Portable Executable format. Use the async compat code for both cases.
- */
-static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- return lttng_ust_compat_futex_async(uaddr, op, val, timeout, uaddr2, val3);
-}
-
-static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- return lttng_ust_compat_futex_async(uaddr, op, val, timeout, uaddr2, val3);
-}
-
-#else
-
-static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- return lttng_ust_compat_futex_noasync(uaddr, op, val, timeout, uaddr2, val3);
-}
-
-static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- return lttng_ust_compat_futex_async(uaddr, op, val, timeout, uaddr2, val3);
-}
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _LTTNG_UST_FUTEX_H */
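
A usage sketch for the wrappers above (caller code assumed, not part of this patch): both the waiter and the waker use the async flavour on the same address, per the "either of" rule in the header comment, and the EINTR caveat is handled by re-checking the condition in a loop.

#include <stdint.h>
#include <urcu/system.h>	/* CMM_LOAD_SHARED / CMM_STORE_SHARED */
/* plus the futex compat header above (include path assumed) */

static int32_t futex_word;

/* Waiter: block while futex_word stays 0. */
static void wait_until_posted(void)
{
	while (CMM_LOAD_SHARED(futex_word) == 0) {
		/* FUTEX_WAIT may return early on EINTR; the loop re-checks. */
		lttng_ust_futex_async(&futex_word, FUTEX_WAIT, 0, NULL, NULL, 0);
	}
}

/* Waker: the async flavour is safe to call from a signal handler. */
static void post(void)
{
	CMM_STORE_SHARED(futex_word, 1);
	lttng_ust_futex_async(&futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
}
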
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <stdlib.h>
-#include <unistd.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include "common/logging.h"
-#include "common/macros.h"
-#include "getenv.h"
-
-enum lttng_env_secure {
- LTTNG_ENV_SECURE,
- LTTNG_ENV_NOT_SECURE,
-};
-
-struct lttng_env {
- const char *key;
- enum lttng_env_secure secure;
- char *value;
-};
-
-static struct lttng_env lttng_env[] = {
- /*
- * LTTNG_UST_DEBUG is used directly by snprintf, because it
- * needs to be already set for ERR() used in
- * lttng_ust_getenv_init().
- */
- { "LTTNG_UST_DEBUG", LTTNG_ENV_NOT_SECURE, NULL, },
-
- /* Env. var. which can be used in setuid/setgid executables. */
- { "LTTNG_UST_WITHOUT_BADDR_STATEDUMP", LTTNG_ENV_NOT_SECURE, NULL, },
- { "LTTNG_UST_REGISTER_TIMEOUT", LTTNG_ENV_NOT_SECURE, NULL, },
-
- /* Env. var. which are not fetched in setuid/setgid executables. */
- { "LTTNG_UST_CLOCK_PLUGIN", LTTNG_ENV_SECURE, NULL, },
- { "LTTNG_UST_GETCPU_PLUGIN", LTTNG_ENV_SECURE, NULL, },
- { "LTTNG_UST_ALLOW_BLOCKING", LTTNG_ENV_SECURE, NULL, },
- { "HOME", LTTNG_ENV_SECURE, NULL, },
- { "LTTNG_HOME", LTTNG_ENV_SECURE, NULL, },
-};
-
-static
-int lttng_is_setuid_setgid(void)
-{
- return geteuid() != getuid() || getegid() != getgid();
-}
-
-char *lttng_ust_getenv(const char *name)
-{
- size_t i;
- struct lttng_env *e;
- bool found = false;
-
- for (i = 0; i < LTTNG_ARRAY_SIZE(lttng_env); i++) {
-		e = &lttng_env[i];

-
- if (strcmp(e->key, name) == 0) {
- found = true;
- break;
- }
- }
- if (!found) {
- return NULL;
- }
- return e->value;
-}
-
-void lttng_ust_getenv_init(void)
-{
- size_t i;
-
- for (i = 0; i < LTTNG_ARRAY_SIZE(lttng_env); i++) {
-		struct lttng_env *e = &lttng_env[i];
-
- if (e->secure == LTTNG_ENV_SECURE && lttng_is_setuid_setgid()) {
- ERR("Getting environment variable '%s' from setuid/setgid binary refused for security reasons.",
- e->key);
- continue;
- }
- e->value = getenv(e->key);
- }
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _COMPAT_GETENV_H
-#define _COMPAT_GETENV_H
-
-/*
- * Always add the lttng-ust environment variables using the lttng_ust_getenv()
- * infrastructure rather than using getenv() directly. This ensures that we
- * don't trigger races between getenv() invoked by lttng-ust listener threads
- * invoked concurrently with setenv() called by an otherwise single-threaded
- * application thread. (the application is not aware that it runs with
- * lttng-ust)
- */
-
-char *lttng_ust_getenv(const char *name)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_getenv_init(void)
- __attribute__((visibility("hidden")));
-
-#endif /* _COMPAT_GETENV_H */
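
A sketch of the intended call pattern (caller code assumed; the include path reflects the new common/ layout): fetch a variable through the snapshot taken by lttng_ust_getenv_init() rather than calling getenv(3) at use time.

#include <stdlib.h>	/* atoi() */

#include "common/getenv.h"	/* assumed include path */

/* Returns the registration timeout in ms, or the default when unset. */
static int get_register_timeout_ms(int default_ms)
{
	const char *val = lttng_ust_getenv("LTTNG_UST_REGISTER_TIMEOUT");

	/*
	 * NULL when the key is not in the lttng_env table, when the
	 * variable is unset, or when a LTTNG_ENV_SECURE key is refused
	 * in a setuid/setgid process.
	 */
	return val ? atoi(val) : default_ms;
}
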
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <stddef.h>
-#include <stdint.h>
-#include <urcu/compiler.h>
-#include <lttng/ust-endian.h>
-
-/*
- * Hash function
- * Source: http://burtleburtle.net/bob/c/lookup3.c
- * Originally Public Domain
- */
-
-#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
-
-#define mix(a, b, c) \
-do { \
- a -= c; a ^= rot(c, 4); c += b; \
- b -= a; b ^= rot(a, 6); a += c; \
- c -= b; c ^= rot(b, 8); b += a; \
- a -= c; a ^= rot(c, 16); c += b; \
- b -= a; b ^= rot(a, 19); a += c; \
- c -= b; c ^= rot(b, 4); b += a; \
-} while (0)
-
-#define final(a, b, c) \
-{ \
- c ^= b; c -= rot(b, 14); \
- a ^= c; a -= rot(c, 11); \
- b ^= a; b -= rot(a, 25); \
- c ^= b; c -= rot(b, 16); \
- a ^= c; a -= rot(c, 4);\
- b ^= a; b -= rot(a, 14); \
- c ^= b; c -= rot(b, 24); \
-}
-
-#if (BYTE_ORDER == LITTLE_ENDIAN)
-#define HASH_LITTLE_ENDIAN 1
-#else
-#define HASH_LITTLE_ENDIAN 0
-#endif
-
-/*
- *
- * hashlittle() -- hash a variable-length key into a 32-bit value
- * k : the key (the unaligned variable-length array of bytes)
- * length : the length of the key, counting by bytes
- * initval : can be any 4-byte value
- * Returns a 32-bit value. Every bit of the key affects every bit of
- * the return value. Two keys differing by one or two bits will have
- * totally different hash values.
- *
- * The best hash table sizes are powers of 2. There is no need to do
- * mod a prime (mod is sooo slow!). If you need less than 32 bits,
- * use a bitmask. For example, if you need only 10 bits, do
- * h = (h & hashmask(10));
- * In which case, the hash table should have hashsize(10) elements.
- *
- * If you are hashing n strings (uint8_t **)k, do it like this:
- * for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h);
- *
- * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
- * code any way you wish, private, educational, or commercial. It's free.
- *
- * Use for hash table lookup, or anything where one collision in 2^^32 is
- * acceptable. Do NOT use for cryptographic purposes.
- */
-static
-uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
-{
- uint32_t a, b, c; /* internal state */
- union {
- const void *ptr;
- size_t i;
- } u;
-
- /* Set up the internal state */
- a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
-
- u.ptr = key;
- if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
- const uint32_t *k = (const uint32_t *) key; /* read 32-bit chunks */
-
- /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
- while (length > 12) {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a, b, c);
- length -= 12;
- k += 3;
- }
-
- /*----------------------------- handle the last (probably partial) block */
- /*
- * The original jhash.h reads beyond the end of string, and implements
- * a special code path for VALGRIND. It seems to make ASan unhappy too
- * though, so considering that hashing event names is not a fast-path
- * in lttng-ust, remove the "fast" code entirely and use the slower
- * but verifiable VALGRIND version of the code which does not issue
- * out-of-bound reads.
- */
- {
- const uint8_t *k8;
-
- k8 = (const uint8_t *) k;
- switch (length) {
- case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
- case 11: c+=((uint32_t) k8[10])<<16; /* fall through */
- case 10: c+=((uint32_t) k8[9])<<8; /* fall through */
- case 9 : c+=k8[8]; /* fall through */
- case 8 : b+=k[1]; a+=k[0]; break;
- case 7 : b+=((uint32_t) k8[6])<<16; /* fall through */
- case 6 : b+=((uint32_t) k8[5])<<8; /* fall through */
- case 5 : b+=k8[4]; /* fall through */
- case 4 : a+=k[0]; break;
- case 3 : a+=((uint32_t) k8[2])<<16; /* fall through */
- case 2 : a+=((uint32_t) k8[1])<<8; /* fall through */
- case 1 : a+=k8[0]; break;
- case 0 : return c;
- }
- }
-
- } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
- const uint16_t *k = (const uint16_t *) key; /* read 16-bit chunks */
- const uint8_t *k8;
-
- /*--------------- all but last block: aligned reads and different mixing */
- while (length > 12)
- {
- a += k[0] + (((uint32_t) k[1])<<16);
- b += k[2] + (((uint32_t) k[3])<<16);
- c += k[4] + (((uint32_t) k[5])<<16);
- mix(a, b, c);
- length -= 12;
- k += 6;
- }
-
- /*----------------------------- handle the last (probably partial) block */
- k8 = (const uint8_t *) k;
- switch(length)
- {
- case 12: c+=k[4]+(((uint32_t) k[5])<<16);
- b+=k[2]+(((uint32_t) k[3])<<16);
- a+=k[0]+(((uint32_t) k[1])<<16);
- break;
- case 11: c+=((uint32_t) k8[10])<<16; /* fall through */
- case 10: c+=k[4];
- b+=k[2]+(((uint32_t) k[3])<<16);
- a+=k[0]+(((uint32_t) k[1])<<16);
- break;
- case 9 : c+=k8[8]; /* fall through */
- case 8 : b+=k[2]+(((uint32_t) k[3])<<16);
- a+=k[0]+(((uint32_t) k[1])<<16);
- break;
- case 7 : b+=((uint32_t) k8[6])<<16; /* fall through */
- case 6 : b+=k[2];
- a+=k[0]+(((uint32_t) k[1])<<16);
- break;
- case 5 : b+=k8[4]; /* fall through */
- case 4 : a+=k[0]+(((uint32_t) k[1])<<16);
- break;
- case 3 : a+=((uint32_t) k8[2])<<16; /* fall through */
- case 2 : a+=k[0];
- break;
- case 1 : a+=k8[0];
- break;
- case 0 : return c; /* zero length requires no mixing */
- }
-
- } else { /* need to read the key one byte at a time */
- const uint8_t *k = (const uint8_t *)key;
-
- /*--------------- all but the last block: affect some 32 bits of (a, b, c) */
- while (length > 12) {
- a += k[0];
- a += ((uint32_t) k[1])<<8;
- a += ((uint32_t) k[2])<<16;
- a += ((uint32_t) k[3])<<24;
- b += k[4];
- b += ((uint32_t) k[5])<<8;
- b += ((uint32_t) k[6])<<16;
- b += ((uint32_t) k[7])<<24;
- c += k[8];
- c += ((uint32_t) k[9])<<8;
- c += ((uint32_t) k[10])<<16;
- c += ((uint32_t) k[11])<<24;
- mix(a,b,c);
- length -= 12;
- k += 12;
- }
-
- /*-------------------------------- last block: affect all 32 bits of (c) */
- switch (length) { /* all the case statements fall through */
- case 12: c+=((uint32_t) k[11])<<24; /* fall through */
- case 11: c+=((uint32_t) k[10])<<16; /* fall through */
- case 10: c+=((uint32_t) k[9])<<8; /* fall through */
- case 9 : c+=k[8]; /* fall through */
- case 8 : b+=((uint32_t) k[7])<<24; /* fall through */
- case 7 : b+=((uint32_t) k[6])<<16; /* fall through */
- case 6 : b+=((uint32_t) k[5])<<8; /* fall through */
- case 5 : b+=k[4]; /* fall through */
- case 4 : a+=((uint32_t) k[3])<<24; /* fall through */
- case 3 : a+=((uint32_t) k[2])<<16; /* fall through */
- case 2 : a+=((uint32_t) k[1])<<8; /* fall through */
- case 1 : a+=k[0];
- break;
- case 0 : return c;
- }
- }
-
- final(a, b, c);
- return c;
-}
-
-static inline
-uint32_t jhash(const void *key, size_t length, uint32_t seed)
-{
- return hashlittle(key, length, seed);
-}
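
As a usage sketch for the hashlittle() comment above (caller code assumed, including the jhash.h header above): hash an event name and mask down to a power-of-two bucket count instead of taking a modulo.

#include <stdint.h>
#include <string.h>	/* strlen() */

#define NAME_HASH_BITS	10
#define NAME_HASH_SIZE	(1U << NAME_HASH_BITS)

static uint32_t event_name_bucket(const char *name, uint32_t seed)
{
	uint32_t hash = jhash(name, strlen(name), seed);

	/* Power-of-two table: mask the hash rather than using modulo. */
	return hash & (NAME_HASH_SIZE - 1);
}
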
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST bytecode interpreter.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-
-#include <lttng/urcu/pointer.h>
-#include <urcu/rculist.h>
-#include <lttng/ust-endian.h>
-#include <lttng/ust-events.h>
-#include "ust-events-internal.h"
-
-#include "lttng-bytecode.h"
-#include "string-utils.h"
-
-
-/*
- * -1: wildcard found.
- * -2: unknown escape char.
- * 0: normal char.
- */
-
-static
-int parse_char(const char **p)
-{
- switch (**p) {
- case '\\':
- (*p)++;
- switch (**p) {
- case '\\':
- case '*':
- return 0;
- default:
- return -2;
- }
- case '*':
- return -1;
- default:
- return 0;
- }
-}
-
-/*
- * Returns SIZE_MAX if the string is null-terminated, or the number of
- * characters if not.
- */
-static
-size_t get_str_or_seq_len(const struct estack_entry *entry)
-{
- return entry->u.s.seq_len;
-}
-
-static
-int stack_star_glob_match(struct estack *stack, int top,
- const char *cmp_type __attribute__((unused)))
-{
- const char *pattern;
- const char *candidate;
- size_t pattern_len;
- size_t candidate_len;
-
- /* Find out which side is the pattern vs. the candidate. */
- if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
- pattern = estack_ax(stack, top)->u.s.str;
- pattern_len = get_str_or_seq_len(estack_ax(stack, top));
- candidate = estack_bx(stack, top)->u.s.str;
- candidate_len = get_str_or_seq_len(estack_bx(stack, top));
- } else {
- pattern = estack_bx(stack, top)->u.s.str;
- pattern_len = get_str_or_seq_len(estack_bx(stack, top));
- candidate = estack_ax(stack, top)->u.s.str;
- candidate_len = get_str_or_seq_len(estack_ax(stack, top));
- }
-
- /* Perform the match. Returns 0 when the result is true. */
- return !strutils_star_glob_match(pattern, pattern_len, candidate,
- candidate_len);
-}
-
-static
-int stack_strcmp(struct estack *stack, int top, const char *cmp_type __attribute__((unused)))
-{
- const char *p = estack_bx(stack, top)->u.s.str, *q = estack_ax(stack, top)->u.s.str;
- int ret;
- int diff;
-
- for (;;) {
- int escaped_r0 = 0;
-
- if (unlikely(p - estack_bx(stack, top)->u.s.str >= estack_bx(stack, top)->u.s.seq_len || *p == '\0')) {
- if (q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0') {
- return 0;
- } else {
- if (estack_ax(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(&q);
- if (ret == -1)
- return 0;
- }
- return -1;
- }
- }
- if (unlikely(q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0')) {
- if (estack_bx(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(&p);
- if (ret == -1)
- return 0;
- }
- return 1;
- }
- if (estack_bx(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(&p);
- if (ret == -1) {
- return 0;
- } else if (ret == -2) {
- escaped_r0 = 1;
- }
- /* else compare both char */
- }
- if (estack_ax(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(&q);
- if (ret == -1) {
- return 0;
- } else if (ret == -2) {
- if (!escaped_r0)
- return -1;
- } else {
- if (escaped_r0)
- return 1;
- }
- } else {
- if (escaped_r0)
- return 1;
- }
- diff = *p - *q;
- if (diff != 0)
- break;
- p++;
- q++;
- }
- return diff;
-}
-
-int lttng_bytecode_interpret_error(
- struct lttng_ust_bytecode_runtime *bytecode_runtime __attribute__((unused)),
- const char *stack_data __attribute__((unused)),
- void *ctx __attribute__((unused)))
-{
- return LTTNG_UST_BYTECODE_INTERPRETER_ERROR;
-}
-
-#ifdef INTERPRETER_USE_SWITCH
-
-/*
- * Fallback for compilers that do not support taking address of labels.
- */
-
-#define START_OP \
- start_pc = &bytecode->data[0]; \
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
- pc = next_pc) { \
- dbg_printf("Executing op %s (%u)\n", \
- lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc), \
- (unsigned int) *(bytecode_opcode_t *) pc); \
- switch (*(bytecode_opcode_t *) pc) {
-
-#define OP(name) jump_target_##name: __attribute__((unused)); \
- case name
-
-#define PO break
-
-#define END_OP } \
- }
-
-#define JUMP_TO(name) \
- goto jump_target_##name
-
-#else
-
-/*
- * Dispatch-table based interpreter.
- */
-
-#define START_OP \
- start_pc = &bytecode->code[0]; \
- pc = next_pc = start_pc; \
- if (unlikely(pc - start_pc >= bytecode->len)) \
- goto end; \
- goto *dispatch[*(bytecode_opcode_t *) pc];
-
-#define OP(name) \
-LABEL_##name
-
-#define PO \
- pc = next_pc; \
- goto *dispatch[*(bytecode_opcode_t *) pc];
-
-#define END_OP
-
-#define JUMP_TO(name) \
- goto LABEL_##name
-
-#endif
-
-#define IS_INTEGER_REGISTER(reg_type) \
- (reg_type == REG_U64 || reg_type == REG_S64)
-
-static int context_get_index(struct lttng_ust_ctx *ctx,
- struct load_ptr *ptr,
- uint32_t idx)
-{
-
- const struct lttng_ust_ctx_field *ctx_field;
- const struct lttng_ust_event_field *field;
- struct lttng_ust_ctx_value v;
-
- ctx_field = &ctx->fields[idx];
- field = ctx_field->event_field;
- ptr->type = LOAD_OBJECT;
- ptr->field = field;
-
- switch (field->type->type) {
- case lttng_ust_type_integer:
- ctx_field->get_value(ctx_field->priv, &v);
- if (lttng_ust_get_type_integer(field->type)->signedness) {
- ptr->object_type = OBJECT_TYPE_S64;
- ptr->u.s64 = v.u.s64;
- ptr->ptr = &ptr->u.s64;
- } else {
- ptr->object_type = OBJECT_TYPE_U64;
- ptr->u.u64 = v.u.s64; /* Cast. */
- ptr->ptr = &ptr->u.u64;
- }
- break;
- case lttng_ust_type_enum:
- {
- const struct lttng_ust_type_integer *itype;
-
- itype = lttng_ust_get_type_integer(lttng_ust_get_type_enum(field->type)->container_type);
- ctx_field->get_value(ctx_field->priv, &v);
- if (itype->signedness) {
- ptr->object_type = OBJECT_TYPE_SIGNED_ENUM;
- ptr->u.s64 = v.u.s64;
- ptr->ptr = &ptr->u.s64;
- } else {
- ptr->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
- ptr->u.u64 = v.u.s64; /* Cast. */
- ptr->ptr = &ptr->u.u64;
- }
- break;
- }
- case lttng_ust_type_array:
- if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
- ERR("Array nesting only supports integer types.");
- return -EINVAL;
- }
- if (lttng_ust_get_type_array(field->type)->encoding == lttng_ust_string_encoding_none) {
- ERR("Only string arrays are supported for contexts.");
- return -EINVAL;
- }
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field->priv, &v);
- ptr->ptr = v.u.str;
- break;
- case lttng_ust_type_sequence:
- if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
- ERR("Sequence nesting only supports integer types.");
- return -EINVAL;
- }
- if (lttng_ust_get_type_sequence(field->type)->encoding == lttng_ust_string_encoding_none) {
- ERR("Only string sequences are supported for contexts.");
- return -EINVAL;
- }
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field->priv, &v);
- ptr->ptr = v.u.str;
- break;
- case lttng_ust_type_string:
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field->priv, &v);
- ptr->ptr = v.u.str;
- break;
- case lttng_ust_type_float:
- ptr->object_type = OBJECT_TYPE_DOUBLE;
- ctx_field->get_value(ctx_field->priv, &v);
- ptr->u.d = v.u.d;
- ptr->ptr = &ptr->u.d;
- break;
- case lttng_ust_type_dynamic:
- ctx_field->get_value(ctx_field->priv, &v);
- switch (v.sel) {
- case LTTNG_UST_DYNAMIC_TYPE_NONE:
- return -EINVAL;
- case LTTNG_UST_DYNAMIC_TYPE_U8:
- case LTTNG_UST_DYNAMIC_TYPE_U16:
- case LTTNG_UST_DYNAMIC_TYPE_U32:
- case LTTNG_UST_DYNAMIC_TYPE_U64:
- ptr->object_type = OBJECT_TYPE_U64;
- ptr->u.u64 = v.u.u64;
- ptr->ptr = &ptr->u.u64;
- dbg_printf("context get index dynamic u64 %" PRIi64 "\n", ptr->u.u64);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_S8:
- case LTTNG_UST_DYNAMIC_TYPE_S16:
- case LTTNG_UST_DYNAMIC_TYPE_S32:
- case LTTNG_UST_DYNAMIC_TYPE_S64:
- ptr->object_type = OBJECT_TYPE_S64;
- ptr->u.s64 = v.u.s64;
- ptr->ptr = &ptr->u.s64;
- dbg_printf("context get index dynamic s64 %" PRIi64 "\n", ptr->u.s64);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_FLOAT:
- case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
- ptr->object_type = OBJECT_TYPE_DOUBLE;
- ptr->u.d = v.u.d;
- ptr->ptr = &ptr->u.d;
- dbg_printf("context get index dynamic double %g\n", ptr->u.d);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_STRING:
- ptr->object_type = OBJECT_TYPE_STRING;
- ptr->ptr = v.u.str;
- dbg_printf("context get index dynamic string %s\n", (const char *) ptr->ptr);
- break;
- default:
- dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel);
- return -EINVAL;
- }
- break;
- default:
- ERR("Unknown type: %d", (int) field->type->type);
- return -EINVAL;
- }
- return 0;
-}
-
-static int dynamic_get_index(struct lttng_ust_ctx *ctx,
- struct bytecode_runtime *runtime,
- uint64_t index, struct estack_entry *stack_top)
-{
- int ret;
- const struct bytecode_get_index_data *gid;
-
- gid = (const struct bytecode_get_index_data *) &runtime->data[index];
- switch (stack_top->u.ptr.type) {
- case LOAD_OBJECT:
- switch (stack_top->u.ptr.object_type) {
- case OBJECT_TYPE_ARRAY:
- {
- const char *ptr;
-
- assert(gid->offset < gid->array_len);
- /* Skip count (unsigned long) */
- ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
- ptr = ptr + gid->offset;
- stack_top->u.ptr.ptr = ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- assert(stack_top->u.ptr.field->type->type == lttng_ust_type_array);
- stack_top->u.ptr.field = NULL;
- break;
- }
- case OBJECT_TYPE_SEQUENCE:
- {
- const char *ptr;
- size_t ptr_seq_len;
-
- ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
- ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
- if (gid->offset >= gid->elem.len * ptr_seq_len) {
- ret = -EINVAL;
- goto end;
- }
- ptr = ptr + gid->offset;
- stack_top->u.ptr.ptr = ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- assert(stack_top->u.ptr.field->type->type == lttng_ust_type_sequence);
- stack_top->u.ptr.field = NULL;
- break;
- }
- case OBJECT_TYPE_STRUCT:
- ERR("Nested structures are not supported yet.");
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_VARIANT:
- default:
- ERR("Unexpected get index type %d",
- (int) stack_top->u.ptr.object_type);
- ret = -EINVAL;
- goto end;
- }
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
- {
- ret = context_get_index(ctx,
- &stack_top->u.ptr,
- gid->ctx_index);
- if (ret) {
- goto end;
- }
- break;
- }
- case LOAD_ROOT_PAYLOAD:
- stack_top->u.ptr.ptr += gid->offset;
- if (gid->elem.type == OBJECT_TYPE_STRING)
- stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.type = LOAD_OBJECT;
- stack_top->u.ptr.field = gid->field;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- break;
- }
-
- stack_top->type = REG_PTR;
-
- return 0;
-
-end:
- return ret;
-}
-
-static int dynamic_load_field(struct estack_entry *stack_top)
-{
- int ret;
-
- switch (stack_top->u.ptr.type) {
- case LOAD_OBJECT:
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- default:
- dbg_printf("Interpreter warning: cannot load root, missing field name.\n");
- ret = -EINVAL;
- goto end;
- }
- switch (stack_top->u.ptr.object_type) {
- case OBJECT_TYPE_S8:
- dbg_printf("op load field s8\n");
- stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
- stack_top->type = REG_S64;
- break;
- case OBJECT_TYPE_S16:
- {
- int16_t tmp;
-
- dbg_printf("op load field s16\n");
- tmp = *(int16_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_16(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_S32:
- {
- int32_t tmp;
-
- dbg_printf("op load field s32\n");
- tmp = *(int32_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_32(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_S64:
- {
- int64_t tmp;
-
- dbg_printf("op load field s64\n");
- tmp = *(int64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_64(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_SIGNED_ENUM:
- {
- int64_t tmp;
-
- dbg_printf("op load field signed enumeration\n");
- tmp = *(int64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_64(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_U8:
- dbg_printf("op load field u8\n");
- stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
- stack_top->type = REG_U64;
- break;
- case OBJECT_TYPE_U16:
- {
- uint16_t tmp;
-
- dbg_printf("op load field u16\n");
- tmp = *(uint16_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_16(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_U64;
- break;
- }
- case OBJECT_TYPE_U32:
- {
- uint32_t tmp;
-
- dbg_printf("op load field u32\n");
- tmp = *(uint32_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_32(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_U64;
- break;
- }
- case OBJECT_TYPE_U64:
- {
- uint64_t tmp;
-
- dbg_printf("op load field u64\n");
- tmp = *(uint64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_64(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_U64;
- break;
- }
- case OBJECT_TYPE_UNSIGNED_ENUM:
- {
- uint64_t tmp;
-
- dbg_printf("op load field unsigned enumeration\n");
- tmp = *(uint64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_64(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_U64;
- break;
- }
- case OBJECT_TYPE_DOUBLE:
- memcpy(&stack_top->u.d,
- stack_top->u.ptr.ptr,
- sizeof(struct literal_double));
- stack_top->type = REG_DOUBLE;
- break;
- case OBJECT_TYPE_STRING:
- {
- const char *str;
-
- dbg_printf("op load field string\n");
- str = (const char *) stack_top->u.ptr.ptr;
- stack_top->u.s.str = str;
- if (unlikely(!stack_top->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- stack_top->u.s.seq_len = SIZE_MAX;
- stack_top->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- stack_top->type = REG_STRING;
- break;
- }
- case OBJECT_TYPE_STRING_SEQUENCE:
- {
- const char *ptr;
-
- dbg_printf("op load field string sequence\n");
- ptr = stack_top->u.ptr.ptr;
- stack_top->u.s.seq_len = *(unsigned long *) ptr;
- stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
- stack_top->type = REG_STRING;
- if (unlikely(!stack_top->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- stack_top->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- break;
- }
- case OBJECT_TYPE_DYNAMIC:
- /*
- * Dynamic types in context are looked up
- * by context get index.
- */
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_SEQUENCE:
- case OBJECT_TYPE_ARRAY:
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
- ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
- ret = -EINVAL;
- goto end;
- }
- return 0;
-
-end:
- return ret;
-}
-
-static
-int lttng_bytecode_interpret_format_output(struct estack_entry *ax,
- struct lttng_interpreter_output *output)
-{
- int ret;
-
-again:
- switch (ax->type) {
- case REG_S64:
- output->type = LTTNG_INTERPRETER_TYPE_S64;
- output->u.s = ax->u.v;
- break;
- case REG_U64:
- output->type = LTTNG_INTERPRETER_TYPE_U64;
- output->u.u = (uint64_t) ax->u.v;
- break;
- case REG_DOUBLE:
- output->type = LTTNG_INTERPRETER_TYPE_DOUBLE;
- output->u.d = ax->u.d;
- break;
- case REG_STRING:
- output->type = LTTNG_INTERPRETER_TYPE_STRING;
- output->u.str.str = ax->u.s.str;
- output->u.str.len = ax->u.s.seq_len;
- break;
- case REG_PTR:
- switch (ax->u.ptr.object_type) {
- case OBJECT_TYPE_S8:
- case OBJECT_TYPE_S16:
- case OBJECT_TYPE_S32:
- case OBJECT_TYPE_S64:
- case OBJECT_TYPE_U8:
- case OBJECT_TYPE_U16:
- case OBJECT_TYPE_U32:
- case OBJECT_TYPE_U64:
- case OBJECT_TYPE_DOUBLE:
- case OBJECT_TYPE_STRING:
- case OBJECT_TYPE_STRING_SEQUENCE:
- ret = dynamic_load_field(ax);
- if (ret)
- return ret;
- /* Retry after loading ptr into stack top. */
- goto again;
- case OBJECT_TYPE_SEQUENCE:
- output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
- output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
- output->u.sequence.nr_elem = *(unsigned long *) ax->u.ptr.ptr;
- output->u.sequence.nested_type = lttng_ust_get_type_sequence(ax->u.ptr.field->type)->elem_type;
- break;
- case OBJECT_TYPE_ARRAY:
- /* Skip count (unsigned long) */
- output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
- output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
- output->u.sequence.nr_elem = lttng_ust_get_type_array(ax->u.ptr.field->type)->length;
- output->u.sequence.nested_type = lttng_ust_get_type_array(ax->u.ptr.field->type)->elem_type;
- break;
- case OBJECT_TYPE_SIGNED_ENUM:
- ret = dynamic_load_field(ax);
- if (ret)
- return ret;
- output->type = LTTNG_INTERPRETER_TYPE_SIGNED_ENUM;
- output->u.s = ax->u.v;
- break;
- case OBJECT_TYPE_UNSIGNED_ENUM:
- ret = dynamic_load_field(ax);
- if (ret)
- return ret;
- output->type = LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM;
- output->u.u = ax->u.v;
- break;
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
- default:
- return -EINVAL;
- }
-
- break;
- case REG_STAR_GLOB_STRING:
- case REG_UNKNOWN:
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * Return LTTNG_UST_BYTECODE_INTERPRETER_OK on success.
- * Return LTTNG_UST_BYTECODE_INTERPRETER_ERROR on error.
- *
- * For FILTER bytecode: expect a struct lttng_ust_bytecode_filter_ctx *
- * as @ctx argument.
- * For CAPTURE bytecode: expect a struct lttng_interpreter_output *
- * as @ctx argument.
- */
-int lttng_bytecode_interpret(struct lttng_ust_bytecode_runtime *ust_bytecode,
- const char *interpreter_stack_data,
- void *caller_ctx)
-{
- struct bytecode_runtime *bytecode = caa_container_of(ust_bytecode, struct bytecode_runtime, p);
- struct lttng_ust_ctx *ctx = lttng_ust_rcu_dereference(*ust_bytecode->pctx);
- void *pc, *next_pc, *start_pc;
- int ret = -EINVAL, retval = 0;
- struct estack _stack;
- struct estack *stack = &_stack;
- register int64_t ax = 0, bx = 0;
- register enum entry_type ax_t = REG_UNKNOWN, bx_t = REG_UNKNOWN;
- register int top = INTERPRETER_STACK_EMPTY;
-#ifndef INTERPRETER_USE_SWITCH
- static void *dispatch[NR_BYTECODE_OPS] = {
- [ BYTECODE_OP_UNKNOWN ] = &&LABEL_BYTECODE_OP_UNKNOWN,
-
- [ BYTECODE_OP_RETURN ] = &&LABEL_BYTECODE_OP_RETURN,
-
- /* binary */
- [ BYTECODE_OP_MUL ] = &&LABEL_BYTECODE_OP_MUL,
- [ BYTECODE_OP_DIV ] = &&LABEL_BYTECODE_OP_DIV,
- [ BYTECODE_OP_MOD ] = &&LABEL_BYTECODE_OP_MOD,
- [ BYTECODE_OP_PLUS ] = &&LABEL_BYTECODE_OP_PLUS,
- [ BYTECODE_OP_MINUS ] = &&LABEL_BYTECODE_OP_MINUS,
- [ BYTECODE_OP_BIT_RSHIFT ] = &&LABEL_BYTECODE_OP_BIT_RSHIFT,
- [ BYTECODE_OP_BIT_LSHIFT ] = &&LABEL_BYTECODE_OP_BIT_LSHIFT,
- [ BYTECODE_OP_BIT_AND ] = &&LABEL_BYTECODE_OP_BIT_AND,
- [ BYTECODE_OP_BIT_OR ] = &&LABEL_BYTECODE_OP_BIT_OR,
- [ BYTECODE_OP_BIT_XOR ] = &&LABEL_BYTECODE_OP_BIT_XOR,
-
- /* binary comparators */
- [ BYTECODE_OP_EQ ] = &&LABEL_BYTECODE_OP_EQ,
- [ BYTECODE_OP_NE ] = &&LABEL_BYTECODE_OP_NE,
- [ BYTECODE_OP_GT ] = &&LABEL_BYTECODE_OP_GT,
- [ BYTECODE_OP_LT ] = &&LABEL_BYTECODE_OP_LT,
- [ BYTECODE_OP_GE ] = &&LABEL_BYTECODE_OP_GE,
- [ BYTECODE_OP_LE ] = &&LABEL_BYTECODE_OP_LE,
-
- /* string binary comparator */
- [ BYTECODE_OP_EQ_STRING ] = &&LABEL_BYTECODE_OP_EQ_STRING,
- [ BYTECODE_OP_NE_STRING ] = &&LABEL_BYTECODE_OP_NE_STRING,
- [ BYTECODE_OP_GT_STRING ] = &&LABEL_BYTECODE_OP_GT_STRING,
- [ BYTECODE_OP_LT_STRING ] = &&LABEL_BYTECODE_OP_LT_STRING,
- [ BYTECODE_OP_GE_STRING ] = &&LABEL_BYTECODE_OP_GE_STRING,
- [ BYTECODE_OP_LE_STRING ] = &&LABEL_BYTECODE_OP_LE_STRING,
-
- /* globbing pattern binary comparator */
- [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_EQ_STAR_GLOB_STRING,
- [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_NE_STAR_GLOB_STRING,
-
- /* s64 binary comparator */
- [ BYTECODE_OP_EQ_S64 ] = &&LABEL_BYTECODE_OP_EQ_S64,
- [ BYTECODE_OP_NE_S64 ] = &&LABEL_BYTECODE_OP_NE_S64,
- [ BYTECODE_OP_GT_S64 ] = &&LABEL_BYTECODE_OP_GT_S64,
- [ BYTECODE_OP_LT_S64 ] = &&LABEL_BYTECODE_OP_LT_S64,
- [ BYTECODE_OP_GE_S64 ] = &&LABEL_BYTECODE_OP_GE_S64,
- [ BYTECODE_OP_LE_S64 ] = &&LABEL_BYTECODE_OP_LE_S64,
-
- /* double binary comparator */
- [ BYTECODE_OP_EQ_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE,
- [ BYTECODE_OP_NE_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_DOUBLE,
- [ BYTECODE_OP_GT_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_DOUBLE,
- [ BYTECODE_OP_LT_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_DOUBLE,
- [ BYTECODE_OP_GE_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_DOUBLE,
- [ BYTECODE_OP_LE_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_DOUBLE,
-
- /* Mixed S64-double binary comparators */
- [ BYTECODE_OP_EQ_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE_S64,
- [ BYTECODE_OP_NE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_NE_DOUBLE_S64,
- [ BYTECODE_OP_GT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GT_DOUBLE_S64,
- [ BYTECODE_OP_LT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LT_DOUBLE_S64,
- [ BYTECODE_OP_GE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GE_DOUBLE_S64,
- [ BYTECODE_OP_LE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LE_DOUBLE_S64,
-
- [ BYTECODE_OP_EQ_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_S64_DOUBLE,
- [ BYTECODE_OP_NE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_S64_DOUBLE,
- [ BYTECODE_OP_GT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_S64_DOUBLE,
- [ BYTECODE_OP_LT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_S64_DOUBLE,
- [ BYTECODE_OP_GE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_S64_DOUBLE,
- [ BYTECODE_OP_LE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_S64_DOUBLE,
-
- /* unary */
- [ BYTECODE_OP_UNARY_PLUS ] = &&LABEL_BYTECODE_OP_UNARY_PLUS,
- [ BYTECODE_OP_UNARY_MINUS ] = &&LABEL_BYTECODE_OP_UNARY_MINUS,
- [ BYTECODE_OP_UNARY_NOT ] = &&LABEL_BYTECODE_OP_UNARY_NOT,
- [ BYTECODE_OP_UNARY_PLUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_S64,
- [ BYTECODE_OP_UNARY_MINUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_S64,
- [ BYTECODE_OP_UNARY_NOT_S64 ] = &&LABEL_BYTECODE_OP_UNARY_NOT_S64,
- [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_DOUBLE,
- [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_DOUBLE,
- [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_NOT_DOUBLE,
-
- /* logical */
- [ BYTECODE_OP_AND ] = &&LABEL_BYTECODE_OP_AND,
- [ BYTECODE_OP_OR ] = &&LABEL_BYTECODE_OP_OR,
-
- /* load field ref */
- [ BYTECODE_OP_LOAD_FIELD_REF ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF,
- [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_STRING,
- [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE,
- [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_S64,
- [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_DOUBLE,
-
- /* load from immediate operand */
- [ BYTECODE_OP_LOAD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STRING,
- [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STAR_GLOB_STRING,
- [ BYTECODE_OP_LOAD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_S64,
- [ BYTECODE_OP_LOAD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_DOUBLE,
-
- /* cast */
- [ BYTECODE_OP_CAST_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_TO_S64,
- [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_DOUBLE_TO_S64,
- [ BYTECODE_OP_CAST_NOP ] = &&LABEL_BYTECODE_OP_CAST_NOP,
-
- /* get context ref */
- [ BYTECODE_OP_GET_CONTEXT_REF ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF,
- [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_STRING,
- [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_S64,
- [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_DOUBLE,
-
- /* Instructions for recursive traversal through composed types. */
- [ BYTECODE_OP_GET_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_ROOT,
- [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_APP_CONTEXT_ROOT,
- [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = &&LABEL_BYTECODE_OP_GET_PAYLOAD_ROOT,
-
- [ BYTECODE_OP_GET_SYMBOL ] = &&LABEL_BYTECODE_OP_GET_SYMBOL,
- [ BYTECODE_OP_GET_SYMBOL_FIELD ] = &&LABEL_BYTECODE_OP_GET_SYMBOL_FIELD,
- [ BYTECODE_OP_GET_INDEX_U16 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U16,
- [ BYTECODE_OP_GET_INDEX_U64 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U64,
-
- [ BYTECODE_OP_LOAD_FIELD ] = &&LABEL_BYTECODE_OP_LOAD_FIELD,
- [ BYTECODE_OP_LOAD_FIELD_S8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S8,
- [ BYTECODE_OP_LOAD_FIELD_S16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S16,
- [ BYTECODE_OP_LOAD_FIELD_S32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S32,
- [ BYTECODE_OP_LOAD_FIELD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S64,
- [ BYTECODE_OP_LOAD_FIELD_U8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U8,
- [ BYTECODE_OP_LOAD_FIELD_U16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U16,
- [ BYTECODE_OP_LOAD_FIELD_U32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U32,
- [ BYTECODE_OP_LOAD_FIELD_U64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U64,
- [ BYTECODE_OP_LOAD_FIELD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_STRING,
- [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_SEQUENCE,
- [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_DOUBLE,
-
- [ BYTECODE_OP_UNARY_BIT_NOT ] = &&LABEL_BYTECODE_OP_UNARY_BIT_NOT,
-
- [ BYTECODE_OP_RETURN_S64 ] = &&LABEL_BYTECODE_OP_RETURN_S64,
- };
-#endif /* #ifndef INTERPRETER_USE_SWITCH */
-
- START_OP
-
- OP(BYTECODE_OP_UNKNOWN):
- OP(BYTECODE_OP_LOAD_FIELD_REF):
-#ifdef INTERPRETER_USE_SWITCH
- default:
-#endif /* INTERPRETER_USE_SWITCH */
- ERR("unknown bytecode op %u",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(BYTECODE_OP_RETURN):
- /* LTTNG_UST_BYTECODE_INTERPRETER_ERROR or LTTNG_UST_BYTECODE_INTERPRETER_OK */
- /* Handle dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- case REG_U64:
- retval = !!estack_ax_v;
- break;
- case REG_DOUBLE:
- case REG_STRING:
- case REG_PTR:
- if (ust_bytecode->type != LTTNG_UST_BYTECODE_TYPE_CAPTURE) {
- ret = -EINVAL;
- goto end;
- }
- retval = 0;
- break;
- case REG_STAR_GLOB_STRING:
- case REG_UNKNOWN:
- default:
- ret = -EINVAL;
- goto end;
- }
- ret = 0;
- goto end;
-
- OP(BYTECODE_OP_RETURN_S64):
- /* LTTNG_UST_BYTECODE_INTERPRETER_ERROR or LTTNG_UST_BYTECODE_INTERPRETER_OK */
- retval = !!estack_ax_v;
- ret = 0;
- goto end;
-
- /* binary */
- OP(BYTECODE_OP_MUL):
- OP(BYTECODE_OP_DIV):
- OP(BYTECODE_OP_MOD):
- OP(BYTECODE_OP_PLUS):
- OP(BYTECODE_OP_MINUS):
- ERR("unsupported bytecode op %u",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(BYTECODE_OP_EQ):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_EQ_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_EQ_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_EQ_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_EQ_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_EQ_STRING);
- case REG_STAR_GLOB_STRING:
- JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING);
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STAR_GLOB_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING);
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_NE):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_NE_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_NE_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_NE_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_NE_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- case REG_DOUBLE:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_NE_STRING);
- case REG_STAR_GLOB_STRING:
- JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING);
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STAR_GLOB_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- case REG_DOUBLE:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING);
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_GT):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_GT_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_GT_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_GT_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_GT_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_GT_STRING);
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_LT):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_LT_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_LT_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_LT_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_LT_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_LT_STRING);
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_GE):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_GE_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_GE_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_GE_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_GE_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_GE_STRING);
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_LE):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_LE_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_LE_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_LE_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_LE_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_LE_STRING);
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
-
- OP(BYTECODE_OP_EQ_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "==") == 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_NE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "!=") != 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GT_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, ">") > 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LT_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "<") < 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, ">=") >= 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "<=") <= 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(BYTECODE_OP_EQ_STAR_GLOB_STRING):
- {
- int res;
-
- res = (stack_star_glob_match(stack, top, "==") == 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_NE_STAR_GLOB_STRING):
- {
- int res;
-
- res = (stack_star_glob_match(stack, top, "!=") != 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(BYTECODE_OP_EQ_S64):
- {
- int res;
-
- res = (estack_bx_v == estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_NE_S64):
- {
- int res;
-
- res = (estack_bx_v != estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GT_S64):
- {
- int res;
-
- res = (estack_bx_v > estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LT_S64):
- {
- int res;
-
- res = (estack_bx_v < estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GE_S64):
- {
- int res;
-
- res = (estack_bx_v >= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LE_S64):
- {
- int res;
-
- res = (estack_bx_v <= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(BYTECODE_OP_EQ_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d == estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_NE_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d != estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GT_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d > estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LT_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d < estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GE_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d >= estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LE_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d <= estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- /* Mixed S64-double binary comparators */
- OP(BYTECODE_OP_EQ_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d == estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_NE_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d != estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GT_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d > estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LT_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d < estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GE_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d >= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LE_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d <= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(BYTECODE_OP_EQ_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v == estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_NE_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v != estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GT_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v > estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LT_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v < estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GE_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v >= estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LE_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v <= estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_BIT_RSHIFT):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- /* Catch undefined behavior. */
- if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
- ret = -EINVAL;
- goto end;
- }
- res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_BIT_LSHIFT):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- /* Catch undefined behavior. */
- if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
- ret = -EINVAL;
- goto end;
- }
- res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_BIT_AND):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_BIT_OR):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_BIT_XOR):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- /* unary */
- OP(BYTECODE_OP_UNARY_PLUS):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through. */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_UNARY_PLUS_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_UNARY_PLUS_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_UNARY_MINUS):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through. */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_UNARY_MINUS_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_UNARY_MINUS_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_UNARY_NOT):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through. */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_UNARY_NOT_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_UNARY_NOT_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct unary_op);
- PO;
- }
-
- OP(BYTECODE_OP_UNARY_BIT_NOT):
- {
- /* Dynamic typing. */
- if (!IS_INTEGER_REGISTER(estack_ax_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- estack_ax_v = ~(uint64_t) estack_ax_v;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct unary_op);
- PO;
- }
-
- OP(BYTECODE_OP_UNARY_PLUS_S64):
- OP(BYTECODE_OP_UNARY_PLUS_DOUBLE):
- {
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(BYTECODE_OP_UNARY_MINUS_S64):
- {
- estack_ax_v = -estack_ax_v;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(BYTECODE_OP_UNARY_MINUS_DOUBLE):
- {
- estack_ax(stack, top)->u.d = -estack_ax(stack, top)->u.d;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(BYTECODE_OP_UNARY_NOT_S64):
- {
- estack_ax_v = !estack_ax_v;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(BYTECODE_OP_UNARY_NOT_DOUBLE):
- {
- estack_ax_v = !estack_ax(stack, top)->u.d;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct unary_op);
- PO;
- }
-
- /* logical */
- OP(BYTECODE_OP_AND):
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) {
- ret = -EINVAL;
- goto end;
- }
- /* If AX is 0, skip and evaluate to 0 */
- if (unlikely(estack_ax_v == 0)) {
- dbg_printf("Jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- next_pc = start_pc + insn->skip_offset;
- } else {
- /* Pop 1 when jump not taken */
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- next_pc += sizeof(struct logical_op);
- }
- PO;
- }
- OP(BYTECODE_OP_OR):
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) {
- ret = -EINVAL;
- goto end;
- }
- /* If AX is nonzero, skip and evaluate to 1 */
- if (unlikely(estack_ax_v != 0)) {
- estack_ax_v = 1;
- dbg_printf("Jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- next_pc = start_pc + insn->skip_offset;
- } else {
- /* Pop 1 when jump not taken */
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- next_pc += sizeof(struct logical_op);
- }
- PO;
- }
-
-
- /* load field ref */
- OP(BYTECODE_OP_LOAD_FIELD_REF_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("load field ref offset %u type string\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str =
- *(const char * const *) &interpreter_stack_data[ref->offset];
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax_t = REG_STRING;
- dbg_printf("ref load string %s\n", estack_ax(stack, top)->u.s.str);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("load field ref offset %u type sequence\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.seq_len =
- *(unsigned long *) &interpreter_stack_data[ref->offset];
- estack_ax(stack, top)->u.s.str =
- *(const char **) (&interpreter_stack_data[ref->offset
- + sizeof(unsigned long)]);
- estack_ax_t = REG_STRING;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD_REF_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("load field ref offset %u type s64\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v =
- ((struct literal_numeric *) &interpreter_stack_data[ref->offset])->v;
- estack_ax_t = REG_S64;
- dbg_printf("ref load s64 %" PRIi64 "\n", estack_ax_v);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD_REF_DOUBLE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("load field ref offset %u type double\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- memcpy(&estack_ax(stack, top)->u.d, &interpreter_stack_data[ref->offset],
- sizeof(struct literal_double));
- estack_ax_t = REG_DOUBLE;
- dbg_printf("ref load double %g\n", estack_ax(stack, top)->u.d);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- /* load from immediate operand */
- OP(BYTECODE_OP_LOAD_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printf("load string %s\n", insn->data);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str = insn->data;
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_PLAIN;
- estack_ax_t = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_STAR_GLOB_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printf("load globbing pattern %s\n", insn->data);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str = insn->data;
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
- estack_ax_t = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = ((struct literal_numeric *) insn->data)->v;
- estack_ax_t = REG_S64;
- dbg_printf("load s64 %" PRIi64 "\n", estack_ax_v);
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_DOUBLE):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- memcpy(&estack_ax(stack, top)->u.d, insn->data,
- sizeof(struct literal_double));
- estack_ax_t = REG_DOUBLE;
- dbg_printf("load double %g\n", estack_ax(stack, top)->u.d);
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_double);
- PO;
- }
-
- /* cast */
- OP(BYTECODE_OP_CAST_TO_S64):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- JUMP_TO(BYTECODE_OP_CAST_NOP);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_CAST_DOUBLE_TO_S64);
- case REG_U64:
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct cast_op); /* Fall-through */
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
-
- OP(BYTECODE_OP_CAST_DOUBLE_TO_S64):
- {
- estack_ax_v = (int64_t) estack_ax(stack, top)->u.d;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct cast_op);
- PO;
- }
-
- OP(BYTECODE_OP_CAST_NOP):
- {
- next_pc += sizeof(struct cast_op);
- PO;
- }
-
- /* get context ref */
- OP(BYTECODE_OP_GET_CONTEXT_REF):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- const struct lttng_ust_ctx_field *ctx_field;
- struct lttng_ust_ctx_value v;
-
- dbg_printf("get context ref offset %u type dynamic\n",
- ref->offset);
- ctx_field = &ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field->priv, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- switch (v.sel) {
- case LTTNG_UST_DYNAMIC_TYPE_NONE:
- ret = -EINVAL;
- goto end;
- case LTTNG_UST_DYNAMIC_TYPE_S64:
- estack_ax_v = v.u.s64;
- estack_ax_t = REG_S64;
- dbg_printf("ref get context dynamic s64 %" PRIi64 "\n", estack_ax_v);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
- estack_ax(stack, top)->u.d = v.u.d;
- estack_ax_t = REG_DOUBLE;
- dbg_printf("ref get context dynamic double %g\n", estack_ax(stack, top)->u.d);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_STRING:
- estack_ax(stack, top)->u.s.str = v.u.str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- dbg_printf("ref get context dynamic string %s\n", estack_ax(stack, top)->u.s.str);
- estack_ax_t = REG_STRING;
- break;
- default:
- dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel);
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_GET_CONTEXT_REF_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- const struct lttng_ust_ctx_field *ctx_field;
- struct lttng_ust_ctx_value v;
-
- dbg_printf("get context ref offset %u type string\n",
- ref->offset);
- ctx_field = &ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field->priv, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str = v.u.str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax_t = REG_STRING;
- dbg_printf("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_GET_CONTEXT_REF_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- const struct lttng_ust_ctx_field *ctx_field;
- struct lttng_ust_ctx_value v;
-
- dbg_printf("get context ref offset %u type s64\n",
- ref->offset);
- ctx_field = &ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field->priv, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = v.u.s64;
- estack_ax_t = REG_S64;
- dbg_printf("ref get context s64 %" PRIi64 "\n", estack_ax_v);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_GET_CONTEXT_REF_DOUBLE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- const struct lttng_ust_ctx_field *ctx_field;
- struct lttng_ust_ctx_value v;
-
- dbg_printf("get context ref offset %u type double\n",
- ref->offset);
- ctx_field = &ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field->priv, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- memcpy(&estack_ax(stack, top)->u.d, &v.u.d, sizeof(struct literal_double));
- estack_ax_t = REG_DOUBLE;
- dbg_printf("ref get context double %g\n", estack_ax(stack, top)->u.d);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_GET_CONTEXT_ROOT):
- {
- dbg_printf("op get context root\n");
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- estack_ax_t = REG_PTR;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(BYTECODE_OP_GET_APP_CONTEXT_ROOT):
- {
- dbg_printf("op get app context root\n");
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_APP_CONTEXT;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- estack_ax_t = REG_PTR;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(BYTECODE_OP_GET_PAYLOAD_ROOT):
- {
- dbg_printf("op get app payload root\n");
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
- estack_ax(stack, top)->u.ptr.ptr = interpreter_stack_data;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- estack_ax_t = REG_PTR;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(BYTECODE_OP_GET_SYMBOL):
- {
- dbg_printf("op get symbol\n");
- switch (estack_ax(stack, top)->u.ptr.type) {
- case LOAD_OBJECT:
- ERR("Nested fields not implemented yet.");
- ret = -EINVAL;
- goto end;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- /*
- * symbol lookup is performed by
- * specialization.
- */
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- PO;
- }
-
- OP(BYTECODE_OP_GET_SYMBOL_FIELD):
- {
- /*
- * Used for first variant encountered in a
- * traversal. Variants are not implemented yet.
- */
- ret = -EINVAL;
- goto end;
- }
-
- OP(BYTECODE_OP_GET_INDEX_U16):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
- dbg_printf("op get index u16\n");
- ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- estack_ax_t = estack_ax(stack, top)->type;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- PO;
- }
-
- OP(BYTECODE_OP_GET_INDEX_U64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
- dbg_printf("op get index u64\n");
- ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- estack_ax_t = estack_ax(stack, top)->type;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD):
- {
- dbg_printf("op load field\n");
- ret = dynamic_load_field(estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- estack_ax_t = estack_ax(stack, top)->type;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD_S8):
- {
- dbg_printf("op load field s8\n");
-
- estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_S16):
- {
- dbg_printf("op load field s16\n");
-
- estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_S32):
- {
- dbg_printf("op load field s32\n");
-
- estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_S64):
- {
- dbg_printf("op load field s64\n");
-
- estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_U8):
- {
- dbg_printf("op load field u8\n");
-
- estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_U16):
- {
- dbg_printf("op load field u16\n");
-
- estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_U32):
- {
- dbg_printf("op load field u32\n");
-
- estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_U64):
- {
- dbg_printf("op load field u64\n");
-
- estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_DOUBLE):
- {
- dbg_printf("op load field double\n");
-
- memcpy(&estack_ax(stack, top)->u.d,
- estack_ax(stack, top)->u.ptr.ptr,
- sizeof(struct literal_double));
- estack_ax(stack, top)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD_STRING):
- {
- const char *str;
-
- dbg_printf("op load field string\n");
- str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax(stack, top)->u.s.str = str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD_SEQUENCE):
- {
- const char *ptr;
-
- dbg_printf("op load field string sequence\n");
- ptr = estack_ax(stack, top)->u.ptr.ptr;
- estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
- estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
- estack_ax(stack, top)->type = REG_STRING;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- END_OP
-end:
- /* No need to prepare output if an error occurred. */
- if (ret)
- return LTTNG_UST_BYTECODE_INTERPRETER_ERROR;
-
- /* Prepare output. */
- switch (ust_bytecode->type) {
- case LTTNG_UST_BYTECODE_TYPE_FILTER:
- {
- struct lttng_ust_bytecode_filter_ctx *filter_ctx =
- (struct lttng_ust_bytecode_filter_ctx *) caller_ctx;
- if (retval)
- filter_ctx->result = LTTNG_UST_BYTECODE_FILTER_ACCEPT;
- else
- filter_ctx->result = LTTNG_UST_BYTECODE_FILTER_REJECT;
- break;
- }
- case LTTNG_UST_BYTECODE_TYPE_CAPTURE:
- ret = lttng_bytecode_interpret_format_output(estack_ax(stack, top),
- (struct lttng_interpreter_output *) caller_ctx);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret)
- return LTTNG_UST_BYTECODE_INTERPRETER_ERROR;
- else
- return LTTNG_UST_BYTECODE_INTERPRETER_OK;
-}
-
-/*
- * Return LTTNG_UST_EVENT_FILTER_ACCEPT or LTTNG_UST_EVENT_FILTER_REJECT.
- */
-int lttng_ust_interpret_event_filter(struct lttng_ust_event_common *event,
- const char *interpreter_stack_data,
- void *event_filter_ctx __attribute__((unused)))
-{
- struct lttng_ust_bytecode_runtime *filter_bc_runtime;
- struct cds_list_head *filter_bytecode_runtime_head = &event->priv->filter_bytecode_runtime_head;
- struct lttng_ust_bytecode_filter_ctx bytecode_filter_ctx;
- bool filter_record = false;
-
- cds_list_for_each_entry_rcu(filter_bc_runtime, filter_bytecode_runtime_head, node) {
- if (caa_likely(filter_bc_runtime->interpreter_func(filter_bc_runtime,
- interpreter_stack_data, &bytecode_filter_ctx) == LTTNG_UST_BYTECODE_INTERPRETER_OK)) {
- if (caa_unlikely(bytecode_filter_ctx.result == LTTNG_UST_BYTECODE_FILTER_ACCEPT)) {
- filter_record = true;
- break;
- }
- }
- }
- if (filter_record)
- return LTTNG_UST_EVENT_FILTER_ACCEPT;
- else
- return LTTNG_UST_EVENT_FILTER_REJECT;
-}
-
-#undef START_OP
-#undef OP
-#undef PO
-#undef END_OP
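
Every comparison opcode removed above follows the same expression-stack pattern: compare the second-from-top register (bx) against the top register (ax), pop one entry, and overwrite the new top with a 0/1 result typed REG_S64. The standalone sketch below illustrates that pop-two/push-one shape only; the struct estack, helpers, and sizes here are simplified assumptions, not the LTTng-UST data structures or macros.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified expression stack; illustration only. */
struct estack_entry { int64_t v; };
struct estack {
	struct estack_entry e[16];
	int top;			/* index of ax; bx is e[top - 1] */
};

static void estack_push_v(struct estack *s, int64_t v)
{
	s->e[++s->top].v = v;
}

/* "bx > ax": pop both operands, push a 0/1 result in their place. */
static void op_gt_s64(struct estack *s)
{
	int64_t ax = s->e[s->top].v;
	int64_t bx = s->e[s->top - 1].v;

	s->top--;			/* pop one entry... */
	s->e[s->top].v = (bx > ax);	/* ...the result replaces the other */
}

int main(void)
{
	struct estack s = { .top = -1 };

	estack_push_v(&s, 7);		/* bx */
	estack_push_v(&s, 3);		/* ax */
	op_gt_s64(&s);			/* evaluates 7 > 3 */
	printf("%" PRId64 "\n", s.e[s.top].v);	/* prints 1 */
	return 0;
}
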
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST bytecode specializer.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include <lttng/ust-utils.h>
-
-#include "context-internal.h"
-#include "lttng-bytecode.h"
-#include "ust-events-internal.h"
-#include "common/macros.h"
-
-static int lttng_fls(int val)
-{
- int r = 32;
- unsigned int x = (unsigned int) val;
-
- if (!x)
- return 0;
- if (!(x & 0xFFFF0000U)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xFF000000U)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xF0000000U)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xC0000000U)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000U)) {
- r -= 1;
- }
- return r;
-}
-
-static int get_count_order(unsigned int count)
-{
- int order;
-
- order = lttng_fls(count) - 1;
- if (count & (count - 1))
- order++;
- return order;
-}
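
lttng_fls() and get_count_order() above form the usual find-last-set / ceil(log2(n)) pair: 1U << get_count_order(n) is the smallest power of two greater than or equal to n, which bytecode_reserve_data() below uses to grow the data area geometrically. The short check below illustrates the expected values; it uses a GCC/Clang __builtin_clz-based equivalent as a cross-check, not the portable implementation above.

#include <assert.h>

/* Illustrative ceil(log2(n)) for n >= 1 via __builtin_clz; sketch only. */
static int count_order(unsigned int n)
{
	return n <= 1 ? 0 : 32 - __builtin_clz(n - 1);
}

int main(void)
{
	assert(count_order(1) == 0);	/* 1U << 0  == 1 */
	assert(count_order(2) == 1);	/* 1U << 1  == 2 */
	assert(count_order(3) == 2);	/* 1U << 2  == 4 */
	assert(count_order(1024) == 10);
	assert(count_order(1025) == 11);	/* rounds up to 2048 */
	return 0;
}
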
-
-static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
- size_t align, size_t len)
-{
- ssize_t ret;
- size_t padding = lttng_ust_offset_align(runtime->data_len, align);
- size_t new_len = runtime->data_len + padding + len;
- size_t new_alloc_len = new_len;
- size_t old_alloc_len = runtime->data_alloc_len;
-
- if (new_len > BYTECODE_MAX_DATA_LEN)
- return -EINVAL;
-
- if (new_alloc_len > old_alloc_len) {
- char *newptr;
-
- new_alloc_len =
- max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
- newptr = realloc(runtime->data, new_alloc_len);
- if (!newptr)
- return -ENOMEM;
- runtime->data = newptr;
- /* We zero directly the memory from start of allocation. */
- memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
- runtime->data_alloc_len = new_alloc_len;
- }
- runtime->data_len += padding;
- ret = runtime->data_len;
- runtime->data_len += len;
- return ret;
-}
-
-static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
- const void *p, size_t align, size_t len)
-{
- ssize_t offset;
-
- offset = bytecode_reserve_data(runtime, align, len);
- if (offset < 0)
- return -ENOMEM;
- memcpy(&runtime->data[offset], p, len);
- return offset;
-}
-
-static int specialize_load_field(struct vstack_entry *stack_top,
- struct load_op *insn)
-{
- int ret;
-
- switch (stack_top->load.type) {
- case LOAD_OBJECT:
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- default:
- dbg_printf("Bytecode warning: cannot load root, missing field name.\n");
- ret = -EINVAL;
- goto end;
- }
- switch (stack_top->load.object_type) {
- case OBJECT_TYPE_S8:
- dbg_printf("op load field s8\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_S8;
- break;
- case OBJECT_TYPE_S16:
- dbg_printf("op load field s16\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_S16;
- break;
- case OBJECT_TYPE_S32:
- dbg_printf("op load field s32\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_S32;
- break;
- case OBJECT_TYPE_S64:
- dbg_printf("op load field s64\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_S64;
- break;
- case OBJECT_TYPE_SIGNED_ENUM:
- dbg_printf("op load field signed enumeration\n");
- stack_top->type = REG_PTR;
- break;
- case OBJECT_TYPE_U8:
- dbg_printf("op load field u8\n");
- stack_top->type = REG_U64;
- insn->op = BYTECODE_OP_LOAD_FIELD_U8;
- break;
- case OBJECT_TYPE_U16:
- dbg_printf("op load field u16\n");
- stack_top->type = REG_U64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_U16;
- break;
- case OBJECT_TYPE_U32:
- dbg_printf("op load field u32\n");
- stack_top->type = REG_U64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_U32;
- break;
- case OBJECT_TYPE_U64:
- dbg_printf("op load field u64\n");
- stack_top->type = REG_U64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_U64;
- break;
- case OBJECT_TYPE_UNSIGNED_ENUM:
- dbg_printf("op load field unsigned enumeration\n");
- stack_top->type = REG_PTR;
- break;
- case OBJECT_TYPE_DOUBLE:
- stack_top->type = REG_DOUBLE;
- insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE;
- break;
- case OBJECT_TYPE_STRING:
- dbg_printf("op load field string\n");
- stack_top->type = REG_STRING;
- insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
- break;
- case OBJECT_TYPE_STRING_SEQUENCE:
- dbg_printf("op load field string sequence\n");
- stack_top->type = REG_STRING;
- insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
- break;
- case OBJECT_TYPE_DYNAMIC:
- dbg_printf("op load field dynamic\n");
- stack_top->type = REG_UNKNOWN;
- /* Don't specialize load op. */
- break;
- case OBJECT_TYPE_SEQUENCE:
- case OBJECT_TYPE_ARRAY:
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
- ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
- ret = -EINVAL;
- goto end;
- }
- return 0;
-
-end:
- return ret;
-}
-
-static int specialize_get_index_object_type(enum object_type *otype,
- int signedness, uint32_t elem_len)
-{
- switch (elem_len) {
- case 8:
- if (signedness)
- *otype = OBJECT_TYPE_S8;
- else
- *otype = OBJECT_TYPE_U8;
- break;
- case 16:
- if (signedness)
- *otype = OBJECT_TYPE_S16;
- else
- *otype = OBJECT_TYPE_U16;
- break;
- case 32:
- if (signedness)
- *otype = OBJECT_TYPE_S32;
- else
- *otype = OBJECT_TYPE_U32;
- break;
- case 64:
- if (signedness)
- *otype = OBJECT_TYPE_S64;
- else
- *otype = OBJECT_TYPE_U64;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int specialize_get_index(struct bytecode_runtime *runtime,
- struct load_op *insn, uint64_t index,
- struct vstack_entry *stack_top,
- int idx_len)
-{
- int ret;
- struct bytecode_get_index_data gid;
- ssize_t data_offset;
-
- memset(&gid, 0, sizeof(gid));
- switch (stack_top->load.type) {
- case LOAD_OBJECT:
- switch (stack_top->load.object_type) {
- case OBJECT_TYPE_ARRAY:
- {
- const struct lttng_ust_type_integer *integer_type;
- const struct lttng_ust_event_field *field;
- uint32_t elem_len, num_elems;
- int signedness;
-
- field = stack_top->load.field;
- switch (field->type->type) {
- case lttng_ust_type_array:
- if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
- ret = -EINVAL;
- goto end;
- }
- integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_array(field->type)->elem_type);
- num_elems = lttng_ust_get_type_array(field->type)->length;
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
- elem_len = integer_type->size;
- signedness = integer_type->signedness;
- if (index >= num_elems) {
- ret = -EINVAL;
- goto end;
- }
- ret = specialize_get_index_object_type(&stack_top->load.object_type,
- signedness, elem_len);
- if (ret)
- goto end;
- gid.offset = index * (elem_len / CHAR_BIT);
- gid.array_len = num_elems * (elem_len / CHAR_BIT);
- gid.elem.type = stack_top->load.object_type;
- gid.elem.len = elem_len;
- if (integer_type->reverse_byte_order)
- gid.elem.rev_bo = true;
- stack_top->load.rev_bo = gid.elem.rev_bo;
- break;
- }
- case OBJECT_TYPE_SEQUENCE:
- {
- const struct lttng_ust_type_integer *integer_type;
- const struct lttng_ust_event_field *field;
- uint32_t elem_len;
- int signedness;
-
- field = stack_top->load.field;
- switch (field->type->type) {
- case lttng_ust_type_sequence:
- if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
- ret = -EINVAL;
- goto end;
- }
- integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_sequence(field->type)->elem_type);
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
- elem_len = integer_type->size;
- signedness = integer_type->signedness;
- ret = specialize_get_index_object_type(&stack_top->load.object_type,
- signedness, elem_len);
- if (ret)
- goto end;
- gid.offset = index * (elem_len / CHAR_BIT);
- gid.elem.type = stack_top->load.object_type;
- gid.elem.len = elem_len;
- if (integer_type->reverse_byte_order)
- gid.elem.rev_bo = true;
- stack_top->load.rev_bo = gid.elem.rev_bo;
- break;
- }
- case OBJECT_TYPE_STRUCT:
- /* Only generated by the specialize phase. */
- case OBJECT_TYPE_VARIANT: /* Fall-through */
- default:
- ERR("Unexpected get index type %d",
- (int) stack_top->load.object_type);
- ret = -EINVAL;
- goto end;
- }
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- ERR("Index lookup for root field not implemented yet.");
- ret = -EINVAL;
- goto end;
- }
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- switch (idx_len) {
- case 2:
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- break;
- case 8:
- ((struct get_index_u64 *) insn->data)->index = data_offset;
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
-
- return 0;
-
-end:
- return ret;
-}
-
-static int specialize_context_lookup_name(struct lttng_ust_ctx *ctx,
- struct bytecode_runtime *bytecode,
- struct load_op *insn)
-{
- uint16_t offset;
- const char *name;
-
- offset = ((struct get_symbol *) insn->data)->offset;
- name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
- return lttng_get_context_index(ctx, name);
-}
-
-static int specialize_load_object(const struct lttng_ust_event_field *field,
- struct vstack_load *load, bool is_context)
-{
- load->type = LOAD_OBJECT;
-
- switch (field->type->type) {
- case lttng_ust_type_integer:
- if (lttng_ust_get_type_integer(field->type)->signedness)
- load->object_type = OBJECT_TYPE_S64;
- else
- load->object_type = OBJECT_TYPE_U64;
- load->rev_bo = false;
- break;
- case lttng_ust_type_enum:
- {
- const struct lttng_ust_type_integer *itype;
-
- itype = lttng_ust_get_type_integer(lttng_ust_get_type_enum(field->type)->container_type);
- if (itype->signedness)
- load->object_type = OBJECT_TYPE_SIGNED_ENUM;
- else
- load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
- load->rev_bo = false;
- break;
- }
- case lttng_ust_type_array:
- if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
- ERR("Array nesting only supports integer types.");
- return -EINVAL;
- }
- if (is_context) {
- load->object_type = OBJECT_TYPE_STRING;
- } else {
- if (lttng_ust_get_type_array(field->type)->encoding == lttng_ust_string_encoding_none) {
- load->object_type = OBJECT_TYPE_ARRAY;
- load->field = field;
- } else {
- load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
- }
- }
- break;
- case lttng_ust_type_sequence:
- if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
- ERR("Sequence nesting only supports integer types.");
- return -EINVAL;
- }
- if (is_context) {
- load->object_type = OBJECT_TYPE_STRING;
- } else {
- if (lttng_ust_get_type_sequence(field->type)->encoding == lttng_ust_string_encoding_none) {
- load->object_type = OBJECT_TYPE_SEQUENCE;
- load->field = field;
- } else {
- load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
- }
- }
- break;
-
- case lttng_ust_type_string:
- load->object_type = OBJECT_TYPE_STRING;
- break;
- case lttng_ust_type_float:
- load->object_type = OBJECT_TYPE_DOUBLE;
- break;
- case lttng_ust_type_dynamic:
- load->object_type = OBJECT_TYPE_DYNAMIC;
- break;
- default:
- ERR("Unknown type: %d", (int) field->type->type);
- return -EINVAL;
- }
- return 0;
-}
-
-static int specialize_context_lookup(struct lttng_ust_ctx *ctx,
- struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- int idx, ret;
- const struct lttng_ust_ctx_field *ctx_field;
- const struct lttng_ust_event_field *field;
- struct bytecode_get_index_data gid;
- ssize_t data_offset;
-
- idx = specialize_context_lookup_name(ctx, runtime, insn);
- if (idx < 0) {
- return -ENOENT;
- }
- ctx_field = &ctx->fields[idx];
- field = ctx_field->event_field;
- ret = specialize_load_object(field, load, true);
- if (ret)
- return ret;
- /* Specialize each get_symbol into a get_index. */
- insn->op = BYTECODE_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.ctx_index = idx;
- gid.elem.type = load->object_type;
- gid.elem.rev_bo = load->rev_bo;
- gid.field = field;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- return -EINVAL;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- return 0;
-}
-
-static int specialize_app_context_lookup(struct lttng_ust_ctx **pctx,
- struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- uint16_t offset;
- const char *orig_name;
- char *name = NULL;
- int idx, ret;
- const struct lttng_ust_ctx_field *ctx_field;
- const struct lttng_ust_event_field *field;
- struct bytecode_get_index_data gid;
- ssize_t data_offset;
-
- offset = ((struct get_symbol *) insn->data)->offset;
- orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
- name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
- if (!name) {
- ret = -ENOMEM;
- goto end;
- }
- strcpy(name, "$app.");
- strcat(name, orig_name);
- idx = lttng_get_context_index(*pctx, name);
- if (idx < 0) {
- assert(lttng_context_is_app(name));
- ret = lttng_ust_add_app_context_to_ctx_rcu(name,
- pctx);
- if (ret)
- return ret;
- idx = lttng_get_context_index(*pctx, name);
- if (idx < 0)
- return -ENOENT;
- }
- ctx_field = &(*pctx)->fields[idx];
- field = ctx_field->event_field;
- ret = specialize_load_object(field, load, true);
- if (ret)
- goto end;
- /* Specialize each get_symbol into a get_index. */
- insn->op = BYTECODE_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.ctx_index = idx;
- gid.elem.type = load->object_type;
- gid.elem.rev_bo = load->rev_bo;
- gid.field = field;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- ret = 0;
-end:
- free(name);
- return ret;
-}
-
-static int specialize_payload_lookup(const struct lttng_ust_event_desc *event_desc,
- struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- const char *name;
- uint16_t offset;
- unsigned int i, nr_fields;
- bool found = false;
- uint32_t field_offset = 0;
- const struct lttng_ust_event_field *field;
- int ret;
- struct bytecode_get_index_data gid;
- ssize_t data_offset;
-
- nr_fields = event_desc->nr_fields;
- offset = ((struct get_symbol *) insn->data)->offset;
- name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
- for (i = 0; i < nr_fields; i++) {
- field = event_desc->fields[i];
- if (field->nofilter) {
- continue;
- }
- if (!strcmp(field->name, name)) {
- found = true;
- break;
- }
- /* compute field offset on stack */
- switch (field->type->type) {
- case lttng_ust_type_integer:
- case lttng_ust_type_enum:
- field_offset += sizeof(int64_t);
- break;
- case lttng_ust_type_array:
- case lttng_ust_type_sequence:
- field_offset += sizeof(unsigned long);
- field_offset += sizeof(void *);
- break;
- case lttng_ust_type_string:
- field_offset += sizeof(void *);
- break;
- case lttng_ust_type_float:
- field_offset += sizeof(double);
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
- }
- if (!found) {
- ret = -EINVAL;
- goto end;
- }
-
- ret = specialize_load_object(field, load, false);
- if (ret)
- goto end;
-
- /* Specialize each get_symbol into a get_index. */
- insn->op = BYTECODE_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.offset = field_offset;
- gid.elem.type = load->object_type;
- gid.elem.rev_bo = load->rev_bo;
- gid.field = field;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- ret = 0;
-end:
- return ret;
-}
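
specialize_payload_lookup() above resolves a field name by walking the event fields in declaration order and accumulating the offset each earlier field occupies in the interpreter stack data: an int64_t slot for integers and enums, a pointer for strings, a length plus a pointer for arrays and sequences, a double for floats. The snippet below is a small worked illustration of that accumulation rule with a hypothetical field list; the enum and helper names are invented for the example and match only the per-type sizes of the switch above.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field kinds; sizes mirror the per-type cases above. */
enum ftype { F_INT, F_STRING, F_SEQUENCE, F_FLOAT };

static size_t field_stack_size(enum ftype t)
{
	switch (t) {
	case F_INT:      return sizeof(int64_t);
	case F_STRING:   return sizeof(void *);
	case F_SEQUENCE: return sizeof(unsigned long) + sizeof(void *);
	case F_FLOAT:    return sizeof(double);
	}
	return 0;
}

int main(void)
{
	/* Hypothetical event payload: an s64, a string, then a sequence. */
	enum ftype fields[] = { F_INT, F_STRING, F_SEQUENCE };
	size_t offset = 0;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		printf("field %u starts at stack offset %zu\n", i, offset);
		offset += field_stack_size(fields[i]);
	}
	return 0;
}
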
-
-int lttng_bytecode_specialize(const struct lttng_ust_event_desc *event_desc,
- struct bytecode_runtime *bytecode)
-{
- void *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- struct vstack _stack;
- struct vstack *stack = &_stack;
- struct lttng_ust_ctx **pctx = bytecode->p.pctx;
-
- vstack_init(stack);
-
- start_pc = &bytecode->code[0];
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
- pc = next_pc) {
- switch (*(bytecode_opcode_t *) pc) {
- case BYTECODE_OP_UNKNOWN:
- default:
- ERR("unknown bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- case BYTECODE_OP_RETURN:
- if (vstack_ax(stack)->type == REG_S64 ||
- vstack_ax(stack)->type == REG_U64)
- *(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64;
- ret = 0;
- goto end;
-
- case BYTECODE_OP_RETURN_S64:
- if (vstack_ax(stack)->type != REG_S64 &&
- vstack_ax(stack)->type != REG_U64) {
- ERR("Unexpected register type\n");
- ret = -EINVAL;
- goto end;
- }
- ret = 0;
- goto end;
-
- /* binary */
- case BYTECODE_OP_MUL:
- case BYTECODE_OP_DIV:
- case BYTECODE_OP_MOD:
- case BYTECODE_OP_PLUS:
- case BYTECODE_OP_MINUS:
- ERR("unsupported bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- case BYTECODE_OP_EQ:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
- insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
- else
- insn->op = BYTECODE_OP_EQ_STRING;
- break;
- case REG_STAR_GLOB_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
- break;
- case REG_S64:
- case REG_U64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_EQ_S64;
- else
- insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
- else
- insn->op = BYTECODE_OP_EQ_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_NE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
- insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
- else
- insn->op = BYTECODE_OP_NE_STRING;
- break;
- case REG_STAR_GLOB_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
- break;
- case REG_S64:
- case REG_U64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_NE_S64;
- else
- insn->op = BYTECODE_OP_NE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_NE_S64_DOUBLE;
- else
- insn->op = BYTECODE_OP_NE_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_GT:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- ERR("invalid register type for > binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = BYTECODE_OP_GT_STRING;
- break;
- case REG_S64:
- case REG_U64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_GT_S64;
- else
- insn->op = BYTECODE_OP_GT_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_GT_S64_DOUBLE;
- else
- insn->op = BYTECODE_OP_GT_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_LT:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- ERR("invalid register type for < binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = BYTECODE_OP_LT_STRING;
- break;
- case REG_S64:
- case REG_U64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_LT_S64;
- else
- insn->op = BYTECODE_OP_LT_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_LT_S64_DOUBLE;
- else
- insn->op = BYTECODE_OP_LT_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_GE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- ERR("invalid register type for >= binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = BYTECODE_OP_GE_STRING;
- break;
- case REG_S64:
- case REG_U64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_GE_S64;
- else
- insn->op = BYTECODE_OP_GE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_GE_S64_DOUBLE;
- else
- insn->op = BYTECODE_OP_GE_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct binary_op);
- break;
- }
- case BYTECODE_OP_LE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- ERR("invalid register type for <= binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = BYTECODE_OP_LE_STRING;
- break;
- case REG_S64:
- case REG_U64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_LE_S64;
- else
- insn->op = BYTECODE_OP_LE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_LE_S64_DOUBLE;
- else
- insn->op = BYTECODE_OP_LE_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_EQ_STRING:
- case BYTECODE_OP_NE_STRING:
- case BYTECODE_OP_GT_STRING:
- case BYTECODE_OP_LT_STRING:
- case BYTECODE_OP_GE_STRING:
- case BYTECODE_OP_LE_STRING:
- case BYTECODE_OP_EQ_STAR_GLOB_STRING:
- case BYTECODE_OP_NE_STAR_GLOB_STRING:
- case BYTECODE_OP_EQ_S64:
- case BYTECODE_OP_NE_S64:
- case BYTECODE_OP_GT_S64:
- case BYTECODE_OP_LT_S64:
- case BYTECODE_OP_GE_S64:
- case BYTECODE_OP_LE_S64:
- case BYTECODE_OP_EQ_DOUBLE:
- case BYTECODE_OP_NE_DOUBLE:
- case BYTECODE_OP_GT_DOUBLE:
- case BYTECODE_OP_LT_DOUBLE:
- case BYTECODE_OP_GE_DOUBLE:
- case BYTECODE_OP_LE_DOUBLE:
- case BYTECODE_OP_EQ_DOUBLE_S64:
- case BYTECODE_OP_NE_DOUBLE_S64:
- case BYTECODE_OP_GT_DOUBLE_S64:
- case BYTECODE_OP_LT_DOUBLE_S64:
- case BYTECODE_OP_GE_DOUBLE_S64:
- case BYTECODE_OP_LE_DOUBLE_S64:
- case BYTECODE_OP_EQ_S64_DOUBLE:
- case BYTECODE_OP_NE_S64_DOUBLE:
- case BYTECODE_OP_GT_S64_DOUBLE:
- case BYTECODE_OP_LT_S64_DOUBLE:
- case BYTECODE_OP_GE_S64_DOUBLE:
- case BYTECODE_OP_LE_S64_DOUBLE:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_BIT_RSHIFT:
- case BYTECODE_OP_BIT_LSHIFT:
- case BYTECODE_OP_BIT_AND:
- case BYTECODE_OP_BIT_OR:
- case BYTECODE_OP_BIT_XOR:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- /* unary */
- case BYTECODE_OP_UNARY_PLUS:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- case REG_U64:
- insn->op = BYTECODE_OP_UNARY_PLUS_S64;
- break;
- case REG_DOUBLE:
- insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
- break;
- case REG_UNKNOWN: /* Dynamic typing. */
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_MINUS:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- case REG_U64:
- insn->op = BYTECODE_OP_UNARY_MINUS_S64;
- break;
- case REG_DOUBLE:
- insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
- break;
- case REG_UNKNOWN: /* Dynamic typing. */
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_NOT:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- case REG_U64:
- insn->op = BYTECODE_OP_UNARY_NOT_S64;
- break;
- case REG_DOUBLE:
- insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
- break;
- case REG_UNKNOWN: /* Dynamic typing. */
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_BIT_NOT:
- {
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_PLUS_S64:
- case BYTECODE_OP_UNARY_MINUS_S64:
- case BYTECODE_OP_UNARY_NOT_S64:
- case BYTECODE_OP_UNARY_PLUS_DOUBLE:
- case BYTECODE_OP_UNARY_MINUS_DOUBLE:
- case BYTECODE_OP_UNARY_NOT_DOUBLE:
- {
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- /* logical */
- case BYTECODE_OP_AND:
- case BYTECODE_OP_OR:
- {
- /* Continue to next instruction */
- /* Pop 1 when jump not taken */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct logical_op);
- break;
- }
-
- /* load field ref */
- case BYTECODE_OP_LOAD_FIELD_REF:
- {
- ERR("Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- /* get context ref */
- case BYTECODE_OP_GET_CONTEXT_REF:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_UNKNOWN;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_STRING:
- case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
- case BYTECODE_OP_GET_CONTEXT_REF_STRING:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_S64:
- case BYTECODE_OP_GET_CONTEXT_REF_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
- case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
-
- /* load from immediate operand */
- case BYTECODE_OP_LOAD_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case BYTECODE_OP_LOAD_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- break;
- }
-
- case BYTECODE_OP_LOAD_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_double);
- break;
- }
-
- /* cast */
- case BYTECODE_OP_CAST_TO_S64:
- {
- struct cast_op *insn = (struct cast_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- ERR("Cast op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- insn->op = BYTECODE_OP_CAST_NOP;
- break;
- case REG_DOUBLE:
- insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
- break;
- case REG_UNKNOWN:
- case REG_U64:
- break;
- }
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case BYTECODE_OP_CAST_DOUBLE_TO_S64:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case BYTECODE_OP_CAST_NOP:
- {
- next_pc += sizeof(struct cast_op);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case BYTECODE_OP_GET_CONTEXT_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
- next_pc += sizeof(struct load_op);
- break;
- }
- case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
- next_pc += sizeof(struct load_op);
- break;
- }
- case BYTECODE_OP_GET_PAYLOAD_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- assert(vstack_ax(stack)->type == REG_PTR);
- /* Pop 1, push 1 */
- ret = specialize_load_field(vstack_ax(stack), insn);
- if (ret)
- goto end;
-
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_S8:
- case BYTECODE_OP_LOAD_FIELD_S16:
- case BYTECODE_OP_LOAD_FIELD_S32:
- case BYTECODE_OP_LOAD_FIELD_S64:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_U8:
- case BYTECODE_OP_LOAD_FIELD_U16:
- case BYTECODE_OP_LOAD_FIELD_U32:
- case BYTECODE_OP_LOAD_FIELD_U64:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_STRING:
- case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_DOUBLE:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printf("op get symbol\n");
- switch (vstack_ax(stack)->load.type) {
- case LOAD_OBJECT:
- ERR("Nested fields not implemented yet.");
- ret = -EINVAL;
- goto end;
- case LOAD_ROOT_CONTEXT:
- /* Lookup context field. */
- ret = specialize_context_lookup(*pctx,
- bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- case LOAD_ROOT_APP_CONTEXT:
- /* Lookup app context field. */
- ret = specialize_app_context_lookup(pctx,
- bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- case LOAD_ROOT_PAYLOAD:
- /* Lookup event payload field. */
- ret = specialize_payload_lookup(event_desc,
- bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- break;
- }
-
- case BYTECODE_OP_GET_SYMBOL_FIELD:
- {
- /* Always generated by specialize phase. */
- ret = -EINVAL;
- goto end;
- }
-
- case BYTECODE_OP_GET_INDEX_U16:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
- dbg_printf("op get index u16\n");
- /* Pop 1, push 1 */
- ret = specialize_get_index(bytecode, insn, index->index,
- vstack_ax(stack), sizeof(*index));
- if (ret)
- goto end;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- break;
- }
-
- case BYTECODE_OP_GET_INDEX_U64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
- dbg_printf("op get index u64\n");
- /* Pop 1, push 1 */
- ret = specialize_get_index(bytecode, insn, index->index,
- vstack_ax(stack), sizeof(*index));
- if (ret)
- goto end;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- break;
- }
-
- }
- }
-end:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST bytecode validator.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-#include <time.h>
-
-#include "rculfhash.h"
-
-#include "lttng-bytecode.h"
-#include "lttng-hash-helper.h"
-#include "string-utils.h"
-#include "ust-events-internal.h"
-#include "common/macros.h"
-
-/*
- * Number of merge points for hash table size. Hash table initialized to
- * that size, and we do not resize, because we do not want to trigger
- * RCU worker thread execution: fall-back on linear traversal if number
- * of merge points exceeds this value.
- */
-#define DEFAULT_NR_MERGE_POINTS 128
-#define MIN_NR_BUCKETS 128
-#define MAX_NR_BUCKETS 128
-
-/* merge point table node */
-struct lfht_mp_node {
- struct lttng_ust_lfht_node node;
-
- /* Context at merge point */
- struct vstack stack;
- unsigned long target_pc;
-};
-
-static unsigned long lttng_hash_seed;
-static unsigned int lttng_hash_seed_ready;
-
-static
-int lttng_hash_match(struct lttng_ust_lfht_node *node, const void *key)
-{
- struct lfht_mp_node *mp_node =
- caa_container_of(node, struct lfht_mp_node, node);
- unsigned long key_pc = (unsigned long) key;
-
- if (mp_node->target_pc == key_pc)
- return 1;
- else
- return 0;
-}
-
-static
-int merge_points_compare(const struct vstack *stacka,
- const struct vstack *stackb)
-{
- int i, len;
-
- if (stacka->top != stackb->top)
- return 1;
- len = stacka->top + 1;
- assert(len >= 0);
- for (i = 0; i < len; i++) {
- if (stacka->e[i].type != REG_UNKNOWN
- && stackb->e[i].type != REG_UNKNOWN
- && stacka->e[i].type != stackb->e[i].type)
- return 1;
- }
- return 0;
-}
-
-static
-int merge_point_add_check(struct lttng_ust_lfht *ht, unsigned long target_pc,
- const struct vstack *stack)
-{
- struct lfht_mp_node *node;
- unsigned long hash = lttng_hash_mix((const char *) target_pc,
- sizeof(target_pc),
- lttng_hash_seed);
- struct lttng_ust_lfht_node *ret;
-
- dbg_printf("Bytecode: adding merge point at offset %lu, hash %lu\n",
- target_pc, hash);
- node = zmalloc(sizeof(struct lfht_mp_node));
- if (!node)
- return -ENOMEM;
- node->target_pc = target_pc;
- memcpy(&node->stack, stack, sizeof(node->stack));
- ret = lttng_ust_lfht_add_unique(ht, hash, lttng_hash_match,
- (const char *) target_pc, &node->node);
- if (ret != &node->node) {
- struct lfht_mp_node *ret_mp =
- caa_container_of(ret, struct lfht_mp_node, node);
-
- /* Key already present */
- dbg_printf("Bytecode: compare merge points for offset %lu, hash %lu\n",
- target_pc, hash);
- free(node);
- if (merge_points_compare(stack, &ret_mp->stack)) {
- ERR("Merge points differ for offset %lu\n",
- target_pc);
- return -EINVAL;
- }
- }
- return 0;
-}
-
-/*
- * Binary comparators use top of stack and top of stack -1.
- * Return 0 if typing is known to match, 1 if typing is dynamic
- * (unknown), negative error value on error.
- */
-static
-int bin_op_compare_check(struct vstack *stack, bytecode_opcode_t opcode,
- const char *str)
-{
- if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
- goto error_empty;
-
- switch (vstack_ax(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_STRING:
- switch (vstack_bx(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_STRING:
- break;
- case REG_STAR_GLOB_STRING:
- if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
- goto error_mismatch;
- }
- break;
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- goto error_mismatch;
- }
- break;
- case REG_STAR_GLOB_STRING:
- switch (vstack_bx(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_STRING:
- if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
- goto error_mismatch;
- }
- break;
- case REG_STAR_GLOB_STRING:
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- goto error_mismatch;
- }
- break;
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- switch (vstack_bx(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- goto error_mismatch;
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- break;
- }
- break;
- }
- return 0;
-
-unknown:
- return 1;
-
-error_mismatch:
- ERR("type mismatch for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_empty:
- ERR("empty stack for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_type:
- ERR("unknown type for '%s' binary operator\n", str);
- return -EINVAL;
-}
-
-/*
- * Binary bitwise operators use top of stack and top of stack -1.
- * Return 0 if typing is known to match, 1 if typing is dynamic
- * (unknown), negative error value on error.
- */
-static
-int bin_op_bitwise_check(struct vstack *stack,
- bytecode_opcode_t opcode __attribute__((unused)),
- const char *str)
-{
- if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
- goto error_empty;
-
- switch (vstack_ax(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_S64:
- case REG_U64:
- switch (vstack_bx(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_S64:
- case REG_U64:
- break;
- }
- break;
- }
- return 0;
-
-unknown:
- return 1;
-
-error_empty:
- ERR("empty stack for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_type:
- ERR("unknown type for '%s' binary operator\n", str);
- return -EINVAL;
-}
-
-static
-int validate_get_symbol(struct bytecode_runtime *bytecode,
- const struct get_symbol *sym)
-{
- const char *str, *str_limit;
- size_t len_limit;
-
- if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
- return -EINVAL;
-
- str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
- str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
- len_limit = str_limit - str;
- if (strnlen(str, len_limit) == len_limit)
- return -EINVAL;
- return 0;
-}
-
-/*
- * Validate bytecode range overflow within the validation pass.
- * Called for each instruction encountered.
- */
-static
-int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
- char *start_pc, char *pc)
-{
- int ret = 0;
-
- switch (*(bytecode_opcode_t *) pc) {
- case BYTECODE_OP_UNKNOWN:
- default:
- {
- ERR("unknown bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- break;
- }
-
- case BYTECODE_OP_RETURN:
- case BYTECODE_OP_RETURN_S64:
- {
- if (unlikely(pc + sizeof(struct return_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* binary */
- case BYTECODE_OP_MUL:
- case BYTECODE_OP_DIV:
- case BYTECODE_OP_MOD:
- case BYTECODE_OP_PLUS:
- case BYTECODE_OP_MINUS:
- {
- ERR("unsupported bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- break;
- }
-
- case BYTECODE_OP_EQ:
- case BYTECODE_OP_NE:
- case BYTECODE_OP_GT:
- case BYTECODE_OP_LT:
- case BYTECODE_OP_GE:
- case BYTECODE_OP_LE:
- case BYTECODE_OP_EQ_STRING:
- case BYTECODE_OP_NE_STRING:
- case BYTECODE_OP_GT_STRING:
- case BYTECODE_OP_LT_STRING:
- case BYTECODE_OP_GE_STRING:
- case BYTECODE_OP_LE_STRING:
- case BYTECODE_OP_EQ_STAR_GLOB_STRING:
- case BYTECODE_OP_NE_STAR_GLOB_STRING:
- case BYTECODE_OP_EQ_S64:
- case BYTECODE_OP_NE_S64:
- case BYTECODE_OP_GT_S64:
- case BYTECODE_OP_LT_S64:
- case BYTECODE_OP_GE_S64:
- case BYTECODE_OP_LE_S64:
- case BYTECODE_OP_EQ_DOUBLE:
- case BYTECODE_OP_NE_DOUBLE:
- case BYTECODE_OP_GT_DOUBLE:
- case BYTECODE_OP_LT_DOUBLE:
- case BYTECODE_OP_GE_DOUBLE:
- case BYTECODE_OP_LE_DOUBLE:
- case BYTECODE_OP_EQ_DOUBLE_S64:
- case BYTECODE_OP_NE_DOUBLE_S64:
- case BYTECODE_OP_GT_DOUBLE_S64:
- case BYTECODE_OP_LT_DOUBLE_S64:
- case BYTECODE_OP_GE_DOUBLE_S64:
- case BYTECODE_OP_LE_DOUBLE_S64:
- case BYTECODE_OP_EQ_S64_DOUBLE:
- case BYTECODE_OP_NE_S64_DOUBLE:
- case BYTECODE_OP_GT_S64_DOUBLE:
- case BYTECODE_OP_LT_S64_DOUBLE:
- case BYTECODE_OP_GE_S64_DOUBLE:
- case BYTECODE_OP_LE_S64_DOUBLE:
- case BYTECODE_OP_BIT_RSHIFT:
- case BYTECODE_OP_BIT_LSHIFT:
- case BYTECODE_OP_BIT_AND:
- case BYTECODE_OP_BIT_OR:
- case BYTECODE_OP_BIT_XOR:
- {
- if (unlikely(pc + sizeof(struct binary_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* unary */
- case BYTECODE_OP_UNARY_PLUS:
- case BYTECODE_OP_UNARY_MINUS:
- case BYTECODE_OP_UNARY_NOT:
- case BYTECODE_OP_UNARY_PLUS_S64:
- case BYTECODE_OP_UNARY_MINUS_S64:
- case BYTECODE_OP_UNARY_NOT_S64:
- case BYTECODE_OP_UNARY_PLUS_DOUBLE:
- case BYTECODE_OP_UNARY_MINUS_DOUBLE:
- case BYTECODE_OP_UNARY_NOT_DOUBLE:
- case BYTECODE_OP_UNARY_BIT_NOT:
- {
- if (unlikely(pc + sizeof(struct unary_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* logical */
- case BYTECODE_OP_AND:
- case BYTECODE_OP_OR:
- {
- if (unlikely(pc + sizeof(struct logical_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* load field ref */
- case BYTECODE_OP_LOAD_FIELD_REF:
- {
- ERR("Unknown field ref type\n");
- ret = -EINVAL;
- break;
- }
-
- /* get context ref */
- case BYTECODE_OP_GET_CONTEXT_REF:
- case BYTECODE_OP_LOAD_FIELD_REF_STRING:
- case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
- case BYTECODE_OP_LOAD_FIELD_REF_S64:
- case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
- case BYTECODE_OP_GET_CONTEXT_REF_STRING:
- case BYTECODE_OP_GET_CONTEXT_REF_S64:
- case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* load from immediate operand */
- case BYTECODE_OP_LOAD_STRING:
- case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
- uint32_t str_len, maxlen;
-
- if (unlikely(pc + sizeof(struct load_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- break;
- }
-
- maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
- str_len = strnlen(insn->data, maxlen);
- if (unlikely(str_len >= maxlen)) {
- /* Final '\0' not found within range */
- ret = -ERANGE;
- }
- break;
- }
-
- case BYTECODE_OP_LOAD_S64:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- case BYTECODE_OP_LOAD_DOUBLE:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- case BYTECODE_OP_CAST_TO_S64:
- case BYTECODE_OP_CAST_DOUBLE_TO_S64:
- case BYTECODE_OP_CAST_NOP:
- {
- if (unlikely(pc + sizeof(struct cast_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case BYTECODE_OP_GET_CONTEXT_ROOT:
- case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
- case BYTECODE_OP_GET_PAYLOAD_ROOT:
- case BYTECODE_OP_LOAD_FIELD:
- case BYTECODE_OP_LOAD_FIELD_S8:
- case BYTECODE_OP_LOAD_FIELD_S16:
- case BYTECODE_OP_LOAD_FIELD_S32:
- case BYTECODE_OP_LOAD_FIELD_S64:
- case BYTECODE_OP_LOAD_FIELD_U8:
- case BYTECODE_OP_LOAD_FIELD_U16:
- case BYTECODE_OP_LOAD_FIELD_U32:
- case BYTECODE_OP_LOAD_FIELD_U64:
- case BYTECODE_OP_LOAD_FIELD_STRING:
- case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
- case BYTECODE_OP_LOAD_FIELD_DOUBLE:
- if (unlikely(pc + sizeof(struct load_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
-
- case BYTECODE_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- break;
- }
- ret = validate_get_symbol(bytecode, sym);
- break;
- }
-
- case BYTECODE_OP_GET_SYMBOL_FIELD:
- ERR("Unexpected get symbol field");
- ret = -EINVAL;
- break;
-
- case BYTECODE_OP_GET_INDEX_U16:
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
-
- case BYTECODE_OP_GET_INDEX_U64:
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- return ret;
-}
-
-static
-unsigned long delete_all_nodes(struct lttng_ust_lfht *ht)
-{
- struct lttng_ust_lfht_iter iter;
- struct lfht_mp_node *node;
- unsigned long nr_nodes = 0;
-
- lttng_ust_lfht_for_each_entry(ht, &iter, node, node) {
- int ret;
-
- ret = lttng_ust_lfht_del(ht, lttng_ust_lfht_iter_get_node(&iter));
- assert(!ret);
- /* note: this hash table is never used concurrently */
- free(node);
- nr_nodes++;
- }
- return nr_nodes;
-}
-
-/*
- * Return value:
- * >=0: success
- * <0: error
- */
-static
-int validate_instruction_context(
- struct bytecode_runtime *bytecode __attribute__((unused)),
- struct vstack *stack,
- char *start_pc,
- char *pc)
-{
- int ret = 0;
- const bytecode_opcode_t opcode = *(bytecode_opcode_t *) pc;
-
- switch (opcode) {
- case BYTECODE_OP_UNKNOWN:
- default:
- {
- ERR("unknown bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case BYTECODE_OP_RETURN:
- case BYTECODE_OP_RETURN_S64:
- {
- goto end;
- }
-
- /* binary */
- case BYTECODE_OP_MUL:
- case BYTECODE_OP_DIV:
- case BYTECODE_OP_MOD:
- case BYTECODE_OP_PLUS:
- case BYTECODE_OP_MINUS:
- {
- ERR("unsupported bytecode op %u\n",
- (unsigned int) opcode);
- ret = -EINVAL;
- goto end;
- }
-
- case BYTECODE_OP_EQ:
- {
- ret = bin_op_compare_check(stack, opcode, "==");
- if (ret < 0)
- goto end;
- break;
- }
- case BYTECODE_OP_NE:
- {
- ret = bin_op_compare_check(stack, opcode, "!=");
- if (ret < 0)
- goto end;
- break;
- }
- case BYTECODE_OP_GT:
- {
- ret = bin_op_compare_check(stack, opcode, ">");
- if (ret < 0)
- goto end;
- break;
- }
- case BYTECODE_OP_LT:
- {
- ret = bin_op_compare_check(stack, opcode, "<");
- if (ret < 0)
- goto end;
- break;
- }
- case BYTECODE_OP_GE:
- {
- ret = bin_op_compare_check(stack, opcode, ">=");
- if (ret < 0)
- goto end;
- break;
- }
- case BYTECODE_OP_LE:
- {
- ret = bin_op_compare_check(stack, opcode, "<=");
- if (ret < 0)
- goto end;
- break;
- }
-
- case BYTECODE_OP_EQ_STRING:
- case BYTECODE_OP_NE_STRING:
- case BYTECODE_OP_GT_STRING:
- case BYTECODE_OP_LT_STRING:
- case BYTECODE_OP_GE_STRING:
- case BYTECODE_OP_LE_STRING:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_STRING
- || vstack_bx(stack)->type != REG_STRING) {
- ERR("Unexpected register type for string comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_EQ_STAR_GLOB_STRING:
- case BYTECODE_OP_NE_STAR_GLOB_STRING:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
- && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
- ERR("Unexpected register type for globbing pattern comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_EQ_S64:
- case BYTECODE_OP_NE_S64:
- case BYTECODE_OP_GT_S64:
- case BYTECODE_OP_LT_S64:
- case BYTECODE_OP_GE_S64:
- case BYTECODE_OP_LE_S64:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Unexpected register type for s64 comparator\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_bx(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Unexpected register type for s64 comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_EQ_DOUBLE:
- case BYTECODE_OP_NE_DOUBLE:
- case BYTECODE_OP_GT_DOUBLE:
- case BYTECODE_OP_LT_DOUBLE:
- case BYTECODE_OP_GE_DOUBLE:
- case BYTECODE_OP_LE_DOUBLE:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_DOUBLE) {
- ERR("Double operator should have two double registers\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_EQ_DOUBLE_S64:
- case BYTECODE_OP_NE_DOUBLE_S64:
- case BYTECODE_OP_GT_DOUBLE_S64:
- case BYTECODE_OP_LT_DOUBLE_S64:
- case BYTECODE_OP_GE_DOUBLE_S64:
- case BYTECODE_OP_LE_DOUBLE_S64:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Double-S64 operator has unexpected register types\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_bx(stack)->type) {
- case REG_DOUBLE:
- break;
- default:
- ERR("Double-S64 operator has unexpected register types\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_EQ_S64_DOUBLE:
- case BYTECODE_OP_NE_S64_DOUBLE:
- case BYTECODE_OP_GT_S64_DOUBLE:
- case BYTECODE_OP_LT_S64_DOUBLE:
- case BYTECODE_OP_GE_S64_DOUBLE:
- case BYTECODE_OP_LE_S64_DOUBLE:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_DOUBLE:
- break;
- default:
- ERR("S64-Double operator has unexpected register types\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_bx(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("S64-Double operator has unexpected register types\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_BIT_RSHIFT:
- ret = bin_op_bitwise_check(stack, opcode, ">>");
- if (ret < 0)
- goto end;
- break;
- case BYTECODE_OP_BIT_LSHIFT:
- ret = bin_op_bitwise_check(stack, opcode, "<<");
- if (ret < 0)
- goto end;
- break;
- case BYTECODE_OP_BIT_AND:
- ret = bin_op_bitwise_check(stack, opcode, "&");
- if (ret < 0)
- goto end;
- break;
- case BYTECODE_OP_BIT_OR:
- ret = bin_op_bitwise_check(stack, opcode, "|");
- if (ret < 0)
- goto end;
- break;
- case BYTECODE_OP_BIT_XOR:
- ret = bin_op_bitwise_check(stack, opcode, "^");
- if (ret < 0)
- goto end;
- break;
-
- /* unary */
- case BYTECODE_OP_UNARY_PLUS:
- case BYTECODE_OP_UNARY_MINUS:
- case BYTECODE_OP_UNARY_NOT:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- ERR("Unary op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- break;
- case REG_U64:
- break;
- case REG_DOUBLE:
- break;
- case REG_UNKNOWN:
- break;
- }
- break;
- }
- case BYTECODE_OP_UNARY_BIT_NOT:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_DOUBLE:
- ERR("Unary bitwise op can only be applied to numeric registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- break;
- case REG_U64:
- break;
- case REG_UNKNOWN:
- break;
- }
- break;
- }
-
- case BYTECODE_OP_UNARY_PLUS_S64:
- case BYTECODE_OP_UNARY_MINUS_S64:
- case BYTECODE_OP_UNARY_NOT_S64:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64 &&
- vstack_ax(stack)->type != REG_U64) {
- ERR("Invalid register type\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_UNARY_PLUS_DOUBLE:
- case BYTECODE_OP_UNARY_MINUS_DOUBLE:
- case BYTECODE_OP_UNARY_NOT_DOUBLE:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_DOUBLE) {
- ERR("Invalid register type\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- /* logical */
- case BYTECODE_OP_AND:
- case BYTECODE_OP_OR:
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64
- && vstack_ax(stack)->type != REG_U64
- && vstack_ax(stack)->type != REG_UNKNOWN) {
- ERR("Logical comparator expects S64, U64 or dynamic register\n");
- ret = -EINVAL;
- goto end;
- }
-
- dbg_printf("Validate jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- if (unlikely(start_pc + insn->skip_offset <= pc)) {
- ERR("Loops are not allowed in bytecode\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- /* load field ref */
- case BYTECODE_OP_LOAD_FIELD_REF:
- {
- ERR("Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_STRING:
- case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate load field ref offset %u type string\n",
- ref->offset);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_S64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate load field ref offset %u type s64\n",
- ref->offset);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate load field ref offset %u type double\n",
- ref->offset);
- break;
- }
-
- /* load from immediate operand */
- case BYTECODE_OP_LOAD_STRING:
- case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
- {
- break;
- }
-
- case BYTECODE_OP_LOAD_S64:
- {
- break;
- }
-
- case BYTECODE_OP_LOAD_DOUBLE:
- {
- break;
- }
-
- case BYTECODE_OP_CAST_TO_S64:
- case BYTECODE_OP_CAST_DOUBLE_TO_S64:
- {
- struct cast_op *insn = (struct cast_op *) pc;
-
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- ERR("Cast op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- break;
- case REG_U64:
- break;
- case REG_DOUBLE:
- break;
- case REG_UNKNOWN:
- break;
- }
- if (insn->op == BYTECODE_OP_CAST_DOUBLE_TO_S64) {
- if (vstack_ax(stack)->type != REG_DOUBLE) {
- ERR("Cast expects double\n");
- ret = -EINVAL;
- goto end;
- }
- }
- break;
- }
- case BYTECODE_OP_CAST_NOP:
- {
- break;
- }
-
- /* get context ref */
- case BYTECODE_OP_GET_CONTEXT_REF:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate get context ref offset %u type dynamic\n",
- ref->offset);
- break;
- }
- case BYTECODE_OP_GET_CONTEXT_REF_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate get context ref offset %u type string\n",
- ref->offset);
- break;
- }
- case BYTECODE_OP_GET_CONTEXT_REF_S64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate get context ref offset %u type s64\n",
- ref->offset);
- break;
- }
- case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate get context ref offset %u type double\n",
- ref->offset);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case BYTECODE_OP_GET_CONTEXT_ROOT:
- {
- dbg_printf("Validate get context root\n");
- break;
- }
- case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
- {
- dbg_printf("Validate get app context root\n");
- break;
- }
- case BYTECODE_OP_GET_PAYLOAD_ROOT:
- {
- dbg_printf("Validate get payload root\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD:
- {
- /*
- * We tolerate that field type is unknown at validation,
- * because we are performing the load specialization in
- * a phase after validation.
- */
- dbg_printf("Validate load field\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_S8:
- {
- dbg_printf("Validate load field s8\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_S16:
- {
- dbg_printf("Validate load field s16\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_S32:
- {
- dbg_printf("Validate load field s32\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_S64:
- {
- dbg_printf("Validate load field s64\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_U8:
- {
- dbg_printf("Validate load field u8\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_U16:
- {
- dbg_printf("Validate load field u16\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_U32:
- {
- dbg_printf("Validate load field u32\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_U64:
- {
- dbg_printf("Validate load field u64\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_STRING:
- {
- dbg_printf("Validate load field string\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
- {
- dbg_printf("Validate load field sequence\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_DOUBLE:
- {
- dbg_printf("Validate load field double\n");
- break;
- }
-
- case BYTECODE_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- dbg_printf("Validate get symbol offset %u\n", sym->offset);
- break;
- }
-
- case BYTECODE_OP_GET_SYMBOL_FIELD:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- dbg_printf("Validate get symbol field offset %u\n", sym->offset);
- break;
- }
-
- case BYTECODE_OP_GET_INDEX_U16:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
-
- dbg_printf("Validate get index u16 index %u\n", get_index->index);
- break;
- }
-
- case BYTECODE_OP_GET_INDEX_U64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
-
- dbg_printf("Validate get index u64 index %" PRIu64 "\n", get_index->index);
- break;
- }
- }
-end:
- return ret;
-}
-
-/*
- * Return value:
- * 0: success
- * <0: error
- */
-static
-int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
- struct lttng_ust_lfht *merge_points,
- struct vstack *stack,
- char *start_pc,
- char *pc)
-{
- int ret;
- unsigned long target_pc = pc - start_pc;
- struct lttng_ust_lfht_iter iter;
- struct lttng_ust_lfht_node *node;
- struct lfht_mp_node *mp_node;
- unsigned long hash;
-
- /* Validate the context resulting from the previous instruction */
- ret = validate_instruction_context(bytecode, stack, start_pc, pc);
- if (ret < 0)
- return ret;
-
- /* Validate merge points */
- hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc),
- lttng_hash_seed);
- lttng_ust_lfht_lookup(merge_points, hash, lttng_hash_match,
- (const char *) target_pc, &iter);
- node = lttng_ust_lfht_iter_get_node(&iter);
- if (node) {
- mp_node = caa_container_of(node, struct lfht_mp_node, node);
-
- dbg_printf("Bytecode: validate merge point at offset %lu\n",
- target_pc);
- if (merge_points_compare(stack, &mp_node->stack)) {
- ERR("Merge points differ for offset %lu\n",
- target_pc);
- return -EINVAL;
- }
- /* Once validated, we can remove the merge point */
- dbg_printf("Bytecode: remove merge point at offset %lu\n",
- target_pc);
- ret = lttng_ust_lfht_del(merge_points, node);
- assert(!ret);
- }
- return 0;
-}
-
-/*
- * Return value:
- * >0: going to next insn.
- * 0: success, stop iteration.
- * <0: error
- */
-static
-int exec_insn(struct bytecode_runtime *bytecode __attribute__((unused)),
- struct lttng_ust_lfht *merge_points,
- struct vstack *stack,
- char **_next_pc,
- char *pc)
-{
- int ret = 1;
- char *next_pc = *_next_pc;
-
- switch (*(bytecode_opcode_t *) pc) {
- case BYTECODE_OP_UNKNOWN:
- default:
- {
- ERR("unknown bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case BYTECODE_OP_RETURN:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- case REG_STRING:
- case REG_PTR:
- case REG_UNKNOWN:
- break;
- default:
- ERR("Unexpected register type %d at end of bytecode\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- ret = 0;
- goto end;
- }
- case BYTECODE_OP_RETURN_S64:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- case REG_UNKNOWN:
- ERR("Unexpected register type %d at end of bytecode\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- ret = 0;
- goto end;
- }
-
- /* binary */
- case BYTECODE_OP_MUL:
- case BYTECODE_OP_DIV:
- case BYTECODE_OP_MOD:
- case BYTECODE_OP_PLUS:
- case BYTECODE_OP_MINUS:
- {
- ERR("unsupported bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case BYTECODE_OP_EQ:
- case BYTECODE_OP_NE:
- case BYTECODE_OP_GT:
- case BYTECODE_OP_LT:
- case BYTECODE_OP_GE:
- case BYTECODE_OP_LE:
- case BYTECODE_OP_EQ_STRING:
- case BYTECODE_OP_NE_STRING:
- case BYTECODE_OP_GT_STRING:
- case BYTECODE_OP_LT_STRING:
- case BYTECODE_OP_GE_STRING:
- case BYTECODE_OP_LE_STRING:
- case BYTECODE_OP_EQ_STAR_GLOB_STRING:
- case BYTECODE_OP_NE_STAR_GLOB_STRING:
- case BYTECODE_OP_EQ_S64:
- case BYTECODE_OP_NE_S64:
- case BYTECODE_OP_GT_S64:
- case BYTECODE_OP_LT_S64:
- case BYTECODE_OP_GE_S64:
- case BYTECODE_OP_LE_S64:
- case BYTECODE_OP_EQ_DOUBLE:
- case BYTECODE_OP_NE_DOUBLE:
- case BYTECODE_OP_GT_DOUBLE:
- case BYTECODE_OP_LT_DOUBLE:
- case BYTECODE_OP_GE_DOUBLE:
- case BYTECODE_OP_LE_DOUBLE:
- case BYTECODE_OP_EQ_DOUBLE_S64:
- case BYTECODE_OP_NE_DOUBLE_S64:
- case BYTECODE_OP_GT_DOUBLE_S64:
- case BYTECODE_OP_LT_DOUBLE_S64:
- case BYTECODE_OP_GE_DOUBLE_S64:
- case BYTECODE_OP_LE_DOUBLE_S64:
- case BYTECODE_OP_EQ_S64_DOUBLE:
- case BYTECODE_OP_NE_S64_DOUBLE:
- case BYTECODE_OP_GT_S64_DOUBLE:
- case BYTECODE_OP_LT_S64_DOUBLE:
- case BYTECODE_OP_GE_S64_DOUBLE:
- case BYTECODE_OP_LE_S64_DOUBLE:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_UNKNOWN:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_BIT_RSHIFT:
- case BYTECODE_OP_BIT_LSHIFT:
- case BYTECODE_OP_BIT_AND:
- case BYTECODE_OP_BIT_OR:
- case BYTECODE_OP_BIT_XOR:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_UNKNOWN:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- /* unary */
- case BYTECODE_OP_UNARY_PLUS:
- case BYTECODE_OP_UNARY_MINUS:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_UNKNOWN:
- case REG_DOUBLE:
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_UNKNOWN;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_PLUS_S64:
- case BYTECODE_OP_UNARY_MINUS_S64:
- case BYTECODE_OP_UNARY_NOT_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_NOT:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_UNKNOWN:
- case REG_DOUBLE:
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_BIT_NOT:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_UNKNOWN:
- case REG_S64:
- case REG_U64:
- break;
- case REG_DOUBLE:
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_NOT_DOUBLE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_DOUBLE:
- break;
- default:
- ERR("Incorrect register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_PLUS_DOUBLE:
- case BYTECODE_OP_UNARY_MINUS_DOUBLE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_DOUBLE:
- break;
- default:
- ERR("Incorrect register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- /* logical */
- case BYTECODE_OP_AND:
- case BYTECODE_OP_OR:
- {
- struct logical_op *insn = (struct logical_op *) pc;
- int merge_ret;
-
- /* Add merge point to table */
- merge_ret = merge_point_add_check(merge_points,
- insn->skip_offset, stack);
- if (merge_ret) {
- ret = merge_ret;
- goto end;
- }
-
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
-		/* There is always a cast-to-s64 operation before an or/and op. */
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Incorrect register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- /* Continue to next instruction */
- /* Pop 1 when jump not taken */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct logical_op);
- break;
- }
-
- /* load field ref */
- case BYTECODE_OP_LOAD_FIELD_REF:
- {
- ERR("Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- /* get context ref */
- case BYTECODE_OP_GET_CONTEXT_REF:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_UNKNOWN;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_STRING:
- case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
- case BYTECODE_OP_GET_CONTEXT_REF_STRING:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_S64:
- case BYTECODE_OP_GET_CONTEXT_REF_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
- case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
-
- /* load from immediate operand */
- case BYTECODE_OP_LOAD_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case BYTECODE_OP_LOAD_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- break;
- }
-
- case BYTECODE_OP_LOAD_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_double);
- break;
- }
-
- case BYTECODE_OP_CAST_TO_S64:
- case BYTECODE_OP_CAST_DOUBLE_TO_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- case REG_UNKNOWN:
- break;
- default:
- ERR("Incorrect register type %d for cast\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case BYTECODE_OP_CAST_NOP:
- {
- next_pc += sizeof(struct cast_op);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case BYTECODE_OP_GET_CONTEXT_ROOT:
- case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
- case BYTECODE_OP_GET_PAYLOAD_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_UNKNOWN;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_S8:
- case BYTECODE_OP_LOAD_FIELD_S16:
- case BYTECODE_OP_LOAD_FIELD_S32:
- case BYTECODE_OP_LOAD_FIELD_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_U8:
- case BYTECODE_OP_LOAD_FIELD_U16:
- case BYTECODE_OP_LOAD_FIELD_U32:
- case BYTECODE_OP_LOAD_FIELD_U64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_STRING:
- case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_DOUBLE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_GET_SYMBOL:
- case BYTECODE_OP_GET_SYMBOL_FIELD:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- break;
- }
-
- case BYTECODE_OP_GET_INDEX_U16:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- break;
- }
-
- case BYTECODE_OP_GET_INDEX_U64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- break;
- }
-
- }
-end:
- *_next_pc = next_pc;
- return ret;
-}
-
-/*
- * Never called concurrently (hash seed is shared).
- */
-int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
-{
- struct lttng_ust_lfht *merge_points;
- char *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- struct vstack stack;
-
- vstack_init(&stack);
-
- if (!lttng_hash_seed_ready) {
- lttng_hash_seed = time(NULL);
- lttng_hash_seed_ready = 1;
- }
- /*
- * Note: merge_points hash table used by single thread, and
- * never concurrently resized. Therefore, we can use it without
- * holding RCU read-side lock and free nodes without using
- * call_rcu.
- */
- merge_points = lttng_ust_lfht_new(DEFAULT_NR_MERGE_POINTS,
- MIN_NR_BUCKETS, MAX_NR_BUCKETS,
- 0, NULL);
- if (!merge_points) {
- ERR("Error allocating hash table for bytecode validation\n");
- return -ENOMEM;
- }
- start_pc = &bytecode->code[0];
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
- pc = next_pc) {
- ret = bytecode_validate_overflow(bytecode, start_pc, pc);
- if (ret != 0) {
- if (ret == -ERANGE)
- ERR("Bytecode overflow\n");
- goto end;
- }
- dbg_printf("Validating op %s (%u)\n",
- lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc),
- (unsigned int) *(bytecode_opcode_t *) pc);
-
- /*
- * For each instruction, validate the current context
- * (traversal of entire execution flow), and validate
- * all merge points targeting this instruction.
- */
- ret = validate_instruction_all_contexts(bytecode, merge_points,
- &stack, start_pc, pc);
- if (ret)
- goto end;
- ret = exec_insn(bytecode, merge_points, &stack, &next_pc, pc);
- if (ret <= 0)
- goto end;
- }
-end:
- if (delete_all_nodes(merge_points)) {
- if (!ret) {
- ERR("Unexpected merge points\n");
- ret = -EINVAL;
- }
- }
- if (lttng_ust_lfht_destroy(merge_points)) {
- ERR("Error destroying hash table\n");
- }
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST bytecode code.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-
-#include <urcu/rculist.h>
-
-#include "context-internal.h"
-#include "lttng-bytecode.h"
-#include "ust-events-internal.h"
-#include "common/macros.h"
-
-static const char *opnames[] = {
- [ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",
-
- [ BYTECODE_OP_RETURN ] = "RETURN",
-
- /* binary */
- [ BYTECODE_OP_MUL ] = "MUL",
- [ BYTECODE_OP_DIV ] = "DIV",
- [ BYTECODE_OP_MOD ] = "MOD",
- [ BYTECODE_OP_PLUS ] = "PLUS",
- [ BYTECODE_OP_MINUS ] = "MINUS",
- [ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
- [ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
- [ BYTECODE_OP_BIT_AND ] = "BIT_AND",
- [ BYTECODE_OP_BIT_OR ] = "BIT_OR",
- [ BYTECODE_OP_BIT_XOR ] = "BIT_XOR",
-
- /* binary comparators */
- [ BYTECODE_OP_EQ ] = "EQ",
- [ BYTECODE_OP_NE ] = "NE",
- [ BYTECODE_OP_GT ] = "GT",
- [ BYTECODE_OP_LT ] = "LT",
- [ BYTECODE_OP_GE ] = "GE",
- [ BYTECODE_OP_LE ] = "LE",
-
- /* string binary comparators */
- [ BYTECODE_OP_EQ_STRING ] = "EQ_STRING",
- [ BYTECODE_OP_NE_STRING ] = "NE_STRING",
- [ BYTECODE_OP_GT_STRING ] = "GT_STRING",
- [ BYTECODE_OP_LT_STRING ] = "LT_STRING",
- [ BYTECODE_OP_GE_STRING ] = "GE_STRING",
- [ BYTECODE_OP_LE_STRING ] = "LE_STRING",
-
- /* s64 binary comparators */
- [ BYTECODE_OP_EQ_S64 ] = "EQ_S64",
- [ BYTECODE_OP_NE_S64 ] = "NE_S64",
- [ BYTECODE_OP_GT_S64 ] = "GT_S64",
- [ BYTECODE_OP_LT_S64 ] = "LT_S64",
- [ BYTECODE_OP_GE_S64 ] = "GE_S64",
- [ BYTECODE_OP_LE_S64 ] = "LE_S64",
-
- /* double binary comparators */
- [ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
- [ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE",
- [ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE",
- [ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE",
- [ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE",
- [ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE",
-
- /* Mixed S64-double binary comparators */
- [ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
- [ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
- [ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
- [ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
- [ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
- [ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
-
- [ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
- [ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
- [ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
- [ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
- [ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
- [ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
-
- /* unary */
- [ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS",
- [ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS",
- [ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT",
- [ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
- [ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
- [ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
- [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
- [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
- [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
-
- /* logical */
- [ BYTECODE_OP_AND ] = "AND",
- [ BYTECODE_OP_OR ] = "OR",
-
- /* load field ref */
- [ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
- [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
- [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
- [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
- [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
-
- /* load from immediate operand */
- [ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING",
- [ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64",
- [ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
-
- /* cast */
- [ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64",
- [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
- [ BYTECODE_OP_CAST_NOP ] = "CAST_NOP",
-
- /* get context ref */
- [ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
- [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
- [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
- [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
-
- /* load userspace field ref */
- [ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
- [ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
-
- /*
- * load immediate star globbing pattern (literal string)
- * from immediate.
- */
- [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
-
- /* globbing pattern binary operator: apply to */
- [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
- [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- [ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
- [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
- [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
-
- [ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL",
- [ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
- [ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
- [ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
-
- [ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD",
- [ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
- [ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
- [ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
- [ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
- [ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
- [ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
- [ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
- [ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
- [ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
- [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
- [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
-
- [ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
-
- [ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64",
-};
-
-const char *lttng_bytecode_print_op(enum bytecode_op op)
-{
- if (op >= NR_BYTECODE_OPS)
- return "UNKNOWN";
- else
- return opnames[op];
-}
-
-static
-int apply_field_reloc(const struct lttng_ust_event_desc *event_desc,
- struct bytecode_runtime *runtime,
- uint32_t runtime_len __attribute__((unused)),
- uint32_t reloc_offset,
- const char *field_name,
- enum bytecode_op bytecode_op)
-{
- const struct lttng_ust_event_field **fields, *field = NULL;
- unsigned int nr_fields, i;
- struct load_op *op;
- uint32_t field_offset = 0;
-
- dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name);
-
- /* Lookup event by name */
- if (!event_desc)
- return -EINVAL;
- fields = event_desc->fields;
- if (!fields)
- return -EINVAL;
- nr_fields = event_desc->nr_fields;
- for (i = 0; i < nr_fields; i++) {
- if (fields[i]->nofilter) {
- continue;
- }
- if (!strcmp(fields[i]->name, field_name)) {
- field = fields[i];
- break;
- }
- /* compute field offset */
- switch (fields[i]->type->type) {
- case lttng_ust_type_integer:
- case lttng_ust_type_enum:
- field_offset += sizeof(int64_t);
- break;
- case lttng_ust_type_array:
- case lttng_ust_type_sequence:
- field_offset += sizeof(unsigned long);
- field_offset += sizeof(void *);
- break;
- case lttng_ust_type_string:
- field_offset += sizeof(void *);
- break;
- case lttng_ust_type_float:
- field_offset += sizeof(double);
- break;
- default:
- return -EINVAL;
- }
- }
- if (!field)
- return -EINVAL;
-
- /* Check if field offset is too large for 16-bit offset */
- if (field_offset > LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN - 1)
- return -EINVAL;
-
- /* set type */
- op = (struct load_op *) &runtime->code[reloc_offset];
-
- switch (bytecode_op) {
- case BYTECODE_OP_LOAD_FIELD_REF:
- {
- struct field_ref *field_ref;
-
- field_ref = (struct field_ref *) op->data;
- switch (field->type->type) {
- case lttng_ust_type_integer:
- case lttng_ust_type_enum:
- op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
- break;
- case lttng_ust_type_array:
- {
- struct lttng_ust_type_array *array = (struct lttng_ust_type_array *) field->type;
-
- if (array->encoding == lttng_ust_string_encoding_none)
- return -EINVAL;
- op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
- break;
- }
- case lttng_ust_type_sequence:
- {
- struct lttng_ust_type_sequence *sequence = (struct lttng_ust_type_sequence *) field->type;
-
- if (sequence->encoding == lttng_ust_string_encoding_none)
- return -EINVAL;
- op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
- break;
- }
- case lttng_ust_type_string:
- op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
- break;
- case lttng_ust_type_float:
- op->op = BYTECODE_OP_LOAD_FIELD_REF_DOUBLE;
- break;
- default:
- return -EINVAL;
- }
- /* set offset */
- field_ref->offset = (uint16_t) field_offset;
- break;
- }
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int apply_context_reloc(struct bytecode_runtime *runtime,
- uint32_t runtime_len __attribute__((unused)),
- uint32_t reloc_offset,
- const char *context_name,
- enum bytecode_op bytecode_op)
-{
- struct load_op *op;
- const struct lttng_ust_ctx_field *ctx_field;
- int idx;
- struct lttng_ust_ctx **pctx = runtime->p.pctx;
-
- dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name);
-
- /* Get context index */
- idx = lttng_get_context_index(*pctx, context_name);
- if (idx < 0) {
- if (lttng_context_is_app(context_name)) {
- int ret;
-
- ret = lttng_ust_add_app_context_to_ctx_rcu(context_name,
- pctx);
- if (ret)
- return ret;
- idx = lttng_get_context_index(*pctx, context_name);
- if (idx < 0)
- return -ENOENT;
- } else {
- return -ENOENT;
- }
- }
- /* Check if idx is too large for 16-bit offset */
- if (idx > LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN - 1)
- return -EINVAL;
-
- /* Get context return type */
- ctx_field = &(*pctx)->fields[idx];
- op = (struct load_op *) &runtime->code[reloc_offset];
-
- switch (bytecode_op) {
- case BYTECODE_OP_GET_CONTEXT_REF:
- {
- struct field_ref *field_ref;
-
- field_ref = (struct field_ref *) op->data;
- switch (ctx_field->event_field->type->type) {
- case lttng_ust_type_integer:
- case lttng_ust_type_enum:
- op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
- break;
- /* Sequence and array supported only as string */
- case lttng_ust_type_array:
- {
- struct lttng_ust_type_array *array = (struct lttng_ust_type_array *) ctx_field->event_field->type;
-
- if (array->encoding == lttng_ust_string_encoding_none)
- return -EINVAL;
- op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
- break;
- }
- case lttng_ust_type_sequence:
- {
- struct lttng_ust_type_sequence *sequence = (struct lttng_ust_type_sequence *) ctx_field->event_field->type;
-
- if (sequence->encoding == lttng_ust_string_encoding_none)
- return -EINVAL;
- op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
- break;
- }
- case lttng_ust_type_string:
- op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
- break;
- case lttng_ust_type_float:
- op->op = BYTECODE_OP_GET_CONTEXT_REF_DOUBLE;
- break;
- case lttng_ust_type_dynamic:
- op->op = BYTECODE_OP_GET_CONTEXT_REF;
- break;
- default:
- return -EINVAL;
- }
- /* set offset to context index within channel contexts */
- field_ref->offset = (uint16_t) idx;
- break;
- }
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int apply_reloc(const struct lttng_ust_event_desc *event_desc,
- struct bytecode_runtime *runtime,
- uint32_t runtime_len,
- uint32_t reloc_offset,
- const char *name)
-{
- struct load_op *op;
-
- dbg_printf("Apply reloc: %u %s\n", reloc_offset, name);
-
- /* Ensure that the reloc is within the code */
- if (runtime_len - reloc_offset < sizeof(uint16_t))
- return -EINVAL;
-
- op = (struct load_op *) &runtime->code[reloc_offset];
- switch (op->op) {
- case BYTECODE_OP_LOAD_FIELD_REF:
- return apply_field_reloc(event_desc, runtime, runtime_len,
- reloc_offset, name, op->op);
- case BYTECODE_OP_GET_CONTEXT_REF:
- return apply_context_reloc(runtime, runtime_len,
- reloc_offset, name, op->op);
- case BYTECODE_OP_GET_SYMBOL:
- case BYTECODE_OP_GET_SYMBOL_FIELD:
- /*
- * Will be handled by load specialize phase or
- * dynamically by interpreter.
- */
- return 0;
- default:
- ERR("Unknown reloc op type %u\n", op->op);
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int bytecode_is_linked(struct lttng_ust_bytecode_node *bytecode,
- struct cds_list_head *bytecode_runtime_head)
-{
- struct lttng_ust_bytecode_runtime *bc_runtime;
-
- cds_list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
- if (bc_runtime->bc == bytecode)
- return 1;
- }
- return 0;
-}
-
-/*
- * Take a bytecode with reloc table and link it to an event to create a
- * bytecode runtime.
- */
-static
-int link_bytecode(const struct lttng_ust_event_desc *event_desc,
- struct lttng_ust_ctx **ctx,
- struct lttng_ust_bytecode_node *bytecode,
- struct cds_list_head *bytecode_runtime_head,
- struct cds_list_head *insert_loc)
-{
- int ret, offset, next_offset;
- struct bytecode_runtime *runtime = NULL;
- size_t runtime_alloc_len;
-
- if (!bytecode)
- return 0;
- /* Bytecode already linked */
- if (bytecode_is_linked(bytecode, bytecode_runtime_head))
- return 0;
-
- dbg_printf("Linking...\n");
-
- /* We don't need the reloc table in the runtime */
- runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
- runtime = zmalloc(runtime_alloc_len);
- if (!runtime) {
- ret = -ENOMEM;
- goto alloc_error;
- }
- runtime->p.type = bytecode->type;
- runtime->p.bc = bytecode;
- runtime->p.pctx = ctx;
- runtime->len = bytecode->bc.reloc_offset;
- /* copy original bytecode */
- memcpy(runtime->code, bytecode->bc.data, runtime->len);
- /*
- * apply relocs. Those are a uint16_t (offset in bytecode)
- * followed by a string (field name).
- */
- for (offset = bytecode->bc.reloc_offset;
- offset < bytecode->bc.len;
- offset = next_offset) {
- uint16_t reloc_offset =
- *(uint16_t *) &bytecode->bc.data[offset];
- const char *name =
- (const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];
-
- ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
- if (ret) {
- goto link_error;
- }
- next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
- }
- /* Validate bytecode */
- ret = lttng_bytecode_validate(runtime);
- if (ret) {
- goto link_error;
- }
- /* Specialize bytecode */
- ret = lttng_bytecode_specialize(event_desc, runtime);
- if (ret) {
- goto link_error;
- }
-
- runtime->p.interpreter_func = lttng_bytecode_interpret;
- runtime->p.link_failed = 0;
- cds_list_add_rcu(&runtime->p.node, insert_loc);
- dbg_printf("Linking successful.\n");
- return 0;
-
-link_error:
- runtime->p.interpreter_func = lttng_bytecode_interpret_error;
- runtime->p.link_failed = 1;
- cds_list_add_rcu(&runtime->p.node, insert_loc);
-alloc_error:
- dbg_printf("Linking failed.\n");
- return ret;
-}
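
The relocation records consumed by link_bytecode() above are packed after the instructions: each record is a native-endian uint16_t (offset of the instruction to patch) immediately followed by a NUL-terminated field name. A minimal, self-contained sketch of walking that layout (hypothetical offsets and field names; a real implementation must also validate that every record stays within bounds):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void walk_reloc_table(const char *data, size_t len)
{
	size_t offset = 0;

	while (offset + sizeof(uint16_t) < len) {
		uint16_t reloc_offset;
		const char *name;

		/* Read the 16-bit code offset, then the field name. */
		memcpy(&reloc_offset, &data[offset], sizeof(reloc_offset));
		name = &data[offset + sizeof(uint16_t)];
		printf("patch instruction at code offset %u for field \"%s\"\n",
			(unsigned int) reloc_offset, name);
		offset += sizeof(uint16_t) + strlen(name) + 1;
	}
}

int main(void)
{
	char table[64];
	size_t pos = 0;
	uint16_t off;

	/* Hypothetical record 1: code offset 8, field "my_field". */
	off = 8;
	memcpy(&table[pos], &off, sizeof(off)); pos += sizeof(off);
	memcpy(&table[pos], "my_field", sizeof("my_field")); pos += sizeof("my_field");
	/* Hypothetical record 2: code offset 32, field "other_field". */
	off = 32;
	memcpy(&table[pos], &off, sizeof(off)); pos += sizeof(off);
	memcpy(&table[pos], "other_field", sizeof("other_field")); pos += sizeof("other_field");

	walk_reloc_table(table, pos);
	return 0;
}
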
-
-void lttng_bytecode_sync_state(struct lttng_ust_bytecode_runtime *runtime)
-{
- struct lttng_ust_bytecode_node *bc = runtime->bc;
-
- if (!bc->enabler->enabled || runtime->link_failed)
- runtime->interpreter_func = lttng_bytecode_interpret_error;
- else
- runtime->interpreter_func = lttng_bytecode_interpret;
-}
-
-/*
- * Given the lists of bytecode programs of an instance (trigger or event) and
- * of a matching enabler, try to link all the enabler's bytecode programs with
- * the instance.
- *
- * This function is called after we have confirmed that the enabler and
- * the instance names match (or glob-pattern match).
- */
-void lttng_enabler_link_bytecode(const struct lttng_ust_event_desc *event_desc,
- struct lttng_ust_ctx **ctx,
- struct cds_list_head *instance_bytecode_head,
- struct cds_list_head *enabler_bytecode_head)
-{
- struct lttng_ust_bytecode_node *enabler_bc;
- struct lttng_ust_bytecode_runtime *runtime;
-
- assert(event_desc);
-
- /* Go over all the bytecode programs of the enabler. */
- cds_list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
- int found = 0, ret;
- struct cds_list_head *insert_loc;
-
- /*
- * Check if the current enabler bytecode program is already
- * linked with the instance.
- */
- cds_list_for_each_entry(runtime, instance_bytecode_head, node) {
- if (runtime->bc == enabler_bc) {
- found = 1;
- break;
- }
- }
-
- /*
- * Skip bytecode already linked, go to the next enabler
- * bytecode program.
- */
- if (found)
- continue;
-
- /*
- * Insert at specified priority (seqnum) in increasing
- * order. If there already is a bytecode of the same priority,
- * insert the new bytecode right after it.
- */
- cds_list_for_each_entry_reverse(runtime,
- instance_bytecode_head, node) {
- if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
- /* insert here */
- insert_loc = &runtime->node;
- goto add_within;
- }
- }
-
-	/* Add to head of list */
- insert_loc = instance_bytecode_head;
- add_within:
- dbg_printf("linking bytecode\n");
- ret = link_bytecode(event_desc, ctx, enabler_bc, instance_bytecode_head, insert_loc);
- if (ret) {
- dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
- }
- }
-}
-
-static
-void free_filter_runtime(struct cds_list_head *bytecode_runtime_head)
-{
- struct bytecode_runtime *runtime, *tmp;
-
- cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head,
- p.node) {
- free(runtime->data);
- free(runtime);
- }
-}
-
-void lttng_free_event_filter_runtime(struct lttng_ust_event_common *event)
-{
- free_filter_runtime(&event->priv->filter_bytecode_runtime_head);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST bytecode header.
- */
-
-#ifndef _LTTNG_BYTECODE_H
-#define _LTTNG_BYTECODE_H
-
-#include <errno.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include "common/macros.h"
-#include <lttng/ust-events.h>
-#include "common/ust-context-provider.h"
-#include <stdint.h>
-#include <assert.h>
-#include <errno.h>
-#include <string.h>
-#include <inttypes.h>
-#include <limits.h>
-#include "common/logging.h"
-#include "bytecode.h"
-#include "ust-events-internal.h"
-
-/* Interpreter stack length, in number of entries */
-#define INTERPRETER_STACK_LEN 10 /* includes 2 dummy entries */
-#define INTERPRETER_STACK_EMPTY 1
-
-#define BYTECODE_MAX_DATA_LEN 65536
-
-#ifndef min_t
-#define min_t(type, a, b) \
- ((type) (a) < (type) (b) ? (type) (a) : (type) (b))
-#endif
-
-#ifndef likely
-#define likely(x) __builtin_expect(!!(x), 1)
-#endif
-
-#ifndef unlikely
-#define unlikely(x) __builtin_expect(!!(x), 0)
-#endif
-
-#ifdef DEBUG
-#define dbg_printf(fmt, args...) \
- printf("[debug bytecode in %s:%s@%u] " fmt, \
- __FILE__, __func__, __LINE__, ## args)
-#else
-#define dbg_printf(fmt, args...) \
-do { \
- /* do nothing but check printf format */ \
- if (0) \
- printf("[debug bytecode in %s:%s@%u] " fmt, \
- __FILE__, __func__, __LINE__, ## args); \
-} while (0)
-#endif
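
The disabled branch of dbg_printf() above keeps compile-time format checking while compiling to nothing in non-debug builds. A tiny standalone sketch of the same trick (trace_dbg is a hypothetical name; the `##__VA_ARGS__` form is a GNU extension, like the `args...` form used above):

#include <stdio.h>

/* Dead `if (0)` branch: optimized away, but the compiler still
 * type-checks the format string and arguments (-Wformat still fires). */
#define trace_dbg(fmt, ...) \
	do { \
		if (0) \
			printf(fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	trace_dbg("value: %d\n", 42);
	/* trace_dbg("value: %d\n", "oops"); would warn with -Wformat */
	return 0;
}
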
-
-/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
-struct bytecode_runtime {
- struct lttng_ust_bytecode_runtime p;
- size_t data_len;
- size_t data_alloc_len;
- char *data;
- uint16_t len;
- char code[0];
-};
-
-enum entry_type {
- REG_S64,
- REG_U64,
- REG_DOUBLE,
- REG_STRING,
- REG_STAR_GLOB_STRING,
- REG_UNKNOWN,
- REG_PTR,
-};
-
-enum load_type {
- LOAD_ROOT_CONTEXT,
- LOAD_ROOT_APP_CONTEXT,
- LOAD_ROOT_PAYLOAD,
- LOAD_OBJECT,
-};
-
-enum object_type {
- OBJECT_TYPE_S8,
- OBJECT_TYPE_S16,
- OBJECT_TYPE_S32,
- OBJECT_TYPE_S64,
- OBJECT_TYPE_U8,
- OBJECT_TYPE_U16,
- OBJECT_TYPE_U32,
- OBJECT_TYPE_U64,
-
- OBJECT_TYPE_SIGNED_ENUM,
- OBJECT_TYPE_UNSIGNED_ENUM,
-
- OBJECT_TYPE_DOUBLE,
- OBJECT_TYPE_STRING,
- OBJECT_TYPE_STRING_SEQUENCE,
-
- OBJECT_TYPE_SEQUENCE,
- OBJECT_TYPE_ARRAY,
- OBJECT_TYPE_STRUCT,
- OBJECT_TYPE_VARIANT,
-
- OBJECT_TYPE_DYNAMIC,
-};
-
-struct bytecode_get_index_data {
- uint64_t offset; /* in bytes */
- size_t ctx_index;
- size_t array_len;
- /*
- * Field is only populated for LOAD_ROOT_CONTEXT, LOAD_ROOT_APP_CONTEXT
- * and LOAD_ROOT_PAYLOAD. Left NULL for LOAD_OBJECT, considering that the
- * interpreter needs to find it from the event fields and types to
- * support variants.
- */
- const struct lttng_ust_event_field *field;
- struct {
- size_t len;
- enum object_type type;
- bool rev_bo; /* reverse byte order */
- } elem;
-};
-
-/* Validation stack */
-struct vstack_load {
- enum load_type type;
- enum object_type object_type;
- const struct lttng_ust_event_field *field;
- bool rev_bo; /* reverse byte order */
-};
-
-struct vstack_entry {
- enum entry_type type;
- struct vstack_load load;
-};
-
-struct vstack {
- int top; /* top of stack */
- struct vstack_entry e[INTERPRETER_STACK_LEN];
-};
-
-static inline
-void vstack_init(struct vstack *stack)
-{
- stack->top = -1;
-}
-
-static inline
-struct vstack_entry *vstack_ax(struct vstack *stack)
-{
- if (unlikely(stack->top < 0))
- return NULL;
- return &stack->e[stack->top];
-}
-
-static inline
-struct vstack_entry *vstack_bx(struct vstack *stack)
-{
- if (unlikely(stack->top < 1))
- return NULL;
- return &stack->e[stack->top - 1];
-}
-
-static inline
-int vstack_push(struct vstack *stack)
-{
- if (stack->top >= INTERPRETER_STACK_LEN - 1) {
- ERR("Stack full\n");
- return -EINVAL;
- }
- ++stack->top;
- return 0;
-}
-
-static inline
-int vstack_pop(struct vstack *stack)
-{
- if (unlikely(stack->top < 0)) {
- ERR("Stack empty\n");
- return -EINVAL;
- }
- stack->top--;
- return 0;
-}
-
-/* Execution stack */
-enum estack_string_literal_type {
- ESTACK_STRING_LITERAL_TYPE_NONE,
- ESTACK_STRING_LITERAL_TYPE_PLAIN,
- ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
-};
-
-struct load_ptr {
- enum load_type type;
- enum object_type object_type;
- const void *ptr;
- size_t nr_elem;
- bool rev_bo;
- /* Temporary place-holders for contexts. */
- union {
- int64_t s64;
- uint64_t u64;
- double d;
- } u;
- const struct lttng_ust_event_field *field;
-};
-
-struct estack_entry {
- enum entry_type type; /* For dynamic typing. */
- union {
- int64_t v;
- double d;
-
- struct {
- const char *str;
- size_t seq_len;
- enum estack_string_literal_type literal_type;
- } s;
- struct load_ptr ptr;
- } u;
-};
-
-struct estack {
- int top; /* top of stack */
- struct estack_entry e[INTERPRETER_STACK_LEN];
-};
-
-/*
- * Always use aliased type for ax/bx (top of stack).
- * When ax/bx are S64, use aliased value.
- */
-#define estack_ax_v ax
-#define estack_bx_v bx
-#define estack_ax_t ax_t
-#define estack_bx_t bx_t
-
-/*
- * ax and bx registers can hold either integer, double or string.
- */
-#define estack_ax(stack, top) \
- ({ \
- assert((top) > INTERPRETER_STACK_EMPTY); \
- &(stack)->e[top]; \
- })
-
-#define estack_bx(stack, top) \
- ({ \
- assert((top) > INTERPRETER_STACK_EMPTY + 1); \
- &(stack)->e[(top) - 1]; \
- })
-
-/*
- * Currently, only integers (REG_S64) can be pushed into the stack.
- */
-#define estack_push(stack, top, ax, bx, ax_t, bx_t) \
- do { \
- assert((top) < INTERPRETER_STACK_LEN - 1); \
- (stack)->e[(top) - 1].u.v = (bx); \
- (stack)->e[(top) - 1].type = (bx_t); \
- (bx) = (ax); \
- (bx_t) = (ax_t); \
- ++(top); \
- } while (0)
-
-#define estack_pop(stack, top, ax, bx, ax_t, bx_t) \
- do { \
- assert((top) > INTERPRETER_STACK_EMPTY); \
- (ax) = (bx); \
- (ax_t) = (bx_t); \
- (bx) = (stack)->e[(top) - 2].u.v; \
- (bx_t) = (stack)->e[(top) - 2].type; \
- (top)--; \
- } while (0)
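
To make the ax/bx aliasing above concrete, here is a simplified, self-contained sketch (integers only, no type tags, illustrative names, not the interpreter's actual macros): the two top-of-stack values live in local variables, so a binary operator never touches the backing array in the common case.

#include <stdint.h>
#include <stdio.h>

#define STACK_LEN	10
#define STACK_EMPTY	1

int main(void)
{
	int64_t e[STACK_LEN];	/* backing array for deeper entries */
	int top = STACK_EMPTY;	/* empty-stack marker, as above */
	int64_t ax = 0, bx = 0;	/* cached top and next-to-top of stack */

	/* push 3: spill bx, shift ax down into bx, write the new top */
	e[top - 1] = bx; bx = ax; top++; ax = 3;
	/* push 4 */
	e[top - 1] = bx; bx = ax; top++; ax = 4;

	/* binary op consumes bx and ax without touching the array */
	ax = bx + ax;			/* 3 + 4 */
	bx = e[top - 2]; top--;		/* reload next-to-top, drop one slot */

	printf("top of stack: %lld\n", (long long) ax);	/* 7 */
	return 0;
}
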
-
-enum lttng_interpreter_type {
- LTTNG_INTERPRETER_TYPE_S64,
- LTTNG_INTERPRETER_TYPE_U64,
- LTTNG_INTERPRETER_TYPE_SIGNED_ENUM,
- LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM,
- LTTNG_INTERPRETER_TYPE_DOUBLE,
- LTTNG_INTERPRETER_TYPE_STRING,
- LTTNG_INTERPRETER_TYPE_SEQUENCE,
-};
-
-/*
- * Represents the output parameter of the lttng interpreter.
- * Currently capturable field classes are integer, double, string and sequence
- * of integer.
- */
-struct lttng_interpreter_output {
- enum lttng_interpreter_type type;
- union {
- int64_t s;
- uint64_t u;
- double d;
-
- struct {
- const char *str;
- size_t len;
- } str;
- struct {
- const void *ptr;
- size_t nr_elem;
-
- /* Inner type. */
- const struct lttng_ust_type_common *nested_type;
- } sequence;
- } u;
-};
-
-const char *lttng_bytecode_print_op(enum bytecode_op op)
- __attribute__((visibility("hidden")));
-
-void lttng_bytecode_sync_state(struct lttng_ust_bytecode_runtime *runtime)
- __attribute__((visibility("hidden")));
-
-int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
- __attribute__((visibility("hidden")));
-
-int lttng_bytecode_specialize(const struct lttng_ust_event_desc *event_desc,
- struct bytecode_runtime *bytecode)
- __attribute__((visibility("hidden")));
-
-int lttng_bytecode_interpret_error(struct lttng_ust_bytecode_runtime *bytecode_runtime,
- const char *stack_data,
- void *ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_bytecode_interpret(struct lttng_ust_bytecode_runtime *bytecode_runtime,
- const char *stack_data,
- void *ctx)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_BYTECODE_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <error.h>
-#include <dlfcn.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <stdio.h>
-
-#include <lttng/ust-clock.h>
-#include <urcu/system.h>
-#include <urcu/arch.h>
-
-#include "common/logging.h"
-
-#include "clock.h"
-#include "getenv.h"
-
-struct lttng_ust_trace_clock *lttng_ust_trace_clock;
-
-static
-struct lttng_ust_trace_clock user_tc;
-
-static
-void *clock_handle;
-
-static
-uint64_t trace_clock_freq_monotonic(void)
-{
- return 1000000000ULL;
-}
-
-static
-int trace_clock_uuid_monotonic(char *uuid)
-{
- int ret = 0;
- size_t len;
- FILE *fp;
-
- /*
- * boot_id needs to be read once before being used concurrently
- * to deal with a Linux kernel race. A fix is proposed for
- * upstream, but the work-around is needed for older kernels.
- */
- fp = fopen("/proc/sys/kernel/random/boot_id", "r");
- if (!fp) {
- return -ENOENT;
- }
- len = fread(uuid, 1, LTTNG_UST_UUID_STR_LEN - 1, fp);
- if (len < LTTNG_UST_UUID_STR_LEN - 1) {
- ret = -EINVAL;
- goto end;
- }
- uuid[LTTNG_UST_UUID_STR_LEN - 1] = '\0';
-end:
- fclose(fp);
- return ret;
-}
-
-static
-const char *trace_clock_name_monotonic(void)
-{
- return "monotonic";
-}
-
-static
-const char *trace_clock_description_monotonic(void)
-{
- return "Monotonic Clock";
-}
-
-int lttng_ust_trace_clock_set_read64_cb(lttng_ust_clock_read64_function read64_cb)
-{
- if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
- return -EBUSY;
- user_tc.read64 = read64_cb;
- return 0;
-}
-
-int lttng_ust_trace_clock_get_read64_cb(lttng_ust_clock_read64_function *read64_cb)
-{
- struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
-
- if (caa_likely(!ltc)) {
- *read64_cb = &trace_clock_read64_monotonic;
- } else {
- cmm_read_barrier_depends(); /* load ltc before content */
- *read64_cb = ltc->read64;
- }
- return 0;
-}
-
-int lttng_ust_trace_clock_set_freq_cb(lttng_ust_clock_freq_function freq_cb)
-{
- if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
- return -EBUSY;
- user_tc.freq = freq_cb;
- return 0;
-}
-
-int lttng_ust_trace_clock_get_freq_cb(lttng_ust_clock_freq_function *freq_cb)
-{
- struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
-
- if (caa_likely(!ltc)) {
- *freq_cb = &trace_clock_freq_monotonic;
- } else {
- cmm_read_barrier_depends(); /* load ltc before content */
- *freq_cb = ltc->freq;
- }
- return 0;
-}
-
-int lttng_ust_trace_clock_set_uuid_cb(lttng_ust_clock_uuid_function uuid_cb)
-{
- if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
- return -EBUSY;
- user_tc.uuid = uuid_cb;
- return 0;
-}
-
-int lttng_ust_trace_clock_get_uuid_cb(lttng_ust_clock_uuid_function *uuid_cb)
-{
- struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
-
- if (caa_likely(!ltc)) {
- *uuid_cb = &trace_clock_uuid_monotonic;
- } else {
- cmm_read_barrier_depends(); /* load ltc before content */
- *uuid_cb = ltc->uuid;
- }
- return 0;
-}
-
-int lttng_ust_trace_clock_set_name_cb(lttng_ust_clock_name_function name_cb)
-{
- if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
- return -EBUSY;
- user_tc.name = name_cb;
- return 0;
-}
-
-int lttng_ust_trace_clock_get_name_cb(lttng_ust_clock_name_function *name_cb)
-{
- struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
-
- if (caa_likely(!ltc)) {
- *name_cb = &trace_clock_name_monotonic;
- } else {
- cmm_read_barrier_depends(); /* load ltc before content */
- *name_cb = ltc->name;
- }
- return 0;
-}
-
-int lttng_ust_trace_clock_set_description_cb(lttng_ust_clock_description_function description_cb)
-{
- if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
- return -EBUSY;
- user_tc.description = description_cb;
- return 0;
-}
-
-int lttng_ust_trace_clock_get_description_cb(lttng_ust_clock_description_function *description_cb)
-{
- struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
-
- if (caa_likely(!ltc)) {
- *description_cb = &trace_clock_description_monotonic;
- } else {
- cmm_read_barrier_depends(); /* load ltc before content */
- *description_cb = ltc->description;
- }
- return 0;
-}
-
-int lttng_ust_enable_trace_clock_override(void)
-{
- if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
- return -EBUSY;
- if (!user_tc.read64)
- return -EINVAL;
- if (!user_tc.freq)
- return -EINVAL;
- if (!user_tc.name)
- return -EINVAL;
- if (!user_tc.description)
- return -EINVAL;
- /* Use default uuid cb when NULL */
- cmm_smp_mb(); /* Store callbacks before trace clock */
- CMM_STORE_SHARED(lttng_ust_trace_clock, &user_tc);
- return 0;
-}
-
-void lttng_ust_clock_init(void)
-{
- const char *libname;
- void (*libinit)(void);
-
- if (clock_handle)
- return;
- libname = lttng_ust_getenv("LTTNG_UST_CLOCK_PLUGIN");
- if (!libname)
- return;
- clock_handle = dlopen(libname, RTLD_NOW);
- if (!clock_handle) {
- PERROR("Cannot load LTTng UST clock override library %s",
- libname);
- return;
- }
- dlerror();
- libinit = (void (*)(void)) dlsym(clock_handle,
- "lttng_ust_clock_plugin_init");
- if (!libinit) {
- PERROR("Cannot find LTTng UST clock override library %s initialization function lttng_ust_clock_plugin_init()",
- libname);
- return;
- }
- libinit();
-}
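
The loader above dlopen()s the shared object named by LTTNG_UST_CLOCK_PLUGIN and calls its lttng_ust_clock_plugin_init() entry point, which is expected to install callbacks through the lttng_ust_trace_clock_set_*_cb() API and then call lttng_ust_enable_trace_clock_override(). A hypothetical minimal plugin, roughly (illustrative names and values; error handling reduced to a message):

/* clock-plugin.c -- build e.g.:
 *   gcc -fPIC -shared -o libclock-plugin.so clock-plugin.c
 * then run the traced application with:
 *   LTTNG_UST_CLOCK_PLUGIN=./libclock-plugin.so ./app
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#include <lttng/ust-clock.h>

static uint64_t plugin_read64(void)
{
	struct timespec ts;

	/* Example override: CLOCK_REALTIME instead of the default monotonic clock. */
	if (clock_gettime(CLOCK_REALTIME, &ts))
		return 0;
	return (uint64_t) ts.tv_sec * 1000000000ULL + (uint64_t) ts.tv_nsec;
}

static uint64_t plugin_freq(void)
{
	return 1000000000ULL;	/* values are in nanoseconds */
}

static int plugin_uuid(char *uuid)
{
	/* Hypothetical fixed UUID string; a real plugin should derive one
	 * that changes whenever the clock source is not comparable. */
	const char str[] = "83c63deb-7aa4-48fb-abda-946f400d76e6";

	memcpy(uuid, str, sizeof(str));
	return 0;
}

static const char *plugin_name(void)
{
	return "my_realtime_clock";
}

static const char *plugin_description(void)
{
	return "Example realtime clock override";
}

void lttng_ust_clock_plugin_init(void)
{
	if (lttng_ust_trace_clock_set_read64_cb(plugin_read64))
		goto error;
	if (lttng_ust_trace_clock_set_freq_cb(plugin_freq))
		goto error;
	if (lttng_ust_trace_clock_set_uuid_cb(plugin_uuid))
		goto error;
	if (lttng_ust_trace_clock_set_name_cb(plugin_name))
		goto error;
	if (lttng_ust_trace_clock_set_description_cb(plugin_description))
		goto error;
	if (lttng_ust_enable_trace_clock_override())
		goto error;
	return;

error:
	fprintf(stderr, "Failed to install example clock override\n");
}
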
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST cgroup namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include "common/compat/tid.h"
-#include <urcu/tls-compat.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-#include "ns.h"
-
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event.
- */
-static DEFINE_URCU_TLS_INIT(ino_t, cached_cgroup_ns, NS_INO_UNINITIALIZED);
-
-static
-ino_t get_cgroup_ns(void)
-{
- struct stat sb;
- ino_t cgroup_ns;
-
- cgroup_ns = CMM_LOAD_SHARED(URCU_TLS(cached_cgroup_ns));
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(cgroup_ns != NS_INO_UNINITIALIZED))
- return cgroup_ns;
-
- /*
-	 * At this point we have to populate the cache. Set the initial
-	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
- * number from the proc filesystem, this is the value we will
- * cache.
- */
- cgroup_ns = NS_INO_UNAVAILABLE;
-
- /*
- * /proc/thread-self was introduced in kernel v3.17
- */
- if (stat("/proc/thread-self/ns/cgroup", &sb) == 0) {
- cgroup_ns = sb.st_ino;
- } else {
- char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
-
- if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
- "/proc/self/task/%d/ns/cgroup",
- lttng_gettid()) >= 0) {
-
- if (stat(proc_ns_path, &sb) == 0) {
- cgroup_ns = sb.st_ino;
- }
- }
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(URCU_TLS(cached_cgroup_ns), cgroup_ns);
-
- return cgroup_ns;
-}
-
-/*
- * The cgroup namespace can change for 3 reasons
- * * clone(2) called with CLONE_NEWCGROUP
- * * setns(2) called with the fd of a different cgroup ns
- * * unshare(2) called with CLONE_NEWCGROUP
- */
-void lttng_context_cgroup_ns_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_cgroup_ns), NS_INO_UNINITIALIZED);
-}
-
-static
-size_t cgroup_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void cgroup_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t cgroup_ns;
-
- cgroup_ns = get_cgroup_ns();
- chan->ops->event_write(ctx, &cgroup_ns, sizeof(cgroup_ns),
- lttng_ust_rb_alignof(cgroup_ns));
-}
-
-static
-void cgroup_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_cgroup_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("cgroup_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- cgroup_ns_get_size,
- cgroup_ns_record,
- cgroup_ns_get_value,
- NULL, NULL);
-
-int lttng_add_cgroup_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_cgroup_ns_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_cgroup_ns)));
-}
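
For reference, the value recorded by this context is the same inode number visible through stat(2) on the proc files used above; a standalone sketch to print it (only the /proc/thread-self path, which assumes Linux >= 3.17):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat sb;

	if (stat("/proc/thread-self/ns/cgroup", &sb) != 0) {
		perror("stat");
		return 1;
	}
	printf("cgroup ns inode: %llu\n", (unsigned long long) sb.st_ino);
	return 0;
}
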
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST CPU id context.
- *
- * Note: threads can be migrated at any point while executing the
- * tracepoint probe. This means the CPU id field (and filter) is only
- * statistical. For instance, even though a user might select a
- * cpu_id==1 filter, there may be few events recorded into the channel
- * appearing from other CPUs, due to migration.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <limits.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include "common/ringbuffer/getcpu.h"
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-
-static
-size_t cpu_id_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int));
- size += sizeof(int);
- return size;
-}
-
-static
-void cpu_id_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- int cpu;
-
- cpu = lttng_ust_get_cpu();
- chan->ops->event_write(ctx, &cpu, sizeof(cpu), lttng_ust_rb_alignof(cpu));
-}
-
-static
-void cpu_id_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = lttng_ust_get_cpu();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("cpu_id",
- lttng_ust_static_type_integer(sizeof(int) * CHAR_BIT,
- lttng_ust_rb_alignof(int) * CHAR_BIT,
- lttng_ust_is_signed_type(int),
- BYTE_ORDER, 10),
- false, false),
- cpu_id_get_size,
- cpu_id_record,
- cpu_id_get_value,
- NULL, NULL);
-
-int lttng_add_cpu_id_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST Instruction Pointer Context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-
-static
-size_t ip_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(void *));
- size += sizeof(void *);
- return size;
-}
-
-static
-void ip_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- void *ip;
-
- ip = ctx->ip;
- chan->ops->event_write(ctx, &ip, sizeof(ip), lttng_ust_rb_alignof(ip));
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("ip",
- lttng_ust_static_type_integer(sizeof(void *) * CHAR_BIT,
- lttng_ust_rb_alignof(void *) * CHAR_BIT,
- lttng_ust_is_signed_type(void *),
- BYTE_ORDER, 10),
- false, false),
- ip_get_size,
- ip_record,
- NULL, NULL, NULL);
-
-int lttng_add_ip_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST ipc namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include "common/compat/tid.h"
-#include <urcu/tls-compat.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-#include "ns.h"
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event.
- */
-static DEFINE_URCU_TLS_INIT(ino_t, cached_ipc_ns, NS_INO_UNINITIALIZED);
-
-static
-ino_t get_ipc_ns(void)
-{
- struct stat sb;
- ino_t ipc_ns;
-
- ipc_ns = CMM_LOAD_SHARED(URCU_TLS(cached_ipc_ns));
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(ipc_ns != NS_INO_UNINITIALIZED))
- return ipc_ns;
-
- /*
-	 * At this point we have to populate the cache. Set the initial
-	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
- * number from the proc filesystem, this is the value we will
- * cache.
- */
- ipc_ns = NS_INO_UNAVAILABLE;
-
- /*
- * /proc/thread-self was introduced in kernel v3.17
- */
- if (stat("/proc/thread-self/ns/ipc", &sb) == 0) {
- ipc_ns = sb.st_ino;
- } else {
- char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
-
- if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
- "/proc/self/task/%d/ns/ipc",
- lttng_gettid()) >= 0) {
-
- if (stat(proc_ns_path, &sb) == 0) {
- ipc_ns = sb.st_ino;
- }
- }
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(URCU_TLS(cached_ipc_ns), ipc_ns);
-
- return ipc_ns;
-}
-
-/*
- * The ipc namespace can change for 3 reasons
- * * clone(2) called with CLONE_NEWIPC
- * * setns(2) called with the fd of a different ipc ns
- * * unshare(2) called with CLONE_NEWIPC
- */
-void lttng_context_ipc_ns_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_ipc_ns), NS_INO_UNINITIALIZED);
-}
-
-static
-size_t ipc_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void ipc_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t ipc_ns;
-
- ipc_ns = get_ipc_ns();
- chan->ops->event_write(ctx, &ipc_ns, sizeof(ipc_ns), lttng_ust_rb_alignof(ipc_ns));
-}
-
-static
-void ipc_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_ipc_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("ipc_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- ipc_ns_get_size,
- ipc_ns_record,
- ipc_ns_get_value,
- NULL, NULL);
-
-int lttng_add_ipc_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_ipc_ns_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_ipc_ns)));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST mnt namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "ns.h"
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event. The mount namespace is global to the process.
- */
-static ino_t cached_mnt_ns = NS_INO_UNINITIALIZED;
-
-static
-ino_t get_mnt_ns(void)
-{
- struct stat sb;
- ino_t mnt_ns;
-
- mnt_ns = CMM_LOAD_SHARED(cached_mnt_ns);
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(mnt_ns != NS_INO_UNINITIALIZED))
- return mnt_ns;
-
- /*
-	 * At this point we have to populate the cache. Set the initial
-	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
- * number from the proc filesystem, this is the value we will
- * cache.
- */
- mnt_ns = NS_INO_UNAVAILABLE;
-
- if (stat("/proc/self/ns/mnt", &sb) == 0) {
- mnt_ns = sb.st_ino;
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(cached_mnt_ns, mnt_ns);
-
- return mnt_ns;
-}
-
-/*
- * The mnt namespace can change for 3 reasons
- * * clone(2) called with CLONE_NEWNS
- * * setns(2) called with the fd of a different mnt ns
- * * unshare(2) called with CLONE_NEWNS
- */
-void lttng_context_mnt_ns_reset(void)
-{
- CMM_STORE_SHARED(cached_mnt_ns, NS_INO_UNINITIALIZED);
-}
-
-static
-size_t mnt_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void mnt_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t mnt_ns;
-
- mnt_ns = get_mnt_ns();
- chan->ops->event_write(ctx, &mnt_ns, sizeof(mnt_ns), lttng_ust_rb_alignof(mnt_ns));
-}
-
-static
-void mnt_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_mnt_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("mnt_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- mnt_ns_get_size,
- mnt_ns_record,
- mnt_ns_get_value,
- NULL, NULL);
-
-int lttng_add_mnt_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST net namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include "common/compat/tid.h"
-#include <urcu/tls-compat.h>
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-#include "ns.h"
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event.
- */
-static DEFINE_URCU_TLS_INIT(ino_t, cached_net_ns, NS_INO_UNINITIALIZED);
-
-static
-ino_t get_net_ns(void)
-{
- struct stat sb;
- ino_t net_ns;
-
- net_ns = CMM_LOAD_SHARED(URCU_TLS(cached_net_ns));
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(net_ns != NS_INO_UNINITIALIZED))
- return net_ns;
-
- /*
-	 * At this point we have to populate the cache. Set the initial
-	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
- * number from the proc filesystem, this is the value we will
- * cache.
- */
- net_ns = NS_INO_UNAVAILABLE;
-
- /*
- * /proc/thread-self was introduced in kernel v3.17
- */
- if (stat("/proc/thread-self/ns/net", &sb) == 0) {
- net_ns = sb.st_ino;
- } else {
- char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
-
- if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
- "/proc/self/task/%d/ns/net",
- lttng_gettid()) >= 0) {
-
- if (stat(proc_ns_path, &sb) == 0) {
- net_ns = sb.st_ino;
- }
- }
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(URCU_TLS(cached_net_ns), net_ns);
-
- return net_ns;
-}
-
-/*
- * The net namespace can change for 3 reasons
- * * clone(2) called with CLONE_NEWNET
- * * setns(2) called with the fd of a different net ns
- * * unshare(2) called with CLONE_NEWNET
- */
-void lttng_context_net_ns_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_net_ns), NS_INO_UNINITIALIZED);
-}
-
-static
-size_t net_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void net_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t net_ns;
-
- net_ns = get_net_ns();
- chan->ops->event_write(ctx, &net_ns, sizeof(net_ns), lttng_ust_rb_alignof(net_ns));
-}
-
-static
-void net_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_net_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("net_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- net_ns_get_size,
- net_ns_record,
- net_ns_get_value,
- NULL, NULL);
-
-int lttng_add_net_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_net_ns_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_net_ns)));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST performance monitoring counters (perf-counters) integration.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <string.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/mman.h>
-#include <sys/syscall.h>
-#include <lttng/ust-arch.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include <urcu/system.h>
-#include <urcu/arch.h>
-#include <urcu/rculist.h>
-#include "common/macros.h"
-#include <urcu/ref.h>
-#include "common/logging.h"
-#include <signal.h>
-#include <urcu/tls-compat.h>
-#include "perf_event.h"
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-#include "ust-events-internal.h"
-
-/*
- * We use a global perf counter key and iterate on per-thread RCU lists
- * of fields in the fast path, even though this is not strictly speaking
- * what would provide the best fast-path complexity, to ensure teardown
- * of sessions vs thread exit is handled racelessly.
- *
- * Updates and traversals of thread_list are protected by UST lock.
- * Updates to rcu_field_list are protected by UST lock.
- */
-
-struct lttng_perf_counter_thread_field {
- struct lttng_perf_counter_field *field; /* Back reference */
- struct perf_event_mmap_page *pc;
- struct cds_list_head thread_field_node; /* Per-field list of thread fields (node) */
- struct cds_list_head rcu_field_node; /* RCU per-thread list of fields (node) */
- int fd; /* Perf FD */
-};
-
-struct lttng_perf_counter_thread {
- struct cds_list_head rcu_field_list; /* RCU per-thread list of fields */
-};
-
-struct lttng_perf_counter_field {
- struct perf_event_attr attr;
- struct cds_list_head thread_field_list; /* Per-field list of thread fields */
- char *name;
- struct lttng_ust_event_field *event_field;
-};
-
-static pthread_key_t perf_counter_key;
-
-/*
- * lttng_perf_lock - Protect lttng-ust perf counter data structures
- *
- * Nests within the ust_lock, and therefore within the libc dl lock.
- * Therefore, we need to fixup the TLS before nesting into this lock.
- * Nests inside RCU bp read-side lock. Protects against concurrent
- * fork.
- */
-static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
- * restored on unlock. Protected by ust_perf_mutex.
- */
-static int ust_perf_saved_cancelstate;
-
-/*
- * Track whether we are tracing from a signal handler nested on an
- * application thread.
- */
-static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_ust_fixup_perf_counter_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
-}
-
-void lttng_perf_lock(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
-
- ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (!URCU_TLS(ust_perf_mutex_nest)++) {
- /*
-		 * Ensure the compiler doesn't move the store after the close()
- * call in case close() would be marked as leaf.
- */
- cmm_barrier();
- pthread_mutex_lock(&ust_perf_mutex);
- ust_perf_saved_cancelstate = oldstate;
- }
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
-}
-
-void lttng_perf_unlock(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, newstate, oldstate;
- bool restore_cancel = false;
-
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- /*
-	 * Ensure the compiler doesn't move the store before the close()
- * call, in case close() would be marked as leaf.
- */
- cmm_barrier();
- if (!--URCU_TLS(ust_perf_mutex_nest)) {
- newstate = ust_perf_saved_cancelstate;
- restore_cancel = true;
- pthread_mutex_unlock(&ust_perf_mutex);
- }
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (restore_cancel) {
- ret = pthread_setcancelstate(newstate, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- }
-}
-
-static
-size_t perf_counter_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
- size += sizeof(uint64_t);
- return size;
-}
-
-static
-uint64_t read_perf_counter_syscall(
- struct lttng_perf_counter_thread_field *thread_field)
-{
- uint64_t count;
-
- if (caa_unlikely(thread_field->fd < 0))
- return 0;
-
- if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
- < sizeof(count)))
- return 0;
-
- return count;
-}
-
-#if defined(LTTNG_UST_ARCH_X86)
-
-static
-uint64_t rdpmc(unsigned int counter)
-{
- unsigned int low, high;
-
- asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
-
- return low | ((uint64_t) high) << 32;
-}
-
-static
-bool has_rdpmc(struct perf_event_mmap_page *pc)
-{
- if (caa_unlikely(!pc->cap_bit0_is_deprecated))
- return false;
- /* Since Linux kernel 3.12. */
- return pc->cap_user_rdpmc;
-}
-
-static
-uint64_t arch_read_perf_counter(
- struct lttng_perf_counter_thread_field *thread_field)
-{
- uint32_t seq, idx;
- uint64_t count;
- struct perf_event_mmap_page *pc = thread_field->pc;
-
- if (caa_unlikely(!pc))
- return 0;
-
- do {
- seq = CMM_LOAD_SHARED(pc->lock);
- cmm_barrier();
-
- idx = pc->index;
- if (caa_likely(has_rdpmc(pc) && idx)) {
- int64_t pmcval;
-
- pmcval = rdpmc(idx - 1);
- /* Sign-extend the pmc register result. */
- pmcval <<= 64 - pc->pmc_width;
- pmcval >>= 64 - pc->pmc_width;
- count = pc->offset + pmcval;
- } else {
- /* Fall-back on system call if rdpmc cannot be used. */
- return read_perf_counter_syscall(thread_field);
- }
- cmm_barrier();
- } while (CMM_LOAD_SHARED(pc->lock) != seq);
-
- return count;
-}
-
-static
-int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
-{
- struct perf_event_mmap_page *pc = thread_field->pc;
-
- if (!pc)
- return 0;
- return !has_rdpmc(pc);
-}
-
-#else
-
-/* Generic (slow) implementation using a read system call. */
-static
-uint64_t arch_read_perf_counter(
- struct lttng_perf_counter_thread_field *thread_field)
-{
- return read_perf_counter_syscall(thread_field);
-}
-
-static
-int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
-{
- return 1;
-}
-
-#endif
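
The shift pair in arch_read_perf_counter() above turns a pmc_width-bit counter sample into a proper signed 64-bit delta. A standalone sketch of the same arithmetic (like the code above, it relies on the usual arithmetic, sign-propagating right shift of signed values):

#include <stdint.h>
#include <stdio.h>

static int64_t sign_extend(uint64_t raw, unsigned int width)
{
	/* Shift the value up so its sign bit becomes bit 63, then shift
	 * back down with an arithmetic right shift. */
	return (int64_t) (raw << (64 - width)) >> (64 - width);
}

int main(void)
{
	/* A 48-bit counter value with its top bit set is negative. */
	printf("%lld\n", (long long) sign_extend(0xffffffffffffULL, 48)); /* -1 */
	printf("%lld\n", (long long) sign_extend(0x10ULL, 48));           /* 16 */
	return 0;
}
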
-
-static
-int sys_perf_event_open(struct perf_event_attr *attr,
- pid_t pid, int cpu, int group_fd,
- unsigned long flags)
-{
- return syscall(SYS_perf_event_open, attr, pid, cpu,
- group_fd, flags);
-}
-
-static
-int open_perf_fd(struct perf_event_attr *attr)
-{
- int fd;
-
- fd = sys_perf_event_open(attr, 0, -1, -1, 0);
- if (fd < 0)
- return -1;
-
- return fd;
-}
-
-static
-void close_perf_fd(int fd)
-{
- int ret;
-
- if (fd < 0)
- return;
-
- ret = close(fd);
- if (ret) {
- perror("Error closing LTTng-UST perf memory mapping FD");
- }
-}
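
The fallback read path above boils down to perf_event_open(2) plus read(2). A standalone sketch counting CPU cycles for the current thread (hypothetical event choice; may fail with EACCES depending on kernel.perf_event_paranoid):

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;	/* count user space only */

	/* pid = 0, cpu = -1: this thread, on any CPU. */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	if (read(fd, &count, sizeof(count)) != (ssize_t) sizeof(count)) {
		perror("read");
		close(fd);
		return 1;
	}
	printf("cycles so far: %llu\n", (unsigned long long) count);
	close(fd);
	return 0;
}
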
-
-static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
-{
- void *perf_addr;
-
- perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
- PROT_READ, MAP_SHARED, thread_field->fd, 0);
- if (perf_addr == MAP_FAILED)
- perf_addr = NULL;
- thread_field->pc = perf_addr;
-
- if (!arch_perf_keep_fd(thread_field)) {
- close_perf_fd(thread_field->fd);
- thread_field->fd = -1;
- }
-}
-
-static
-void unmap_perf_page(struct perf_event_mmap_page *pc)
-{
- int ret;
-
- if (!pc)
- return;
- ret = munmap(pc, sizeof(struct perf_event_mmap_page));
- if (ret < 0) {
- PERROR("Error in munmap");
- abort();
- }
-}
-
-static
-struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
-{
- struct lttng_perf_counter_thread *perf_thread;
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- if (ret)
- abort();
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- if (ret)
- abort();
- /* Check again with signals disabled */
- perf_thread = pthread_getspecific(perf_counter_key);
- if (perf_thread)
- goto skip;
- perf_thread = zmalloc(sizeof(*perf_thread));
- if (!perf_thread)
- abort();
- CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
- ret = pthread_setspecific(perf_counter_key, perf_thread);
- if (ret)
- abort();
-skip:
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- if (ret)
- abort();
- return perf_thread;
-}
-
-static
-struct lttng_perf_counter_thread_field *
- add_thread_field(struct lttng_perf_counter_field *perf_field,
- struct lttng_perf_counter_thread *perf_thread)
-{
- struct lttng_perf_counter_thread_field *thread_field;
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- if (ret)
- abort();
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- if (ret)
- abort();
- /* Check again with signals disabled */
- cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
- rcu_field_node) {
- if (thread_field->field == perf_field)
- goto skip;
- }
- thread_field = zmalloc(sizeof(*thread_field));
- if (!thread_field)
- abort();
- thread_field->field = perf_field;
- thread_field->fd = open_perf_fd(&perf_field->attr);
- if (thread_field->fd >= 0)
- setup_perf(thread_field);
- /*
- * Note: thread_field->pc can be NULL if setup_perf() fails.
- * Also, thread_field->fd can be -1 if open_perf_fd() fails.
- */
- lttng_perf_lock();
- cds_list_add_rcu(&thread_field->rcu_field_node,
- &perf_thread->rcu_field_list);
- cds_list_add(&thread_field->thread_field_node,
- &perf_field->thread_field_list);
- lttng_perf_unlock();
-skip:
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- if (ret)
- abort();
- return thread_field;
-}
-
-static
-struct lttng_perf_counter_thread_field *
- get_thread_field(struct lttng_perf_counter_field *field)
-{
- struct lttng_perf_counter_thread *perf_thread;
- struct lttng_perf_counter_thread_field *thread_field;
-
- perf_thread = pthread_getspecific(perf_counter_key);
- if (!perf_thread)
- perf_thread = alloc_perf_counter_thread();
- cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
- rcu_field_node) {
- if (thread_field->field == field)
- return thread_field;
- }
- /* perf_counter_thread_field not found, need to add one */
- return add_thread_field(field, perf_thread);
-}
-
-static
-uint64_t wrapper_perf_counter_read(void *priv)
-{
- struct lttng_perf_counter_field *perf_field;
- struct lttng_perf_counter_thread_field *perf_thread_field;
-
- perf_field = (struct lttng_perf_counter_field *) priv;
- perf_thread_field = get_thread_field(perf_field);
- return arch_read_perf_counter(perf_thread_field);
-}
-
-static
-void perf_counter_record(void *priv,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- uint64_t value;
-
- value = wrapper_perf_counter_read(priv);
- chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
-}
-
-static
-void perf_counter_get_value(void *priv,
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = wrapper_perf_counter_read(priv);
-}
-
-/* Called with perf lock held */
-static
-void lttng_destroy_perf_thread_field(
- struct lttng_perf_counter_thread_field *thread_field)
-{
- close_perf_fd(thread_field->fd);
- unmap_perf_page(thread_field->pc);
- cds_list_del_rcu(&thread_field->rcu_field_node);
- cds_list_del(&thread_field->thread_field_node);
- free(thread_field);
-}
-
-static
-void lttng_destroy_perf_thread_key(void *_key)
-{
- struct lttng_perf_counter_thread *perf_thread = _key;
- struct lttng_perf_counter_thread_field *pos, *p;
-
- lttng_perf_lock();
- cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
- rcu_field_node)
- lttng_destroy_perf_thread_field(pos);
- lttng_perf_unlock();
- free(perf_thread);
-}
-
-/* Called with UST lock held */
-static
-void lttng_destroy_perf_counter_ctx_field(void *priv)
-{
- struct lttng_perf_counter_field *perf_field;
- struct lttng_perf_counter_thread_field *pos, *p;
-
- perf_field = (struct lttng_perf_counter_field *) priv;
- free(perf_field->name);
- /*
-	 * This put is performed when no threads can concurrently
-	 * perform a "get", thanks to the urcu-bp grace
- * period. Holding the lttng perf lock protects against
- * concurrent modification of the per-thread thread field
- * list.
- */
- lttng_perf_lock();
- cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
- thread_field_node)
- lttng_destroy_perf_thread_field(pos);
- lttng_perf_unlock();
- free(perf_field->event_field);
- free(perf_field);
-}
-
-#ifdef LTTNG_UST_ARCH_ARMV7
-
-static
-int perf_get_exclude_kernel(void)
-{
- return 0;
-}
-
-#else /* LTTNG_UST_ARCH_ARMV7 */
-
-static
-int perf_get_exclude_kernel(void)
-{
- return 1;
-}
-
-#endif /* LTTNG_UST_ARCH_ARMV7 */
-
-static const struct lttng_ust_type_common *ust_type =
- lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
- lttng_ust_is_signed_type(uint64_t),
- BYTE_ORDER, 10);
-
-/* Called with UST lock held */
-int lttng_add_perf_counter_to_ctx(uint32_t type,
- uint64_t config,
- const char *name,
- struct lttng_ust_ctx **ctx)
-{
- struct lttng_ust_ctx_field ctx_field;
- struct lttng_ust_event_field *event_field;
- struct lttng_perf_counter_field *perf_field;
- char *name_alloc;
- int ret;
-
- if (lttng_find_context(*ctx, name)) {
- ret = -EEXIST;
- goto find_error;
- }
- name_alloc = strdup(name);
- if (!name_alloc) {
- ret = -ENOMEM;
- goto name_alloc_error;
- }
- event_field = zmalloc(sizeof(*event_field));
- if (!event_field) {
- ret = -ENOMEM;
- goto event_field_alloc_error;
- }
- event_field->name = name_alloc;
- event_field->type = ust_type;
-
- perf_field = zmalloc(sizeof(*perf_field));
- if (!perf_field) {
- ret = -ENOMEM;
- goto perf_field_alloc_error;
- }
- perf_field->attr.type = type;
- perf_field->attr.config = config;
- perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
- CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
- perf_field->name = name_alloc;
- perf_field->event_field = event_field;
-
- /* Ensure that this perf counter can be used in this process. */
- ret = open_perf_fd(&perf_field->attr);
- if (ret < 0) {
- ret = -ENODEV;
- goto setup_error;
- }
- close_perf_fd(ret);
-
- ctx_field.event_field = event_field;
- ctx_field.get_size = perf_counter_get_size;
- ctx_field.record = perf_counter_record;
- ctx_field.get_value = perf_counter_get_value;
- ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
- ctx_field.priv = perf_field;
-
- ret = lttng_ust_context_append(ctx, &ctx_field);
- if (ret) {
- ret = -ENOMEM;
- goto append_context_error;
- }
- return 0;
-
-append_context_error:
-setup_error:
- free(perf_field);
-perf_field_alloc_error:
- free(event_field);
-event_field_alloc_error:
- free(name_alloc);
-name_alloc_error:
-find_error:
- return ret;
-}
-
-int lttng_perf_counter_init(void)
-{
- int ret;
-
- ret = pthread_key_create(&perf_counter_key,
- lttng_destroy_perf_thread_key);
- if (ret)
- ret = -ret;
- return ret;
-}
-
-void lttng_perf_counter_exit(void)
-{
- int ret;
-
- ret = pthread_key_delete(perf_counter_key);
- if (ret) {
- errno = ret;
- PERROR("Error in pthread_key_delete");
- }
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST pid namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "ns.h"
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event. The PID namespace is global to the process.
- */
-static ino_t cached_pid_ns = NS_INO_UNINITIALIZED;
-
-static
-ino_t get_pid_ns(void)
-{
- struct stat sb;
- ino_t pid_ns;
-
- pid_ns = CMM_LOAD_SHARED(cached_pid_ns);
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(pid_ns != NS_INO_UNINITIALIZED))
- return pid_ns;
-
- /*
-	 * At this point we have to populate the cache. Set the initial
-	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
- * number from the proc filesystem, this is the value we will
- * cache.
- */
- pid_ns = NS_INO_UNAVAILABLE;
-
- if (stat("/proc/self/ns/pid", &sb) == 0) {
- pid_ns = sb.st_ino;
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(cached_pid_ns, pid_ns);
-
- return pid_ns;
-}
-
-/*
- * A process's PID namespace membership is determined when the process is
- * created and cannot be changed thereafter.
- *
- * The pid namespace can change only on clone(2) / fork(2) :
- * - clone(2) with the CLONE_NEWPID flag
- * - clone(2) / fork(2) after a call to unshare(2) with the CLONE_NEWPID flag
- * - clone(2) / fork(2) after a call to setns(2) with a PID namespace fd
- */
-void lttng_context_pid_ns_reset(void)
-{
- CMM_STORE_SHARED(cached_pid_ns, NS_INO_UNINITIALIZED);
-}
-
-static
-size_t pid_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void pid_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t pid_ns;
-
- pid_ns = get_pid_ns();
- chan->ops->event_write(ctx, &pid_ns, sizeof(pid_ns), lttng_ust_rb_alignof(pid_ns));
-}
-
-static
-void pid_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_pid_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("pid_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- pid_ns_get_size,
- pid_ns_record,
- pid_ns_get_value,
- NULL, NULL);
-
-int lttng_add_pid_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST procname context.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include <urcu/tls-compat.h>
-#include <assert.h>
-#include "common/compat/pthread.h"
-#include "lttng-tracer-core.h"
-
-#include "context-internal.h"
-
-/* Maximum number of nesting levels for the procname cache. */
-#define PROCNAME_NESTING_MAX 2
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event.
- * Upon exec, procname changes, but exec takes care of throwing away
- * this cached version.
- * The procname can also be changed by calling prctl(). The procname should
- * be set for a thread before the first event is logged within this
- * thread.
- */
-typedef char procname_array[PROCNAME_NESTING_MAX][17];
-
-static DEFINE_URCU_TLS(procname_array, cached_procname);
-
-static DEFINE_URCU_TLS(int, procname_nesting);
-
-static inline
-const char *wrapper_getprocname(void)
-{
- int nesting = CMM_LOAD_SHARED(URCU_TLS(procname_nesting));
-
- if (caa_unlikely(nesting >= PROCNAME_NESTING_MAX))
- return "<unknown>";
- if (caa_unlikely(!URCU_TLS(cached_procname)[nesting][0])) {
- CMM_STORE_SHARED(URCU_TLS(procname_nesting), nesting + 1);
- /* Increment nesting before updating cache. */
- cmm_barrier();
- lttng_pthread_getname_np(URCU_TLS(cached_procname)[nesting], LTTNG_UST_ABI_PROCNAME_LEN);
- URCU_TLS(cached_procname)[nesting][LTTNG_UST_ABI_PROCNAME_LEN - 1] = '\0';
- /* Decrement nesting after updating cache. */
- cmm_barrier();
- CMM_STORE_SHARED(URCU_TLS(procname_nesting), nesting);
- }
- return URCU_TLS(cached_procname)[nesting];
-}
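
The nesting counter guards against re-entrancy: filling the cache may itself emit an event and re-enter this getter, in which case the inner call uses the next slot rather than the one being filled, and gives up past PROCNAME_NESTING_MAX. A reduced sketch of that guard, with hypothetical names, plain __thread storage and prctl(2) standing in for the URCU TLS and compat helpers (memory barriers omitted for brevity):

#include <sys/prctl.h>

#define MAX_NESTING	2
#define NAME_LEN	17

static __thread char name_cache[MAX_NESTING][NAME_LEN];
static __thread int name_nesting;

/* Sketch: per-thread cached getter that tolerates one level of re-entry. */
static const char *cached_name(void)
{
	int level = name_nesting;

	if (level >= MAX_NESTING)
		return "<unknown>";		/* Nested too deeply: give up. */
	if (!name_cache[level][0]) {
		name_nesting = level + 1;	/* Reserve the slot before filling it. */
		prctl(PR_GET_NAME, name_cache[level], 0, 0, 0);
		name_cache[level][NAME_LEN - 1] = '\0';
		name_nesting = level;		/* Release the slot. */
	}
	return name_cache[level];
}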
-
-/* Reset should not be called from a signal handler. */
-void lttng_ust_context_procname_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_procname)[1][0], '\0');
- CMM_STORE_SHARED(URCU_TLS(procname_nesting), 1);
- CMM_STORE_SHARED(URCU_TLS(cached_procname)[0][0], '\0');
- CMM_STORE_SHARED(URCU_TLS(procname_nesting), 0);
-}
-
-static
-size_t procname_get_size(void *priv __attribute__((unused)),
- size_t offset __attribute__((unused)))
-{
- return LTTNG_UST_ABI_PROCNAME_LEN;
-}
-
-static
-void procname_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- const char *procname;
-
- procname = wrapper_getprocname();
- chan->ops->event_write(ctx, procname, LTTNG_UST_ABI_PROCNAME_LEN, 1);
-}
-
-static
-void procname_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.str = wrapper_getprocname();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("procname",
- lttng_ust_static_type_array_text(LTTNG_UST_ABI_PROCNAME_LEN),
- false, false),
- procname_get_size,
- procname_record,
- procname_get_value,
- NULL, NULL);
-
-int lttng_add_procname_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_procname_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_procname)[0]));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST application context provider.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <common/ust-context-provider.h>
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-#include "jhash.h"
-#include "context-provider-internal.h"
-#include "common/macros.h"
-
-struct lttng_ust_registered_context_provider {
- const struct lttng_ust_context_provider *provider;
-
- struct cds_hlist_node node;
-};
-
-struct lttng_ust_app_ctx {
- char *name;
- struct lttng_ust_event_field *event_field;
- struct lttng_ust_type_common *type;
-};
-
-#define CONTEXT_PROVIDER_HT_BITS 12
-#define CONTEXT_PROVIDER_HT_SIZE (1U << CONTEXT_PROVIDER_HT_BITS)
-struct context_provider_ht {
- struct cds_hlist_head table[CONTEXT_PROVIDER_HT_SIZE];
-};
-
-static struct context_provider_ht context_provider_ht;
-
-static const struct lttng_ust_context_provider *
- lookup_provider_by_name(const char *name)
-{
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- struct lttng_ust_registered_context_provider *reg_provider;
- uint32_t hash;
- const char *end;
- size_t len;
-
- /* Lookup using everything before first ':' as key. */
- end = strchr(name, ':');
- if (end)
- len = end - name;
- else
- len = strlen(name);
- hash = jhash(name, len, 0);
- head = &context_provider_ht.table[hash & (CONTEXT_PROVIDER_HT_SIZE - 1)];
- cds_hlist_for_each_entry(reg_provider, node, head, node) {
- if (!strncmp(reg_provider->provider->name, name, len))
- return reg_provider->provider;
- }
- return NULL;
-}
-
-struct lttng_ust_registered_context_provider *lttng_ust_context_provider_register(struct lttng_ust_context_provider *provider)
-{
- struct lttng_ust_registered_context_provider *reg_provider = NULL;
- struct cds_hlist_head *head;
- size_t name_len = strlen(provider->name);
- uint32_t hash;
-
- lttng_ust_fixup_tls();
-
- /* Provider name starts with "$app.". */
- if (strncmp("$app.", provider->name, strlen("$app.")) != 0)
- return NULL;
- /* Provider name cannot contain a colon character. */
- if (strchr(provider->name, ':'))
- return NULL;
- if (ust_lock())
- goto end;
- if (lookup_provider_by_name(provider->name))
- goto end;
- reg_provider = zmalloc(sizeof(struct lttng_ust_registered_context_provider));
- if (!reg_provider)
- goto end;
- reg_provider->provider = provider;
- hash = jhash(provider->name, name_len, 0);
- head = &context_provider_ht.table[hash & (CONTEXT_PROVIDER_HT_SIZE - 1)];
- cds_hlist_add_head(&reg_provider->node, head);
-
- lttng_ust_context_set_session_provider(provider->name,
- provider->get_size, provider->record,
- provider->get_value, provider->priv);
-
- lttng_ust_context_set_event_notifier_group_provider(provider->name,
- provider->get_size, provider->record,
- provider->get_value, provider->priv);
-end:
- ust_unlock();
- return reg_provider;
-}
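
From the application side, registration could look roughly like the sketch below. Only the provider fields actually referenced in this file (name, get_size, record, get_value, priv) are shown; any additional fields in the public header are omitted, the callbacks are minimal placeholders that record nothing, and the provider name simply has to start with "$app." and contain no ':'.

/* Sketch: placeholder callbacks for a hypothetical "$app.myprovider" context. */
static size_t my_get_size(void *priv __attribute__((unused)),
		size_t offset __attribute__((unused)))
{
	return 0;	/* A real provider reserves space for its serialized value. */
}

static void my_record(void *priv __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
		struct lttng_ust_channel_buffer *chan __attribute__((unused)))
{
	/* A real provider writes its value with chan->ops->event_write(). */
}

static void my_get_value(void *priv __attribute__((unused)),
		struct lttng_ust_ctx_value *value)
{
	value->u.s64 = 42;	/* Purely illustrative constant. */
}

static struct lttng_ust_context_provider my_provider = {
	.name = "$app.myprovider",	/* Must start with "$app.", no ':'. */
	.get_size = my_get_size,
	.record = my_record,
	.get_value = my_get_value,
	.priv = NULL,
};

static struct lttng_ust_registered_context_provider *my_reg;

static void my_register(void)
{
	my_reg = lttng_ust_context_provider_register(&my_provider);
	/* NULL means invalid name, duplicate provider, or allocation failure. */
}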
-
-void lttng_ust_context_provider_unregister(struct lttng_ust_registered_context_provider *reg_provider)
-{
- lttng_ust_fixup_tls();
-
- if (ust_lock())
- goto end;
- lttng_ust_context_set_session_provider(reg_provider->provider->name,
- lttng_ust_dummy_get_size, lttng_ust_dummy_record,
- lttng_ust_dummy_get_value, NULL);
-
- lttng_ust_context_set_event_notifier_group_provider(reg_provider->provider->name,
- lttng_ust_dummy_get_size, lttng_ust_dummy_record,
- lttng_ust_dummy_get_value, NULL);
-
- cds_hlist_del(&reg_provider->node);
-end:
- ust_unlock();
- free(reg_provider);
-}
-
-static void destroy_app_ctx(void *priv)
-{
- struct lttng_ust_app_ctx *app_ctx = (struct lttng_ust_app_ctx *) priv;
-
- free(app_ctx->name);
- free(app_ctx->event_field);
- free(app_ctx->type);
- free(app_ctx);
-}
-
-/*
- * Called with ust mutex held.
- * Add the application context to the context array, even if the
- * application context is not currently loaded by the application. In
- * that case it will use the dummy callbacks.
- * Always performed before tracing is started, since it modifies
- * metadata describing the context.
- */
-int lttng_ust_add_app_context_to_ctx_rcu(const char *name,
- struct lttng_ust_ctx **ctx)
-{
- const struct lttng_ust_context_provider *provider;
- struct lttng_ust_ctx_field new_field = { 0 };
- struct lttng_ust_event_field *event_field = NULL;
- struct lttng_ust_type_common *type = NULL;
- struct lttng_ust_app_ctx *app_ctx = NULL;
- char *ctx_name;
- int ret;
-
- if (*ctx && lttng_find_context(*ctx, name))
- return -EEXIST;
- event_field = zmalloc(sizeof(struct lttng_ust_event_field));
- if (!event_field) {
- ret = -ENOMEM;
- goto error_event_field_alloc;
- }
- ctx_name = strdup(name);
- if (!ctx_name) {
- ret = -ENOMEM;
- goto error_field_name_alloc;
- }
- type = zmalloc(sizeof(struct lttng_ust_type_common));
- if (!type) {
- ret = -ENOMEM;
- goto error_field_type_alloc;
- }
- app_ctx = zmalloc(sizeof(struct lttng_ust_app_ctx));
- if (!app_ctx) {
- ret = -ENOMEM;
- goto error_app_ctx_alloc;
- }
- event_field->name = ctx_name;
- type->type = lttng_ust_type_dynamic;
- event_field->type = type;
- new_field.event_field = event_field;
- /*
- * If provider is not found, we add the context anyway, but
- * it will provide a dummy context.
- */
- provider = lookup_provider_by_name(name);
- if (provider) {
- new_field.get_size = provider->get_size;
- new_field.record = provider->record;
- new_field.get_value = provider->get_value;
- } else {
- new_field.get_size = lttng_ust_dummy_get_size;
- new_field.record = lttng_ust_dummy_record;
- new_field.get_value = lttng_ust_dummy_get_value;
- }
- new_field.destroy = destroy_app_ctx;
- new_field.priv = app_ctx;
- /*
- * For application context, add it by expanding
- * ctx array.
- */
- ret = lttng_ust_context_append_rcu(ctx, &new_field);
- if (ret) {
- destroy_app_ctx(app_ctx);
- return ret;
- }
- return 0;
-
-error_app_ctx_alloc:
- free(type);
-error_field_type_alloc:
- free(ctx_name);
-error_field_name_alloc:
- free(event_field);
-error_event_field_alloc:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST pthread_id context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <pthread.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-
-static
-size_t pthread_id_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(unsigned long));
- size += sizeof(unsigned long);
- return size;
-}
-
-static
-void pthread_id_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- unsigned long pthread_id;
-
- pthread_id = (unsigned long) pthread_self();
- chan->ops->event_write(ctx, &pthread_id, sizeof(pthread_id), lttng_ust_rb_alignof(pthread_id));
-}
-
-static
-void pthread_id_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = (unsigned long) pthread_self();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("pthread_id",
- lttng_ust_static_type_integer(sizeof(unsigned long) * CHAR_BIT,
- lttng_ust_rb_alignof(unsigned long) * CHAR_BIT,
- lttng_ust_is_signed_type(unsigned long),
- BYTE_ORDER, 10),
- false, false),
- pthread_id_get_size,
- pthread_id_record,
- pthread_id_get_value,
- NULL, NULL);
-
-int lttng_add_pthread_id_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2020 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST time namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include "common/compat/tid.h"
-#include <urcu/tls-compat.h>
-#include "lttng-tracer-core.h"
-#include "ns.h"
-#include "context-internal.h"
-
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event.
- */
-static DEFINE_URCU_TLS_INIT(ino_t, cached_time_ns, NS_INO_UNINITIALIZED);
-
-static
-ino_t get_time_ns(void)
-{
- struct stat sb;
- ino_t time_ns;
-
- time_ns = CMM_LOAD_SHARED(URCU_TLS(cached_time_ns));
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(time_ns != NS_INO_UNINITIALIZED))
- return time_ns;
-
- /*
- * At this point we have to populate the cache. Set the initial
- * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
- * number from the proc filesystem, this is the value we will
- * cache.
- */
- time_ns = NS_INO_UNAVAILABLE;
-
- /*
- * /proc/thread-self was introduced in kernel v3.17
- */
- if (stat("/proc/thread-self/ns/time", &sb) == 0) {
- time_ns = sb.st_ino;
- } else {
- char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
-
- if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
- "/proc/self/task/%d/ns/time",
- lttng_gettid()) >= 0) {
-
- if (stat(proc_ns_path, &sb) == 0) {
- time_ns = sb.st_ino;
- }
- }
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(URCU_TLS(cached_time_ns), time_ns);
-
- return time_ns;
-}
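
Unlike the process-global namespaces, the time namespace is looked up per thread: /proc/thread-self is tried first (available since kernel 3.17) and the /proc/self/task/<tid>/ns/time path is built by hand as a fallback. A reduced standalone sketch of that fallback, assuming a libc that exposes gettid(2) (glibc 2.30+) instead of the lttng_gettid() compat wrapper:

#define _GNU_SOURCE
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>

/* Sketch: per-thread namespace inode with a pre-3.17 fallback path. */
static ino_t thread_ns_inode(const char *ns_name)
{
	struct stat sb;
	char path[64];

	snprintf(path, sizeof(path), "/proc/thread-self/ns/%s", ns_name);
	if (stat(path, &sb) == 0)
		return sb.st_ino;
	/* Fallback for kernels without /proc/thread-self. */
	snprintf(path, sizeof(path), "/proc/self/task/%d/ns/%s",
		(int) gettid(), ns_name);
	if (stat(path, &sb) == 0)
		return sb.st_ino;
	return 0;
}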
-
-/*
- * The time namespace can change for 2 reasons
- * * setns(2) called with the fd of a different time ns
- * * clone(2) / fork(2) after a call to unshare(2) with the CLONE_NEWTIME flag
- */
-void lttng_context_time_ns_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_time_ns), NS_INO_UNINITIALIZED);
-}
-
-static
-size_t time_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void time_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t time_ns;
-
- time_ns = get_time_ns();
- chan->ops->event_write(ctx, &time_ns, sizeof(time_ns), lttng_ust_rb_alignof(time_ns));
-}
-
-static
-void time_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_time_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("time_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- time_ns_get_size,
- time_ns_record,
- time_ns_get_value,
- NULL, NULL);
-
-int lttng_add_time_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_time_ns_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_time_ns)));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST user namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "ns.h"
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event. The user namespace is global to the process.
- */
-static ino_t cached_user_ns = NS_INO_UNINITIALIZED;
-
-static
-ino_t get_user_ns(void)
-{
- struct stat sb;
- ino_t user_ns;
-
- user_ns = CMM_LOAD_SHARED(cached_user_ns);
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(user_ns != NS_INO_UNINITIALIZED))
- return user_ns;
-
- /*
- * At this point we have to populate the cache. Set the initial
- * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
- * number from the proc filesystem, this is the value we will
- * cache.
- */
- user_ns = NS_INO_UNAVAILABLE;
-
- if (stat("/proc/self/ns/user", &sb) == 0) {
- user_ns = sb.st_ino;
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(cached_user_ns, user_ns);
-
- return user_ns;
-}
-
-/*
- * The user namespace can change for 3 reasons
- * * clone(2) called with CLONE_NEWUSER
- * * setns(2) called with the fd of a different user ns
- * * unshare(2) called with CLONE_NEWUSER
- */
-void lttng_context_user_ns_reset(void)
-{
- CMM_STORE_SHARED(cached_user_ns, NS_INO_UNINITIALIZED);
-}
-
-static
-size_t user_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void user_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t user_ns;
-
- user_ns = get_user_ns();
- chan->ops->event_write(ctx, &user_ns, sizeof(user_ns), lttng_ust_rb_alignof(user_ns));
-}
-
-static
-void user_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_user_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("user_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- user_ns_get_size,
- user_ns_record,
- user_ns_get_value,
- NULL, NULL);
-
-int lttng_add_user_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST uts namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include "common/compat/tid.h"
-#include <urcu/tls-compat.h>
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-#include "ns.h"
-
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event.
- */
-static DEFINE_URCU_TLS_INIT(ino_t, cached_uts_ns, NS_INO_UNINITIALIZED);
-
-static
-ino_t get_uts_ns(void)
-{
- struct stat sb;
- ino_t uts_ns;
-
- uts_ns = CMM_LOAD_SHARED(URCU_TLS(cached_uts_ns));
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(uts_ns != NS_INO_UNINITIALIZED))
- return uts_ns;
-
- /*
- * At this point we have to populate the cache. Set the initial
- * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
- * number from the proc filesystem, this is the value we will
- * cache.
- */
- uts_ns = NS_INO_UNAVAILABLE;
-
- /*
- * /proc/thread-self was introduced in kernel v3.17
- */
- if (stat("/proc/thread-self/ns/uts", &sb) == 0) {
- uts_ns = sb.st_ino;
- } else {
- char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
-
- if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
- "/proc/self/task/%d/ns/uts",
- lttng_gettid()) >= 0) {
-
- if (stat(proc_ns_path, &sb) == 0) {
- uts_ns = sb.st_ino;
- }
- }
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(URCU_TLS(cached_uts_ns), uts_ns);
-
- return uts_ns;
-}
-
-/*
- * The uts namespace can change for 3 reasons
- * * clone(2) called with CLONE_NEWUTS
- * * setns(2) called with the fd of a different uts ns
- * * unshare(2) called with CLONE_NEWUTS
- */
-void lttng_context_uts_ns_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_uts_ns), NS_INO_UNINITIALIZED);
-}
-
-static
-size_t uts_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void uts_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t uts_ns;
-
- uts_ns = get_uts_ns();
- chan->ops->event_write(ctx, &uts_ns, sizeof(uts_ns), lttng_ust_rb_alignof(uts_ns));
-}
-
-static
-void uts_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_uts_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("uts_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- uts_ns_get_size,
- uts_ns_record,
- uts_ns_get_value,
- NULL, NULL);
-
-int lttng_add_uts_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_uts_ns_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_uts_ns)));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST namespaced effective group ID context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "creds.h"
-
-
-/*
- * At the kernel level, user IDs and group IDs are a per-thread attribute.
- * However, POSIX requires that all threads in a process share the same
- * credentials. The NPTL threading implementation handles the POSIX
- * requirements by providing wrapper functions for the various system calls
- * that change process UIDs and GIDs. These wrapper functions (including those
- * for setreuid() and setregid()) employ a signal-based technique to ensure
- * that when one thread changes credentials, all of the other threads in the
- * process also change their credentials.
- */
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event. User / group IDs are global to the process.
- */
-static gid_t cached_vegid = INVALID_GID;
-
-static
-gid_t get_vegid(void)
-{
- gid_t vegid;
-
- vegid = CMM_LOAD_SHARED(cached_vegid);
-
- if (caa_unlikely(vegid == INVALID_GID)) {
- vegid = getegid();
- CMM_STORE_SHARED(cached_vegid, vegid);
- }
-
- return vegid;
-}
-
-/*
- * The vegid can change on setgid, setregid, setresgid and setegid.
- */
-void lttng_context_vegid_reset(void)
-{
- CMM_STORE_SHARED(cached_vegid, INVALID_GID);
-}
-
-static
-size_t vegid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
- size += sizeof(gid_t);
- return size;
-}
-
-static
-void vegid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- gid_t vegid;
-
- vegid = get_vegid();
- chan->ops->event_write(ctx, &vegid, sizeof(vegid), lttng_ust_rb_alignof(vegid));
-}
-
-static
-void vegid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_vegid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vegid",
- lttng_ust_static_type_integer(sizeof(gid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(gid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(gid_t),
- BYTE_ORDER, 10),
- false, false),
- vegid_get_size,
- vegid_record,
- vegid_get_value,
- NULL, NULL);
-
-int lttng_add_vegid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST namespaced effective user ID context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "creds.h"
-
-
-/*
- * At the kernel level, user IDs and group IDs are a per-thread attribute.
- * However, POSIX requires that all threads in a process share the same
- * credentials. The NPTL threading implementation handles the POSIX
- * requirements by providing wrapper functions for the various system calls
- * that change process UIDs and GIDs. These wrapper functions (including those
- * for setreuid() and setregid()) employ a signal-based technique to ensure
- * that when one thread changes credentials, all of the other threads in the
- * process also change their credentials.
- */
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event. User / group IDs are global to the process.
- */
-static uid_t cached_veuid = INVALID_UID;
-
-static
-uid_t get_veuid(void)
-{
- uid_t veuid;
-
- veuid = CMM_LOAD_SHARED(cached_veuid);
-
- if (caa_unlikely(veuid == INVALID_UID)) {
- veuid = geteuid();
- CMM_STORE_SHARED(cached_veuid, veuid);
- }
-
- return veuid;
-}
-
-/*
- * The veuid can change on setuid, setreuid, setresuid and seteuid.
- */
-void lttng_context_veuid_reset(void)
-{
- CMM_STORE_SHARED(cached_veuid, INVALID_UID);
-}
-
-static
-size_t veuid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
- size += sizeof(uid_t);
- return size;
-}
-
-static
-void veuid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- uid_t veuid;
-
- veuid = get_veuid();
- chan->ops->event_write(ctx, &veuid, sizeof(veuid), lttng_ust_rb_alignof(veuid));
-}
-
-static
-void veuid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_veuid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("veuid",
- lttng_ust_static_type_integer(sizeof(uid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(uid_t),
- BYTE_ORDER, 10),
- false, false),
- veuid_get_size,
- veuid_record,
- veuid_get_value,
- NULL, NULL);
-
-int lttng_add_veuid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST namespaced real group ID context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "creds.h"
-
-
-/*
- * At the kernel level, user IDs and group IDs are a per-thread attribute.
- * However, POSIX requires that all threads in a process share the same
- * credentials. The NPTL threading implementation handles the POSIX
- * requirements by providing wrapper functions for the various system calls
- * that change process UIDs and GIDs. These wrapper functions (including those
- * for setreuid() and setregid()) employ a signal-based technique to ensure
- * that when one thread changes credentials, all of the other threads in the
- * process also change their credentials.
- */
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event. User / group IDs are global to the process.
- */
-static gid_t cached_vgid = INVALID_GID;
-
-static
-gid_t get_vgid(void)
-{
- gid_t vgid;
-
- vgid = CMM_LOAD_SHARED(cached_vgid);
-
- if (caa_unlikely(vgid == INVALID_GID)) {
- vgid = getgid();
- CMM_STORE_SHARED(cached_vgid, vgid);
- }
-
- return vgid;
-}
-
-/*
- * The vgid can change on setgid, setregid and setresgid.
- */
-void lttng_context_vgid_reset(void)
-{
- CMM_STORE_SHARED(cached_vgid, INVALID_GID);
-}
-
-static
-size_t vgid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
- size += sizeof(gid_t);
- return size;
-}
-
-static
-void vgid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- gid_t vgid;
-
- vgid = get_vgid();
- chan->ops->event_write(ctx, &vgid, sizeof(vgid), lttng_ust_rb_alignof(vgid));
-}
-
-static
-void vgid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_vgid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vgid",
- lttng_ust_static_type_integer(sizeof(gid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(gid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(gid_t),
- BYTE_ORDER, 10),
- false, false),
- vgid_get_size,
- vgid_record,
- vgid_get_value,
- NULL, NULL);
-
-int lttng_add_vgid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST vpid context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event.
- */
-static pid_t cached_vpid;
-
-static inline
-pid_t wrapper_getvpid(void)
-{
- pid_t vpid;
-
- vpid = CMM_LOAD_SHARED(cached_vpid);
- if (caa_unlikely(!vpid)) {
- vpid = getpid();
- CMM_STORE_SHARED(cached_vpid, vpid);
- }
- return vpid;
-}
-
-/*
- * Upon fork or clone, the PID assigned to our thread is not the same as
- * we kept in cache.
- */
-void lttng_context_vpid_reset(void)
-{
- CMM_STORE_SHARED(cached_vpid, 0);
-}
-
-static
-size_t vpid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(pid_t));
- size += sizeof(pid_t);
- return size;
-}
-
-static
-void vpid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- pid_t vpid = wrapper_getvpid();
-
- chan->ops->event_write(ctx, &vpid, sizeof(vpid), lttng_ust_rb_alignof(vpid));
-}
-
-static
-void vpid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = wrapper_getvpid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vpid",
- lttng_ust_static_type_integer(sizeof(pid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(pid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(pid_t),
- BYTE_ORDER, 10),
- false, false),
- vpid_get_size,
- vpid_record,
- vpid_get_value,
- NULL, NULL);
-
-int lttng_add_vpid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST namespaced saved set-group ID context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "creds.h"
-
-
-/*
- * At the kernel level, user IDs and group IDs are a per-thread attribute.
- * However, POSIX requires that all threads in a process share the same
- * credentials. The NPTL threading implementation handles the POSIX
- * requirements by providing wrapper functions for the various system calls
- * that change process UIDs and GIDs. These wrapper functions (including those
- * for setreuid() and setregid()) employ a signal-based technique to ensure
- * that when one thread changes credentials, all of the other threads in the
- * process also change their credentials.
- */
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event. User / group IDs are global to the process.
- */
-static gid_t cached_vsgid = INVALID_GID;
-
-static
-gid_t get_vsgid(void)
-{
- gid_t vsgid;
-
- vsgid = CMM_LOAD_SHARED(cached_vsgid);
-
- if (caa_unlikely(vsgid == INVALID_GID)) {
- gid_t gid, egid, sgid;
-
- if (getresgid(&gid, &egid, &sgid) == 0) {
- vsgid = sgid;
- CMM_STORE_SHARED(cached_vsgid, vsgid);
- }
- }
-
- return vsgid;
-}
-
-/*
- * The vsgid can change on setgid, setregid and setresgid.
- */
-void lttng_context_vsgid_reset(void)
-{
- CMM_STORE_SHARED(cached_vsgid, INVALID_GID);
-}
-
-static
-size_t vsgid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
- size += sizeof(gid_t);
- return size;
-}
-
-static
-void vsgid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- gid_t vsgid;
-
- vsgid = get_vsgid();
- chan->ops->event_write(ctx, &vsgid, sizeof(vsgid), lttng_ust_rb_alignof(vsgid));
-}
-
-static
-void vsgid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_vsgid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vsgid",
- lttng_ust_static_type_integer(sizeof(gid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(gid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(gid_t),
- BYTE_ORDER, 10),
- false, false),
- vsgid_get_size,
- vsgid_record,
- vsgid_get_value,
- NULL, NULL);
-
-int lttng_add_vsgid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST namespaced saved set-user ID context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "creds.h"
-
-
-/*
- * At the kernel level, user IDs and group IDs are a per-thread attribute.
- * However, POSIX requires that all threads in a process share the same
- * credentials. The NPTL threading implementation handles the POSIX
- * requirements by providing wrapper functions for the various system calls
- * that change process UIDs and GIDs. These wrapper functions (including those
- * for setreuid() and setregid()) employ a signal-based technique to ensure
- * that when one thread changes credentials, all of the other threads in the
- * process also change their credentials.
- */
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event. User / group IDs are global to the process.
- */
-static uid_t cached_vsuid = INVALID_UID;
-
-static
-uid_t get_vsuid(void)
-{
- uid_t vsuid;
-
- vsuid = CMM_LOAD_SHARED(cached_vsuid);
-
- if (caa_unlikely(vsuid == INVALID_UID)) {
- uid_t uid, euid, suid;
-
- if (getresuid(&uid, &euid, &suid) == 0) {
- vsuid = suid;
- CMM_STORE_SHARED(cached_vsuid, vsuid);
- }
- }
-
- return vsuid;
-}
-
-/*
- * The vsuid can change on setuid, setreuid and setresuid.
- */
-void lttng_context_vsuid_reset(void)
-{
- CMM_STORE_SHARED(cached_vsuid, INVALID_UID);
-}
-
-static
-size_t vsuid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
- size += sizeof(uid_t);
- return size;
-}
-
-static
-void vsuid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- uid_t vsuid;
-
- vsuid = get_vsuid();
- chan->ops->event_write(ctx, &vsuid, sizeof(vsuid), lttng_ust_rb_alignof(vsuid));
-}
-
-static
-void vsuid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_vsuid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vsuid",
- lttng_ust_static_type_integer(sizeof(uid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(uid_t),
- BYTE_ORDER, 10),
- false, false),
- vsuid_get_size,
- vsuid_record,
- vsuid_get_value,
- NULL, NULL);
-
-int lttng_add_vsuid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST vtid context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include "common/compat/tid.h"
-#include <urcu/tls-compat.h>
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event.
- */
-static DEFINE_URCU_TLS(pid_t, cached_vtid);
-
-/*
- * Upon fork or clone, the TID assigned to our thread is not the same as
- * the one we kept in cache. Luckily, we are the only thread surviving in
- * the child process, so we can simply clear our cached version.
- */
-void lttng_context_vtid_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_vtid), 0);
-}
-
-static
-size_t vtid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(pid_t));
- size += sizeof(pid_t);
- return size;
-}
-
-static inline
-pid_t wrapper_getvtid(void)
-{
- pid_t vtid;
-
- vtid = CMM_LOAD_SHARED(URCU_TLS(cached_vtid));
- if (caa_unlikely(!vtid)) {
- vtid = lttng_gettid();
- CMM_STORE_SHARED(URCU_TLS(cached_vtid), vtid);
- }
- return vtid;
-}
-
-static
-void vtid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- pid_t vtid = wrapper_getvtid();
-
- chan->ops->event_write(ctx, &vtid, sizeof(vtid), lttng_ust_rb_alignof(vtid));
-}
-
-static
-void vtid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = wrapper_getvtid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vtid",
- lttng_ust_static_type_integer(sizeof(pid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(pid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(pid_t),
- BYTE_ORDER, 10),
- false, false),
- vtid_get_size,
- vtid_record,
- vtid_get_value,
- NULL, NULL);
-
-int lttng_add_vtid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_vtid_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_vtid)));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST namespaced real user ID context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "creds.h"
-
-
-/*
- * At the kernel level, user IDs and group IDs are a per-thread attribute.
- * However, POSIX requires that all threads in a process share the same
- * credentials. The NPTL threading implementation handles the POSIX
- * requirements by providing wrapper functions for the various system calls
- * that change process UIDs and GIDs. These wrapper functions (including those
- * for setreuid() and setregid()) employ a signal-based technique to ensure
- * that when one thread changes credentials, all of the other threads in the
- * process also change their credentials.
- */
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event. User / group IDs are global to the process.
- */
-static uid_t cached_vuid = INVALID_UID;
-
-static
-uid_t get_vuid(void)
-{
- uid_t vuid;
-
- vuid = CMM_LOAD_SHARED(cached_vuid);
-
- if (caa_unlikely(vuid == INVALID_UID)) {
- vuid = getuid();
- CMM_STORE_SHARED(cached_vuid, vuid);
- }
-
- return vuid;
-}
-
-/*
- * The vuid can change on setuid, setreuid and setresuid.
- */
-void lttng_context_vuid_reset(void)
-{
- CMM_STORE_SHARED(cached_vuid, INVALID_UID);
-}
-
-static
-size_t vuid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
- size += sizeof(uid_t);
- return size;
-}
-
-static
-void vuid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- uid_t vuid;
-
- vuid = get_vuid();
- chan->ops->event_write(ctx, &vuid, sizeof(vuid), lttng_ust_rb_alignof(vuid));
-}
-
-static
-void vuid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_vuid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vuid",
- lttng_ust_static_type_integer(sizeof(uid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(uid_t),
- BYTE_ORDER, 10),
- false, false),
- vuid_get_size,
- vuid_record,
- vuid_get_value,
- NULL, NULL);
-
-int lttng_add_vuid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST trace/channel/event context management.
- */
-
-#define _LGPL_SOURCE
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <common/ust-context-provider.h>
-#include <lttng/urcu/pointer.h>
-#include <lttng/urcu/urcu-ust.h>
-#include "common/logging.h"
-#include "common/macros.h"
-#include <stddef.h>
-#include <string.h>
-#include <assert.h>
-#include <limits.h>
-#include "tracepoint-internal.h"
-
-#include "context-internal.h"
-
-/*
- * The filter implementation requires that two consecutive "get" operations
- * for the same context, performed by the same thread, return the same result.
- */
-
-int lttng_find_context(struct lttng_ust_ctx *ctx, const char *name)
-{
- unsigned int i;
- const char *subname;
-
- if (!ctx)
- return 0;
- if (strncmp(name, "$ctx.", strlen("$ctx.")) == 0) {
- subname = name + strlen("$ctx.");
- } else {
- subname = name;
- }
- for (i = 0; i < ctx->nr_fields; i++) {
- /* Skip allocated (but non-initialized) contexts */
- if (!ctx->fields[i].event_field->name)
- continue;
- if (!strcmp(ctx->fields[i].event_field->name, subname))
- return 1;
- }
- return 0;
-}
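
For example, assuming ctx points to an already-populated context array containing the procname field, the "$ctx."-prefixed and bare spellings resolve identically:

/* Illustrative only. */
assert(lttng_find_context(ctx, "$ctx.procname") ==
	lttng_find_context(ctx, "procname"));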
-
-int lttng_get_context_index(struct lttng_ust_ctx *ctx, const char *name)
-{
- unsigned int i;
- const char *subname;
-
- if (!ctx)
- return -1;
- if (strncmp(name, "$ctx.", strlen("$ctx.")) == 0) {
- subname = name + strlen("$ctx.");
- } else {
- subname = name;
- }
- for (i = 0; i < ctx->nr_fields; i++) {
- /* Skip allocated (but non-initialized) contexts */
- if (!ctx->fields[i].event_field->name)
- continue;
- if (!strcmp(ctx->fields[i].event_field->name, subname))
- return i;
- }
- return -1;
-}
-
-static int lttng_find_context_provider(struct lttng_ust_ctx *ctx, const char *name)
-{
- unsigned int i;
-
- for (i = 0; i < ctx->nr_fields; i++) {
- /* Skip allocated (but non-initialized) contexts */
- if (!ctx->fields[i].event_field->name)
- continue;
- if (!strncmp(ctx->fields[i].event_field->name, name,
- strlen(name)))
- return 1;
- }
- return 0;
-}
-
-/*
- * Note: as we append context information, the pointer location may change.
- * lttng_ust_context_add_field leaves the new last context initialized to NULL.
- */
-static
-int lttng_ust_context_add_field(struct lttng_ust_ctx **ctx_p)
-{
- struct lttng_ust_ctx *ctx;
-
- if (!*ctx_p) {
- *ctx_p = zmalloc(sizeof(struct lttng_ust_ctx));
- if (!*ctx_p)
- return -ENOMEM;
- (*ctx_p)->largest_align = 1;
- }
- ctx = *ctx_p;
- if (ctx->nr_fields + 1 > ctx->allocated_fields) {
- struct lttng_ust_ctx_field *new_fields;
-
- ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
- new_fields = zmalloc(ctx->allocated_fields * sizeof(*new_fields));
- if (!new_fields)
- return -ENOMEM;
- /* Copy elements */
- if (ctx->fields)
- memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
- free(ctx->fields);
- ctx->fields = new_fields;
- }
- ctx->nr_fields++;
- return 0;
-}
-
-static size_t get_type_max_align(const struct lttng_ust_type_common *type)
-{
- switch (type->type) {
- case lttng_ust_type_integer:
- return lttng_ust_get_type_integer(type)->alignment;
- case lttng_ust_type_string:
- return CHAR_BIT;
- case lttng_ust_type_dynamic:
- return 0;
- case lttng_ust_type_enum:
- return get_type_max_align(lttng_ust_get_type_enum(type)->container_type);
- case lttng_ust_type_array:
- return max_t(size_t, get_type_max_align(lttng_ust_get_type_array(type)->elem_type),
- lttng_ust_get_type_array(type)->alignment);
- case lttng_ust_type_sequence:
- return max_t(size_t, get_type_max_align(lttng_ust_get_type_sequence(type)->elem_type),
- lttng_ust_get_type_sequence(type)->alignment);
- case lttng_ust_type_struct:
- {
- unsigned int i;
- size_t field_align = 0;
- const struct lttng_ust_type_struct *struct_type = lttng_ust_get_type_struct(type);
-
- for (i = 0; i < struct_type->nr_fields; i++) {
- field_align = max_t(size_t,
- get_type_max_align(struct_type->fields[i]->type),
- field_align);
- }
- return field_align;
- }
- default:
- WARN_ON_ONCE(1);
- return 0;
- }
-}
-
-/*
- * lttng_context_update() should be called at least once between context
- * modification and trace start.
- */
-static
-void lttng_context_update(struct lttng_ust_ctx *ctx)
-{
- int i;
- size_t largest_align = 8; /* in bits */
-
- for (i = 0; i < ctx->nr_fields; i++) {
- size_t field_align = 8;
-
- field_align = get_type_max_align(ctx->fields[i].event_field->type);
- largest_align = max_t(size_t, largest_align, field_align);
- }
- ctx->largest_align = largest_align >> 3; /* bits to bytes */
-}
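
A quick worked example of the computation above: three fields whose types align on 8, 32 and 64 bits give largest_align = max(8, 8, 32, 64) = 64 bits, which is stored as 64 >> 3 = 8 bytes in ctx->largest_align.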
-
-int lttng_ust_context_append_rcu(struct lttng_ust_ctx **ctx_p,
- const struct lttng_ust_ctx_field *f)
-{
- struct lttng_ust_ctx *old_ctx = *ctx_p, *new_ctx = NULL;
- struct lttng_ust_ctx_field *new_fields = NULL;
- int ret;
-
- if (old_ctx) {
- new_ctx = zmalloc(sizeof(struct lttng_ust_ctx));
- if (!new_ctx)
- return -ENOMEM;
- *new_ctx = *old_ctx;
- new_fields = zmalloc(new_ctx->allocated_fields * sizeof(*new_fields));
- if (!new_fields) {
- free(new_ctx);
- return -ENOMEM;
- }
- /* Copy elements */
- memcpy(new_fields, old_ctx->fields,
- sizeof(*old_ctx->fields) * old_ctx->nr_fields);
- new_ctx->fields = new_fields;
- }
- ret = lttng_ust_context_add_field(&new_ctx);
- if (ret) {
- free(new_fields);
- free(new_ctx);
- return ret;
- }
- new_ctx->fields[new_ctx->nr_fields - 1] = *f;
- lttng_context_update(new_ctx);
- lttng_ust_rcu_assign_pointer(*ctx_p, new_ctx);
- lttng_ust_urcu_synchronize_rcu();
- if (old_ctx) {
- free(old_ctx->fields);
- free(old_ctx);
- }
- return 0;
-}
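
The publish-then-synchronize sequence above pairs with readers that dereference the context pointer inside an lttng-ust URCU read-side critical section. A hedged sketch of the expected reader pattern, using the lttng/urcu helpers already included by this file (the actual reader code elsewhere in the tree is not shown here):

/* Sketch of the matching read side. */
static void reader_example(struct lttng_ust_ctx * const *ctx_p)
{
	struct lttng_ust_ctx *ctx;

	lttng_ust_urcu_read_lock();
	ctx = lttng_ust_rcu_dereference(*ctx_p);
	if (ctx) {
		/* ... iterate ctx->fields[0 .. ctx->nr_fields - 1] ... */
	}
	lttng_ust_urcu_read_unlock();
}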
-
-int lttng_ust_context_append(struct lttng_ust_ctx **ctx_p,
- const struct lttng_ust_ctx_field *f)
-{
- int ret;
-
- ret = lttng_ust_context_add_field(ctx_p);
- if (ret)
- return ret;
- (*ctx_p)->fields[(*ctx_p)->nr_fields - 1] = *f;
- lttng_context_update(*ctx_p);
- return 0;
-}
-
-void lttng_destroy_context(struct lttng_ust_ctx *ctx)
-{
- int i;
-
- if (!ctx)
- return;
- for (i = 0; i < ctx->nr_fields; i++) {
- if (ctx->fields[i].destroy)
- ctx->fields[i].destroy(ctx->fields[i].priv);
- }
- free(ctx->fields);
- free(ctx);
-}
-
-/*
- * Can be safely performed concurrently with tracing using the struct
- * lttng_ctx. Using RCU update. Needs to match RCU read-side handling of
- * contexts.
- *
- * This does not allow adding, removing, or changing typing of the
- * contexts, since this needs to stay invariant for metadata. However,
- * it allows updating the handlers associated with all contexts matching
- * a provider (by name) while tracing is using it, in a way that ensures
- * a single RCU read-side critical section sees either all old, or all
- * new handlers.
- */
-int lttng_ust_context_set_provider_rcu(struct lttng_ust_ctx **_ctx,
- const char *name,
- size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan),
- void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
- void *priv)
-{
- int i, ret;
- struct lttng_ust_ctx *ctx = *_ctx, *new_ctx;
- struct lttng_ust_ctx_field *new_fields;
-
- if (!ctx || !lttng_find_context_provider(ctx, name))
- return 0;
- /*
- * We have at least one instance of context for the provider.
- */
- new_ctx = zmalloc(sizeof(*new_ctx));
- if (!new_ctx)
- return -ENOMEM;
- *new_ctx = *ctx;
- new_fields = zmalloc(sizeof(*new_fields) * ctx->allocated_fields);
- if (!new_fields) {
- ret = -ENOMEM;
- goto field_error;
- }
- /* Copy elements */
- memcpy(new_fields, ctx->fields,
- sizeof(*new_fields) * ctx->allocated_fields);
- for (i = 0; i < ctx->nr_fields; i++) {
- if (strncmp(new_fields[i].event_field->name,
- name, strlen(name)) != 0)
- continue;
- new_fields[i].get_size = get_size;
- new_fields[i].record = record;
- new_fields[i].get_value = get_value;
- new_fields[i].priv = priv;
- }
- new_ctx->fields = new_fields;
- lttng_ust_rcu_assign_pointer(*_ctx, new_ctx);
- lttng_ust_urcu_synchronize_rcu();
- free(ctx->fields);
- free(ctx);
- return 0;
-
-field_error:
- free(new_ctx);
- return ret;
-}
-
-int lttng_context_init_all(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- ret = lttng_add_pthread_id_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_pthread_id_to_ctx");
- goto error;
- }
- ret = lttng_add_vtid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vtid_to_ctx");
- goto error;
- }
- ret = lttng_add_vpid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vpid_to_ctx");
- goto error;
- }
- ret = lttng_add_procname_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_procname_to_ctx");
- goto error;
- }
- ret = lttng_add_cpu_id_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_cpu_id_to_ctx");
- goto error;
- }
- ret = lttng_add_cgroup_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_cgroup_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_ipc_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_ipc_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_mnt_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_mnt_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_net_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_net_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_pid_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_pid_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_time_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_time_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_user_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_user_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_uts_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_uts_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_vuid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vuid_to_ctx");
- goto error;
- }
- ret = lttng_add_veuid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_veuid_to_ctx");
- goto error;
- }
- ret = lttng_add_vsuid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vsuid_to_ctx");
- goto error;
- }
- ret = lttng_add_vgid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vgid_to_ctx");
- goto error;
- }
- ret = lttng_add_vegid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vegid_to_ctx");
- goto error;
- }
- ret = lttng_add_vsgid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vsgid_to_ctx");
- goto error;
- }
- lttng_context_update(*ctx);
- return 0;
-
-error:
- lttng_destroy_context(*ctx);
- return ret;
-}
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-counter-client-percpu-32-modular.c
- *
- * LTTng lib counter client. Per-cpu 32-bit counters in modular
- * arithmetic.
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include "ust-events-internal.h"
-#include "common/counter/counter.h"
-#include "common/counter/counter-api.h"
-#include "lttng-tracer-core.h"
-#include "lttng-counter-client.h"
-
-static const struct lib_counter_config client_config = {
- .alloc = COUNTER_ALLOC_PER_CPU,
- .sync = COUNTER_SYNC_PER_CPU,
- .arithmetic = COUNTER_ARITHMETIC_MODULAR,
- .counter_size = COUNTER_SIZE_32_BIT,
-};
-
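-/*
- * Create a per-cpu modular counter. Dimensions requesting underflow or
- * overflow tracking are refused (not supported by this client).
- */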
-static struct lib_counter *counter_create(size_t nr_dimensions,
- const struct lttng_counter_dimension *dimensions,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds,
- bool is_daemon)
-{
- size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
-
- if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
- return NULL;
- for (i = 0; i < nr_dimensions; i++) {
- if (dimensions[i].has_underflow || dimensions[i].has_overflow)
- return NULL;
- max_nr_elem[i] = dimensions[i].size;
- }
- return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
- global_sum_step, global_counter_fd, nr_counter_cpu_fds,
- counter_cpu_fds, is_daemon);
-}
-
-static void counter_destroy(struct lib_counter *counter)
-{
- lttng_counter_destroy(counter);
-}
-
-static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
-{
- return lttng_counter_add(&client_config, counter, dimension_indexes, v);
-}
-
-static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
- int64_t *value, bool *overflow, bool *underflow)
-{
- return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
- overflow, underflow);
-}
-
-static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
- int64_t *value, bool *overflow, bool *underflow)
-{
- return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
- overflow, underflow);
-}
-
-static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
-{
- return lttng_counter_clear(&client_config, counter, dimension_indexes);
-}
-
-static struct lttng_counter_transport lttng_counter_transport = {
- .name = "counter-per-cpu-32-modular",
- .ops = {
- .counter_create = counter_create,
- .counter_destroy = counter_destroy,
- .counter_add = counter_add,
- .counter_read = counter_read,
- .counter_aggregate = counter_aggregate,
- .counter_clear = counter_clear,
- },
- .client_config = &client_config,
-};
-
-void lttng_counter_client_percpu_32_modular_init(void)
-{
- lttng_counter_transport_register(<tng_counter_transport);
-}
-
-void lttng_counter_client_percpu_32_modular_exit(void)
-{
- lttng_counter_transport_unregister(<tng_counter_transport);
-}
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-counter-client-percpu-64-modular.c
- *
- * LTTng lib counter client. Per-cpu 64-bit counters in modular
- * arithmetic.
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include "ust-events-internal.h"
-#include "common/counter/counter.h"
-#include "common/counter/counter-api.h"
-#include "lttng-tracer-core.h"
-#include "lttng-counter-client.h"
-
-static const struct lib_counter_config client_config = {
- .alloc = COUNTER_ALLOC_PER_CPU,
- .sync = COUNTER_SYNC_PER_CPU,
- .arithmetic = COUNTER_ARITHMETIC_MODULAR,
- .counter_size = COUNTER_SIZE_64_BIT,
-};
-
-static struct lib_counter *counter_create(size_t nr_dimensions,
- const struct lttng_counter_dimension *dimensions,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds,
- bool is_daemon)
-{
- size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
-
- if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
- return NULL;
- for (i = 0; i < nr_dimensions; i++) {
- if (dimensions[i].has_underflow || dimensions[i].has_overflow)
- return NULL;
- max_nr_elem[i] = dimensions[i].size;
- }
- return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
- global_sum_step, global_counter_fd, nr_counter_cpu_fds,
- counter_cpu_fds, is_daemon);
-}
-
-static void counter_destroy(struct lib_counter *counter)
-{
- lttng_counter_destroy(counter);
-}
-
-static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
-{
- return lttng_counter_add(&client_config, counter, dimension_indexes, v);
-}
-
-static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
- int64_t *value, bool *overflow, bool *underflow)
-{
- return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
- overflow, underflow);
-}
-
-static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
- int64_t *value, bool *overflow, bool *underflow)
-{
- return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
- overflow, underflow);
-}
-
-static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
-{
- return lttng_counter_clear(&client_config, counter, dimension_indexes);
-}
-
-static struct lttng_counter_transport lttng_counter_transport = {
- .name = "counter-per-cpu-64-modular",
- .ops = {
- .counter_create = counter_create,
- .counter_destroy = counter_destroy,
- .counter_add = counter_add,
- .counter_read = counter_read,
- .counter_aggregate = counter_aggregate,
- .counter_clear = counter_clear,
- },
- .client_config = &client_config,
-};
-
-void lttng_counter_client_percpu_64_modular_init(void)
-{
- lttng_counter_transport_register(<tng_counter_transport);
-}
-
-void lttng_counter_client_percpu_64_modular_exit(void)
-{
- lttng_counter_transport_unregister(<tng_counter_transport);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib counter client.
- */
-
-#ifndef _LTTNG_UST_COUNTER_CLIENT_H
-#define _LTTNG_UST_COUNTER_CLIENT_H
-
-/*
- * The counter clients init/exit symbols are private ABI for
- * liblttng-ust-ctl, which is why they are not hidden.
- */
-
-void lttng_ust_counter_clients_init(void);
-void lttng_ust_counter_clients_exit(void);
-
-void lttng_counter_client_percpu_32_modular_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_client_percpu_32_modular_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_client_percpu_64_modular_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_client_percpu_64_modular_exit(void)
- __attribute__((visibility("hidden")));
-
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Holds LTTng per-session event registry.
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <assert.h>
-#include <errno.h>
-#include <limits.h>
-#include <pthread.h>
-#include <sys/shm.h>
-#include <sys/ipc.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <inttypes.h>
-#include <time.h>
-#include <stdbool.h>
-#include <unistd.h>
-#include <dlfcn.h>
-#include <lttng/ust-endian.h>
-
-#include <urcu/arch.h>
-#include <urcu/compiler.h>
-#include <urcu/hlist.h>
-#include <urcu/list.h>
-#include <urcu/uatomic.h>
-
-#include <lttng/tracepoint.h>
-#include <lttng/ust-events.h>
-
-#include "common/logging.h"
-#include "common/macros.h"
-#include <lttng/ust-ctl.h>
-#include "common/ustcomm.h"
-#include "common/ust-fd.h"
-#include "common/dynamic-type.h"
-#include "common/ust-context-provider.h"
-#include "error.h"
-#include "lttng-ust-uuid.h"
-
-#include "tracepoint-internal.h"
-#include "string-utils.h"
-#include "lttng-bytecode.h"
-#include "lttng-tracer.h"
-#include "lttng-tracer-core.h"
-#include "lttng-ust-statedump.h"
-#include "context-internal.h"
-#include "ust-events-internal.h"
-#include "wait.h"
-#include "common/ringbuffer/shm.h"
-#include "common/ringbuffer/frontend_types.h"
-#include "common/ringbuffer/frontend.h"
-#include "common/counter/counter.h"
-#include "jhash.h"
-#include <lttng/ust-abi.h>
-#include "context-provider-internal.h"
-
-/*
- * All operations within this file are called by the communication
- * thread, under ust_lock protection.
- */
-
-static CDS_LIST_HEAD(sessions);
-static CDS_LIST_HEAD(event_notifier_groups);
-
-struct cds_list_head *lttng_get_sessions(void)
-{
- return &sessions;
-}
-
-static void _lttng_event_destroy(struct lttng_ust_event_common *event);
-static void _lttng_enum_destroy(struct lttng_enum *_enum);
-
-static
-void lttng_session_lazy_sync_event_enablers(struct lttng_ust_session *session);
-static
-void lttng_session_sync_event_enablers(struct lttng_ust_session *session);
-static
-void lttng_event_notifier_group_sync_enablers(
- struct lttng_event_notifier_group *event_notifier_group);
-static
-void lttng_enabler_destroy(struct lttng_enabler *enabler);
-
-bool lttng_ust_validate_event_name(const struct lttng_ust_event_desc *desc)
-{
- if (strlen(desc->probe_desc->provider_name) + 1 +
- strlen(desc->event_name) >= LTTNG_UST_ABI_SYM_NAME_LEN)
- return false;
- return true;
-}
-
-void lttng_ust_format_event_name(const struct lttng_ust_event_desc *desc,
- char *name)
-{
- strcpy(name, desc->probe_desc->provider_name);
- strcat(name, ":");
- strcat(name, desc->event_name);
-}
-
-/*
- * Called with ust lock held.
- */
-int lttng_session_active(void)
-{
- struct lttng_ust_session_private *iter;
-
- cds_list_for_each_entry(iter, &sessions, node) {
- if (iter->pub->active)
- return 1;
- }
- return 0;
-}
-
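-/*
- * Return 1 if the event loglevel satisfies the requested loglevel type
- * (single value, range, or all), 0 otherwise.
- */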
-static
-int lttng_loglevel_match(int loglevel,
- unsigned int has_loglevel,
- enum lttng_ust_abi_loglevel_type req_type,
- int req_loglevel)
-{
- if (!has_loglevel)
- loglevel = TRACE_DEFAULT;
- switch (req_type) {
- case LTTNG_UST_ABI_LOGLEVEL_RANGE:
- if (loglevel <= req_loglevel
- || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
- return 1;
- else
- return 0;
- case LTTNG_UST_ABI_LOGLEVEL_SINGLE:
- if (loglevel == req_loglevel
- || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
- return 1;
- else
- return 0;
- case LTTNG_UST_ABI_LOGLEVEL_ALL:
- default:
- if (loglevel <= TRACE_DEBUG)
- return 1;
- else
- return 0;
- }
-}
-
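-/*
- * Allocate a session, initialize its contexts with lttng_context_init_all(),
- * and add it to the global session list.
- */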
-struct lttng_ust_session *lttng_session_create(void)
-{
- struct lttng_ust_session *session;
- struct lttng_ust_session_private *session_priv;
- int i;
-
- session = zmalloc(sizeof(struct lttng_ust_session));
- if (!session)
- return NULL;
- session->struct_size = sizeof(struct lttng_ust_session);
- session_priv = zmalloc(sizeof(struct lttng_ust_session_private));
- if (!session_priv) {
- free(session);
- return NULL;
- }
- session->priv = session_priv;
- session_priv->pub = session;
- if (lttng_context_init_all(&session->priv->ctx)) {
- free(session_priv);
- free(session);
- return NULL;
- }
- CDS_INIT_LIST_HEAD(&session->priv->chan_head);
- CDS_INIT_LIST_HEAD(&session->priv->events_head);
- CDS_INIT_LIST_HEAD(&session->priv->enums_head);
- CDS_INIT_LIST_HEAD(&session->priv->enablers_head);
- for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
- CDS_INIT_HLIST_HEAD(&session->priv->events_ht.table[i]);
- for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
- CDS_INIT_HLIST_HEAD(&session->priv->enums_ht.table[i]);
- cds_list_add(&session->priv->node, &sessions);
- return session;
-}
-
-struct lttng_counter *lttng_ust_counter_create(
- const char *counter_transport_name,
- size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
-{
- struct lttng_counter_transport *counter_transport = NULL;
- struct lttng_counter *counter = NULL;
-
- counter_transport = lttng_counter_transport_find(counter_transport_name);
- if (!counter_transport)
- goto notransport;
- counter = zmalloc(sizeof(struct lttng_counter));
- if (!counter)
- goto nomem;
-
- counter->ops = &counter_transport->ops;
- counter->transport = counter_transport;
-
- counter->counter = counter->ops->counter_create(
- number_dimensions, dimensions, 0,
- -1, 0, NULL, false);
- if (!counter->counter) {
- goto create_error;
- }
-
- return counter;
-
-create_error:
- free(counter);
-nomem:
-notransport:
- return NULL;
-}
-
-static
-void lttng_ust_counter_destroy(struct lttng_counter *counter)
-{
- counter->ops->counter_destroy(counter->counter);
- free(counter);
-}
-
-struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
-{
- struct lttng_event_notifier_group *event_notifier_group;
- int i;
-
- event_notifier_group = zmalloc(sizeof(struct lttng_event_notifier_group));
- if (!event_notifier_group)
- return NULL;
-
- /* Add all contexts. */
- if (lttng_context_init_all(&event_notifier_group->ctx)) {
- free(event_notifier_group);
- return NULL;
- }
-
- CDS_INIT_LIST_HEAD(&event_notifier_group->enablers_head);
- CDS_INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
- for (i = 0; i < LTTNG_UST_EVENT_NOTIFIER_HT_SIZE; i++)
- CDS_INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);
-
- cds_list_add(&event_notifier_group->node, &event_notifier_groups);
-
- return event_notifier_group;
-}
-
-/*
- * Only used internally at session destruction.
- */
-static
-void _lttng_channel_unmap(struct lttng_ust_channel_buffer *lttng_chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_shm_handle *handle;
-
- cds_list_del(<tng_chan->priv->node);
- lttng_destroy_context(lttng_chan->priv->ctx);
- chan = lttng_chan->priv->rb_chan;
- handle = chan->handle;
- channel_destroy(chan, handle, 0);
- free(lttng_chan->parent);
- free(lttng_chan->priv);
- free(lttng_chan);
-}
-
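-/*
- * Connect the event to its tracepoint probe. The event must not already
- * be registered.
- */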
-static
-void register_event(struct lttng_ust_event_common *event)
-{
- int ret;
- const struct lttng_ust_event_desc *desc;
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
-
- assert(event->priv->registered == 0);
- desc = event->priv->desc;
- lttng_ust_format_event_name(desc, name);
- ret = lttng_ust_tp_probe_register_queue_release(name,
- desc->probe_callback,
- event, desc->signature);
- WARN_ON_ONCE(ret);
- if (!ret)
- event->priv->registered = 1;
-}
-
-static
-void unregister_event(struct lttng_ust_event_common *event)
-{
- int ret;
- const struct lttng_ust_event_desc *desc;
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
-
- assert(event->priv->registered == 1);
- desc = event->priv->desc;
- lttng_ust_format_event_name(desc, name);
- ret = lttng_ust_tp_probe_unregister_queue_release(name,
- desc->probe_callback,
- event);
- WARN_ON_ONCE(ret);
- if (!ret)
- event->priv->registered = 0;
-}
-
-static
-void _lttng_event_unregister(struct lttng_ust_event_common *event)
-{
- if (event->priv->registered)
- unregister_event(event);
-}
-
-void lttng_session_destroy(struct lttng_ust_session *session)
-{
- struct lttng_ust_channel_buffer_private *chan, *tmpchan;
- struct lttng_ust_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
- struct lttng_enum *_enum, *tmp_enum;
- struct lttng_event_enabler *event_enabler, *event_tmpenabler;
-
- CMM_ACCESS_ONCE(session->active) = 0;
- cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
- _lttng_event_unregister(event_recorder_priv->parent.pub);
- }
- lttng_ust_urcu_synchronize_rcu(); /* Wait for in-flight events to complete */
- lttng_ust_tp_probe_prune_release_queue();
- cds_list_for_each_entry_safe(event_enabler, event_tmpenabler,
- &session->priv->enablers_head, node)
- lttng_event_enabler_destroy(event_enabler);
- cds_list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv,
- &session->priv->events_head, node)
- _lttng_event_destroy(event_recorder_priv->parent.pub);
- cds_list_for_each_entry_safe(_enum, tmp_enum,
- &session->priv->enums_head, node)
- _lttng_enum_destroy(_enum);
- cds_list_for_each_entry_safe(chan, tmpchan, &session->priv->chan_head, node)
- _lttng_channel_unmap(chan->pub);
- cds_list_del(&session->priv->node);
- lttng_destroy_context(session->priv->ctx);
- free(session->priv);
- free(session);
-}
-
-void lttng_event_notifier_group_destroy(
- struct lttng_event_notifier_group *event_notifier_group)
-{
- int close_ret;
- struct lttng_event_notifier_enabler *notifier_enabler, *tmpnotifier_enabler;
- struct lttng_ust_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
-
- if (!event_notifier_group) {
- return;
- }
-
- cds_list_for_each_entry(event_notifier_priv,
- &event_notifier_group->event_notifiers_head, node)
- _lttng_event_unregister(event_notifier_priv->parent.pub);
-
- lttng_ust_urcu_synchronize_rcu();
-
- cds_list_for_each_entry_safe(notifier_enabler, tmpnotifier_enabler,
- &event_notifier_group->enablers_head, node)
- lttng_event_notifier_enabler_destroy(notifier_enabler);
-
- cds_list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
- &event_notifier_group->event_notifiers_head, node)
- _lttng_event_destroy(event_notifier_priv->parent.pub);
-
- if (event_notifier_group->error_counter)
- lttng_ust_counter_destroy(event_notifier_group->error_counter);
-
- /* Close the notification fd to the listener of event_notifiers. */
-
- lttng_ust_lock_fd_tracker();
- close_ret = close(event_notifier_group->notification_fd);
- if (!close_ret) {
- lttng_ust_delete_fd_from_tracker(
- event_notifier_group->notification_fd);
- } else {
- PERROR("close");
- abort();
- }
- lttng_ust_unlock_fd_tracker();
-
- cds_list_del(&event_notifier_group->node);
- lttng_destroy_context(event_notifier_group->ctx);
- free(event_notifier_group);
-}
-
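-/*
- * Free the filter bytecode and excluder nodes owned by an enabler.
- */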
-static
-void lttng_enabler_destroy(struct lttng_enabler *enabler)
-{
- struct lttng_ust_bytecode_node *filter_node, *tmp_filter_node;
- struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
-
- if (!enabler) {
- return;
- }
-
- /* Destroy filter bytecode */
- cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
- &enabler->filter_bytecode_head, node) {
- free(filter_node);
- }
-
- /* Destroy excluders */
- cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
- &enabler->excluder_head, node) {
- free(excluder_node);
- }
-}
-
-void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- if (!event_notifier_enabler) {
- return;
- }
-
- cds_list_del(&event_notifier_enabler->node);
-
- lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
-
- free(event_notifier_enabler);
-}
-
-static
-int lttng_enum_create(const struct lttng_ust_enum_desc *desc,
- struct lttng_ust_session *session)
-{
- const char *enum_name = desc->name;
- struct lttng_enum *_enum;
- struct cds_hlist_head *head;
- int ret = 0;
- size_t name_len = strlen(enum_name);
- uint32_t hash;
- int notify_socket;
-
- /* Check if this enum is already registered for this session. */
- hash = jhash(enum_name, name_len, 0);
- head = &session->priv->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
-
- _enum = lttng_ust_enum_get_from_desc(session, desc);
- if (_enum) {
- ret = -EEXIST;
- goto exist;
- }
-
- notify_socket = lttng_get_notify_socket(session->priv->owner);
- if (notify_socket < 0) {
- ret = notify_socket;
- goto socket_error;
- }
-
- _enum = zmalloc(sizeof(*_enum));
- if (!_enum) {
- ret = -ENOMEM;
- goto cache_error;
- }
- _enum->session = session;
- _enum->desc = desc;
-
- ret = ustcomm_register_enum(notify_socket,
- session->priv->objd,
- enum_name,
- desc->nr_entries,
- desc->entries,
- &_enum->id);
- if (ret < 0) {
- DBG("Error (%d) registering enumeration to sessiond", ret);
- goto sessiond_register_error;
- }
- cds_list_add(&_enum->node, &session->priv->enums_head);
- cds_hlist_add_head(&_enum->hlist, head);
- return 0;
-
-sessiond_register_error:
- free(_enum);
-cache_error:
-socket_error:
-exist:
- return ret;
-}
-
-static
-int lttng_create_enum_check(const struct lttng_ust_type_common *type,
- struct lttng_ust_session *session)
-{
- switch (type->type) {
- case lttng_ust_type_enum:
- {
- const struct lttng_ust_enum_desc *enum_desc;
- int ret;
-
- enum_desc = lttng_ust_get_type_enum(type)->desc;
- ret = lttng_enum_create(enum_desc, session);
- if (ret && ret != -EEXIST) {
- DBG("Unable to create enum error: (%d)", ret);
- return ret;
- }
- break;
- }
- case lttng_ust_type_dynamic:
- {
- const struct lttng_ust_event_field *tag_field_generic;
- const struct lttng_ust_enum_desc *enum_desc;
- int ret;
-
- tag_field_generic = lttng_ust_dynamic_type_tag_field();
- enum_desc = lttng_ust_get_type_enum(tag_field_generic->type)->desc;
- ret = lttng_enum_create(enum_desc, session);
- if (ret && ret != -EEXIST) {
- DBG("Unable to create enum error: (%d)", ret);
- return ret;
- }
- break;
- }
- default:
- /* TODO: nested types when they become supported. */
- break;
- }
- return 0;
-}
-
-static
-int lttng_create_all_event_enums(size_t nr_fields,
- const struct lttng_ust_event_field **event_fields,
- struct lttng_ust_session *session)
-{
- size_t i;
- int ret;
-
- /* For each field, ensure enum is part of the session. */
- for (i = 0; i < nr_fields; i++) {
- const struct lttng_ust_type_common *type = event_fields[i]->type;
-
- ret = lttng_create_enum_check(type, session);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static
-int lttng_create_all_ctx_enums(size_t nr_fields,
- struct lttng_ust_ctx_field *ctx_fields,
- struct lttng_ust_session *session)
-{
- size_t i;
- int ret;
-
- /* For each field, ensure enum is part of the session. */
- for (i = 0; i < nr_fields; i++) {
- const struct lttng_ust_type_common *type = ctx_fields[i].event_field->type;
-
- ret = lttng_create_enum_check(type, session);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-/*
- * Ensure that a state-dump will be performed for this session at the end
- * of the current handle_message().
- */
-int lttng_session_statedump(struct lttng_ust_session *session)
-{
- session->priv->statedump_pending = 1;
- lttng_ust_sockinfo_session_enabled(session->priv->owner);
- return 0;
-}
-
-int lttng_session_enable(struct lttng_ust_session *session)
-{
- int ret = 0;
- struct lttng_ust_channel_buffer_private *chan;
- int notify_socket;
-
- if (session->active) {
- ret = -EBUSY;
- goto end;
- }
-
- notify_socket = lttng_get_notify_socket(session->priv->owner);
- if (notify_socket < 0)
- return notify_socket;
-
- /* Set transient enabler state to "enabled" */
- session->priv->tstate = 1;
-
- /* We need to sync enablers with session before activation. */
- lttng_session_sync_event_enablers(session);
-
- /*
- * Snapshot the number of events per channel to know the type of header
- * we need to use.
- */
- cds_list_for_each_entry(chan, &session->priv->chan_head, node) {
- struct lttng_ust_ctx *ctx;
- struct lttng_ust_ctx_field *fields = NULL;
- size_t nr_fields = 0;
- uint32_t chan_id;
-
-		/* Don't change it on session stop/restart. */
- if (chan->header_type)
- continue;
- ctx = chan->ctx;
- if (ctx) {
- nr_fields = ctx->nr_fields;
- fields = ctx->fields;
- ret = lttng_create_all_ctx_enums(nr_fields, fields,
- session);
- if (ret < 0) {
- DBG("Error (%d) adding enum to session", ret);
- return ret;
- }
- }
- ret = ustcomm_register_channel(notify_socket,
- session,
- session->priv->objd,
- chan->parent.objd,
- nr_fields,
- fields,
- &chan_id,
- &chan->header_type);
- if (ret) {
- DBG("Error (%d) registering channel to sessiond", ret);
- return ret;
- }
- if (chan_id != chan->id) {
- DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
- chan_id, chan->id);
- return -EINVAL;
- }
- }
-
- /* Set atomically the state to "active" */
- CMM_ACCESS_ONCE(session->active) = 1;
- CMM_ACCESS_ONCE(session->priv->been_active) = 1;
-
- ret = lttng_session_statedump(session);
- if (ret)
- return ret;
-end:
- return ret;
-}
-
-int lttng_session_disable(struct lttng_ust_session *session)
-{
- int ret = 0;
-
- if (!session->active) {
- ret = -EBUSY;
- goto end;
- }
- /* Set atomically the state to "inactive" */
- CMM_ACCESS_ONCE(session->active) = 0;
-
- /* Set transient enabler state to "disabled" */
- session->priv->tstate = 0;
- lttng_session_sync_event_enablers(session);
-end:
- return ret;
-}
-
-int lttng_channel_enable(struct lttng_ust_channel_common *lttng_channel)
-{
- int ret = 0;
-
- if (lttng_channel->enabled) {
- ret = -EBUSY;
- goto end;
- }
- /* Set transient enabler state to "enabled" */
- lttng_channel->priv->tstate = 1;
- lttng_session_sync_event_enablers(lttng_channel->session);
- /* Set atomically the state to "enabled" */
- CMM_ACCESS_ONCE(lttng_channel->enabled) = 1;
-end:
- return ret;
-}
-
-int lttng_channel_disable(struct lttng_ust_channel_common *lttng_channel)
-{
- int ret = 0;
-
- if (!lttng_channel->enabled) {
- ret = -EBUSY;
- goto end;
- }
- /* Set atomically the state to "disabled" */
- CMM_ACCESS_ONCE(lttng_channel->enabled) = 0;
- /* Set transient enabler state to "enabled" */
- lttng_channel->priv->tstate = 0;
- lttng_session_sync_event_enablers(lttng_channel->session);
-end:
- return ret;
-}
-
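-/*
- * Return the hash table bucket matching the full "provider:event" name of
- * the given event descriptor.
- */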
-static inline
-struct cds_hlist_head *borrow_hash_table_bucket(
- struct cds_hlist_head *hash_table,
- unsigned int hash_table_size,
- const struct lttng_ust_event_desc *desc)
-{
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
- size_t name_len;
- uint32_t hash;
-
- lttng_ust_format_event_name(desc, name);
- name_len = strlen(name);
-
- hash = jhash(name, name_len, 0);
- return &hash_table[hash & (hash_table_size - 1)];
-}
-
-/*
- * Supports event creation while tracing session is active.
- */
-static
-int lttng_event_recorder_create(const struct lttng_ust_event_desc *desc,
- struct lttng_ust_channel_buffer *chan)
-{
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
- struct lttng_ust_event_recorder *event_recorder;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
- struct lttng_ust_session *session = chan->parent->session;
- struct cds_hlist_head *head;
- int ret = 0;
- int notify_socket, loglevel;
- const char *uri;
-
- head = borrow_hash_table_bucket(chan->parent->session->priv->events_ht.table,
- LTTNG_UST_EVENT_HT_SIZE, desc);
-
- notify_socket = lttng_get_notify_socket(session->priv->owner);
- if (notify_socket < 0) {
- ret = notify_socket;
- goto socket_error;
- }
-
- ret = lttng_create_all_event_enums(desc->nr_fields, desc->fields,
- session);
- if (ret < 0) {
- DBG("Error (%d) adding enum to session", ret);
- goto create_enum_error;
- }
-
- /*
-	 * Check if the loglevel matches. Refuse to connect the event if not.
- */
- event_recorder = zmalloc(sizeof(struct lttng_ust_event_recorder));
- if (!event_recorder) {
- ret = -ENOMEM;
- goto cache_error;
- }
- event_recorder->struct_size = sizeof(struct lttng_ust_event_recorder);
-
- event_recorder->parent = zmalloc(sizeof(struct lttng_ust_event_common));
- if (!event_recorder->parent) {
- ret = -ENOMEM;
- goto parent_error;
- }
- event_recorder->parent->struct_size = sizeof(struct lttng_ust_event_common);
- event_recorder->parent->type = LTTNG_UST_EVENT_TYPE_RECORDER;
- event_recorder->parent->child = event_recorder;
-
- event_recorder_priv = zmalloc(sizeof(struct lttng_ust_event_recorder_private));
- if (!event_recorder_priv) {
- ret = -ENOMEM;
- goto priv_error;
- }
- event_recorder->priv = event_recorder_priv;
- event_recorder_priv->pub = event_recorder;
- event_recorder->parent->priv = &event_recorder_priv->parent;
- event_recorder_priv->parent.pub = event_recorder->parent;
-
- event_recorder->chan = chan;
-
- /* Event will be enabled by enabler sync. */
- event_recorder->parent->run_filter = lttng_ust_interpret_event_filter;
- event_recorder->parent->enabled = 0;
- event_recorder->parent->priv->registered = 0;
- CDS_INIT_LIST_HEAD(&event_recorder->parent->priv->filter_bytecode_runtime_head);
- CDS_INIT_LIST_HEAD(&event_recorder->parent->priv->enablers_ref_head);
- event_recorder->parent->priv->desc = desc;
-
- if (desc->loglevel)
- loglevel = *(*event_recorder->parent->priv->desc->loglevel);
- else
- loglevel = TRACE_DEFAULT;
- if (desc->model_emf_uri)
- uri = *(desc->model_emf_uri);
- else
- uri = NULL;
-
- lttng_ust_format_event_name(desc, name);
-
- /* Fetch event ID from sessiond */
- ret = ustcomm_register_event(notify_socket,
- session,
- session->priv->objd,
- chan->priv->parent.objd,
- name,
- loglevel,
- desc->signature,
- desc->nr_fields,
- desc->fields,
- uri,
- &event_recorder->priv->id);
- if (ret < 0) {
- DBG("Error (%d) registering event to sessiond", ret);
- goto sessiond_register_error;
- }
-
- cds_list_add(&event_recorder_priv->node, &chan->parent->session->priv->events_head);
- cds_hlist_add_head(&event_recorder_priv->hlist, head);
- return 0;
-
-sessiond_register_error:
- free(event_recorder_priv);
-priv_error:
- free(event_recorder->parent);
-parent_error:
- free(event_recorder);
-cache_error:
-create_enum_error:
-socket_error:
- return ret;
-}
-
-static
-int lttng_event_notifier_create(const struct lttng_ust_event_desc *desc,
- uint64_t token, uint64_t error_counter_index,
- struct lttng_event_notifier_group *event_notifier_group)
-{
- struct lttng_ust_event_notifier *event_notifier;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
- struct cds_hlist_head *head;
- int ret = 0;
-
- /*
-	 * Get the hashtable bucket into which the created lttng_event_notifier
-	 * object should be inserted.
- */
- head = borrow_hash_table_bucket(
- event_notifier_group->event_notifiers_ht.table,
- LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, desc);
-
- event_notifier = zmalloc(sizeof(struct lttng_ust_event_notifier));
- if (!event_notifier) {
- ret = -ENOMEM;
- goto error;
- }
- event_notifier->struct_size = sizeof(struct lttng_ust_event_notifier);
-
- event_notifier->parent = zmalloc(sizeof(struct lttng_ust_event_common));
- if (!event_notifier->parent) {
- ret = -ENOMEM;
- goto parent_error;
- }
- event_notifier->parent->struct_size = sizeof(struct lttng_ust_event_common);
- event_notifier->parent->type = LTTNG_UST_EVENT_TYPE_NOTIFIER;
- event_notifier->parent->child = event_notifier;
-
- event_notifier_priv = zmalloc(sizeof(struct lttng_ust_event_notifier_private));
- if (!event_notifier_priv) {
- ret = -ENOMEM;
- goto priv_error;
- }
- event_notifier->priv = event_notifier_priv;
- event_notifier_priv->pub = event_notifier;
- event_notifier->parent->priv = &event_notifier_priv->parent;
- event_notifier_priv->parent.pub = event_notifier->parent;
-
- event_notifier_priv->group = event_notifier_group;
- event_notifier_priv->parent.user_token = token;
- event_notifier_priv->error_counter_index = error_counter_index;
-
- /* Event notifier will be enabled by enabler sync. */
- event_notifier->parent->run_filter = lttng_ust_interpret_event_filter;
- event_notifier->parent->enabled = 0;
- event_notifier_priv->parent.registered = 0;
-
- CDS_INIT_LIST_HEAD(&event_notifier->parent->priv->filter_bytecode_runtime_head);
- CDS_INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
- CDS_INIT_LIST_HEAD(&event_notifier_priv->parent.enablers_ref_head);
- event_notifier_priv->parent.desc = desc;
- event_notifier->notification_send = lttng_event_notifier_notification_send;
-
- cds_list_add(&event_notifier_priv->node,
- &event_notifier_group->event_notifiers_head);
- cds_hlist_add_head(&event_notifier_priv->hlist, head);
-
- return 0;
-
-priv_error:
- free(event_notifier->parent);
-parent_error:
- free(event_notifier);
-error:
- return ret;
-}
-
-static
-int lttng_desc_match_star_glob_enabler(const struct lttng_ust_event_desc *desc,
- struct lttng_enabler *enabler)
-{
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
- int loglevel = 0;
- unsigned int has_loglevel = 0;
-
- lttng_ust_format_event_name(desc, name);
- assert(enabler->format_type == LTTNG_ENABLER_FORMAT_STAR_GLOB);
- if (!strutils_star_glob_match(enabler->event_param.name, SIZE_MAX,
- name, SIZE_MAX))
- return 0;
- if (desc->loglevel) {
- loglevel = *(*desc->loglevel);
- has_loglevel = 1;
- }
- if (!lttng_loglevel_match(loglevel,
- has_loglevel,
- enabler->event_param.loglevel_type,
- enabler->event_param.loglevel))
- return 0;
- return 1;
-}
-
-static
-int lttng_desc_match_event_enabler(const struct lttng_ust_event_desc *desc,
- struct lttng_enabler *enabler)
-{
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
- int loglevel = 0;
- unsigned int has_loglevel = 0;
-
- lttng_ust_format_event_name(desc, name);
- assert(enabler->format_type == LTTNG_ENABLER_FORMAT_EVENT);
- if (strcmp(name, enabler->event_param.name))
- return 0;
- if (desc->loglevel) {
- loglevel = *(*desc->loglevel);
- has_loglevel = 1;
- }
- if (!lttng_loglevel_match(loglevel,
- has_loglevel,
- enabler->event_param.loglevel_type,
- enabler->event_param.loglevel))
- return 0;
- return 1;
-}
-
-static
-int lttng_desc_match_enabler(const struct lttng_ust_event_desc *desc,
- struct lttng_enabler *enabler)
-{
- switch (enabler->format_type) {
- case LTTNG_ENABLER_FORMAT_STAR_GLOB:
- {
- struct lttng_ust_excluder_node *excluder;
-
- if (!lttng_desc_match_star_glob_enabler(desc, enabler)) {
- return 0;
- }
-
- /*
-			 * If the matching event also matches an excluder,
-			 * return 'does not match'.
- */
- cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
- int count;
-
- for (count = 0; count < excluder->excluder.count; count++) {
- int len;
- char *excluder_name;
-
- excluder_name = (char *) (excluder->excluder.names)
- + count * LTTNG_UST_ABI_SYM_NAME_LEN;
- len = strnlen(excluder_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- if (len > 0) {
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
-
- lttng_ust_format_event_name(desc, name);
- if (strutils_star_glob_match(excluder_name, len, name, SIZE_MAX)) {
- return 0;
- }
- }
- }
- }
- return 1;
- }
- case LTTNG_ENABLER_FORMAT_EVENT:
- return lttng_desc_match_event_enabler(desc, enabler);
- default:
- return -EINVAL;
- }
-}
-
-static
-int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
- struct lttng_ust_event_recorder *event_recorder)
-{
- if (lttng_desc_match_enabler(event_recorder->parent->priv->desc,
- lttng_event_enabler_as_enabler(event_enabler))
- && event_recorder->chan == event_enabler->chan)
- return 1;
- else
- return 0;
-}
-
-static
-int lttng_event_notifier_enabler_match_event_notifier(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_event_notifier *event_notifier)
-{
- int desc_matches = lttng_desc_match_enabler(event_notifier->priv->parent.desc,
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
-
- if (desc_matches && event_notifier->priv->group == event_notifier_enabler->group &&
- event_notifier->priv->parent.user_token == event_notifier_enabler->user_token)
- return 1;
- else
- return 0;
-}
-
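-/*
- * Look up an existing reference to the given enabler in an enabler
- * reference list. Return NULL if none exists.
- */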
-static
-struct lttng_enabler_ref *lttng_enabler_ref(
- struct cds_list_head *enabler_ref_list,
- struct lttng_enabler *enabler)
-{
- struct lttng_enabler_ref *enabler_ref;
-
- cds_list_for_each_entry(enabler_ref, enabler_ref_list, node) {
- if (enabler_ref->ref == enabler)
- return enabler_ref;
- }
- return NULL;
-}
-
-/*
- * Create struct lttng_event if it is missing from the session and present in
- * the list of tracepoint probes.
- */
-static
-void lttng_create_event_recorder_if_missing(struct lttng_event_enabler *event_enabler)
-{
- struct lttng_ust_session *session = event_enabler->chan->parent->session;
- struct lttng_ust_registered_probe *reg_probe;
- const struct lttng_ust_event_desc *desc;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
- int i;
- struct cds_list_head *probe_list;
-
- probe_list = lttng_get_probe_list_head();
- /*
-	 * For each probe event, if it matches our enabler, create an
-	 * associated lttng_event if not
- * already present.
- */
- cds_list_for_each_entry(reg_probe, probe_list, head) {
- const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
-
- for (i = 0; i < probe_desc->nr_events; i++) {
- int ret;
- bool found = false;
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
-
- desc = probe_desc->event_desc[i];
- if (!lttng_desc_match_enabler(desc,
- lttng_event_enabler_as_enabler(event_enabler)))
- continue;
-
- head = borrow_hash_table_bucket(
- session->priv->events_ht.table,
- LTTNG_UST_EVENT_HT_SIZE, desc);
-
- cds_hlist_for_each_entry(event_recorder_priv, node, head, hlist) {
- if (event_recorder_priv->parent.desc == desc
- && event_recorder_priv->pub->chan == event_enabler->chan) {
- found = true;
- break;
- }
- }
- if (found)
- continue;
-
- /*
- * We need to create an event for this
- * event probe.
- */
- ret = lttng_event_recorder_create(probe_desc->event_desc[i],
- event_enabler->chan);
- if (ret) {
- DBG("Unable to create event \"%s:%s\", error %d\n",
- probe_desc->provider_name,
- probe_desc->event_desc[i]->event_name, ret);
- }
- }
- }
-}
-
-static
-void probe_provider_event_for_each(const struct lttng_ust_probe_desc *provider_desc,
- void (*event_func)(struct lttng_ust_event_common *event))
-{
- struct cds_hlist_node *node, *tmp_node;
- struct cds_list_head *sessionsp;
- unsigned int i;
-
- /* Get handle on list of sessions. */
- sessionsp = lttng_get_sessions();
-
- /*
- * Iterate over all events in the probe provider descriptions and
- * sessions to queue the unregistration of the events.
- */
- for (i = 0; i < provider_desc->nr_events; i++) {
- const struct lttng_ust_event_desc *event_desc;
- struct lttng_event_notifier_group *event_notifier_group;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
- struct lttng_ust_session_private *session_priv;
- struct cds_hlist_head *head;
-
- event_desc = provider_desc->event_desc[i];
-
- /*
-		 * Iterate over all sessions to find the current event
- * description.
- */
- cds_list_for_each_entry(session_priv, sessionsp, node) {
- /*
- * Get the list of events in the hashtable bucket and
- * iterate to find the event matching this descriptor.
- */
- head = borrow_hash_table_bucket(
- session_priv->events_ht.table,
- LTTNG_UST_EVENT_HT_SIZE, event_desc);
-
- cds_hlist_for_each_entry_safe(event_recorder_priv, node, tmp_node, head, hlist) {
- if (event_desc == event_recorder_priv->parent.desc) {
- event_func(event_recorder_priv->parent.pub);
- break;
- }
- }
- }
-
- /*
- * Iterate over all event_notifier groups to find the current event
- * description.
- */
- cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
- /*
- * Get the list of event_notifiers in the hashtable bucket and
- * iterate to find the event_notifier matching this
- * descriptor.
- */
- head = borrow_hash_table_bucket(
- event_notifier_group->event_notifiers_ht.table,
- LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, event_desc);
-
- cds_hlist_for_each_entry_safe(event_notifier_priv, node, tmp_node, head, hlist) {
- if (event_desc == event_notifier_priv->parent.desc) {
- event_func(event_notifier_priv->parent.pub);
- break;
- }
- }
- }
- }
-}
-
-static
-void _event_enum_destroy(struct lttng_ust_event_common *event)
-{
-
- switch (event->type) {
- case LTTNG_UST_EVENT_TYPE_RECORDER:
- {
- struct lttng_ust_event_recorder *event_recorder = event->child;
- struct lttng_ust_session *session = event_recorder->chan->parent->session;
- unsigned int i;
-
- /* Destroy enums of the current event. */
- for (i = 0; i < event_recorder->parent->priv->desc->nr_fields; i++) {
- const struct lttng_ust_enum_desc *enum_desc;
- const struct lttng_ust_event_field *field;
- struct lttng_enum *curr_enum;
-
- field = event_recorder->parent->priv->desc->fields[i];
- switch (field->type->type) {
- case lttng_ust_type_enum:
- enum_desc = lttng_ust_get_type_enum(field->type)->desc;
- break;
- default:
- continue;
- }
-
- curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc);
- if (curr_enum) {
- _lttng_enum_destroy(curr_enum);
- }
- }
- break;
- }
- case LTTNG_UST_EVENT_TYPE_NOTIFIER:
- break;
- default:
- abort();
- }
- /* Destroy event. */
- _lttng_event_destroy(event);
-}
-
-/*
- * Iterate over all the UST sessions to unregister and destroy all probes from
- * the probe provider descriptor received as an argument. Must be called with the
- * ust_lock held.
- */
-void lttng_probe_provider_unregister_events(
- const struct lttng_ust_probe_desc *provider_desc)
-{
- /*
- * Iterate over all events in the probe provider descriptions and sessions
- * to queue the unregistration of the events.
- */
- probe_provider_event_for_each(provider_desc, _lttng_event_unregister);
-
- /* Wait for grace period. */
- lttng_ust_urcu_synchronize_rcu();
- /* Prune the unregistration queue. */
- lttng_ust_tp_probe_prune_release_queue();
-
- /*
- * It is now safe to destroy the events and remove them from the event list
- * and hashtables.
- */
- probe_provider_event_for_each(provider_desc, _event_enum_destroy);
-}
-
-/*
- * Create events associated with an event enabler (if not already present),
- * and add backward reference from the event to the enabler.
- */
-static
-int lttng_event_enabler_ref_event_recorders(struct lttng_event_enabler *event_enabler)
-{
- struct lttng_ust_session *session = event_enabler->chan->parent->session;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
-
- if (!lttng_event_enabler_as_enabler(event_enabler)->enabled)
- goto end;
-
- /* First ensure that probe events are created for this enabler. */
- lttng_create_event_recorder_if_missing(event_enabler);
-
- /* For each event matching enabler in session event list. */
- cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
- struct lttng_enabler_ref *enabler_ref;
-
- if (!lttng_event_enabler_match_event(event_enabler, event_recorder_priv->pub))
- continue;
-
- enabler_ref = lttng_enabler_ref(&event_recorder_priv->parent.enablers_ref_head,
- lttng_event_enabler_as_enabler(event_enabler));
- if (!enabler_ref) {
- /*
- * If no backward ref, create it.
- * Add backward ref from event to enabler.
- */
- enabler_ref = zmalloc(sizeof(*enabler_ref));
- if (!enabler_ref)
- return -ENOMEM;
- enabler_ref->ref = lttng_event_enabler_as_enabler(
- event_enabler);
- cds_list_add(&enabler_ref->node,
- &event_recorder_priv->parent.enablers_ref_head);
- }
-
- /*
- * Link filter bytecodes if not linked yet.
- */
- lttng_enabler_link_bytecode(event_recorder_priv->parent.desc,
- &session->priv->ctx,
- &event_recorder_priv->parent.filter_bytecode_runtime_head,
- <tng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
-
- /* TODO: merge event context. */
- }
-end:
- return 0;
-}
-
-/*
- * Called at library load: connect the probe on all enablers matching
- * this event.
- * Called with session mutex held.
- */
-int lttng_fix_pending_events(void)
-{
- struct lttng_ust_session_private *session_priv;
-
- cds_list_for_each_entry(session_priv, &sessions, node) {
- lttng_session_lazy_sync_event_enablers(session_priv->pub);
- }
- return 0;
-}
-
-int lttng_fix_pending_event_notifiers(void)
-{
- struct lttng_event_notifier_group *event_notifier_group;
-
- cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
- lttng_event_notifier_group_sync_enablers(event_notifier_group);
- }
- return 0;
-}
-
-/*
- * For each session of the owner thread, execute pending statedump.
- * Only dump state for the sessions owned by the caller thread, because
- * we don't keep ust_lock across the entire iteration.
- */
-void lttng_handle_pending_statedump(void *owner)
-{
- struct lttng_ust_session_private *session_priv;
-
- /* Execute state dump */
- do_lttng_ust_statedump(owner);
-
- /* Clear pending state dump */
- if (ust_lock()) {
- goto end;
- }
- cds_list_for_each_entry(session_priv, &sessions, node) {
- if (session_priv->owner != owner)
- continue;
- if (!session_priv->statedump_pending)
- continue;
- session_priv->statedump_pending = 0;
- }
-end:
- ust_unlock();
- return;
-}
-
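-/*
- * Free an event (recorder or notifier): release its filter runtimes and
- * enabler references, and remove it from the event list and hash table.
- */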
-static
-void _lttng_event_destroy(struct lttng_ust_event_common *event)
-{
- struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
-
- lttng_free_event_filter_runtime(event);
- /* Free event enabler refs */
- cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
- &event->priv->enablers_ref_head, node)
- free(enabler_ref);
-
- switch (event->type) {
- case LTTNG_UST_EVENT_TYPE_RECORDER:
- {
- struct lttng_ust_event_recorder *event_recorder = event->child;
-
- /* Remove from event list. */
- cds_list_del(&event_recorder->priv->node);
- /* Remove from event hash table. */
- cds_hlist_del(&event_recorder->priv->hlist);
-
- lttng_destroy_context(event_recorder->priv->ctx);
- free(event_recorder->parent);
- free(event_recorder->priv);
- free(event_recorder);
- break;
- }
- case LTTNG_UST_EVENT_TYPE_NOTIFIER:
- {
- struct lttng_ust_event_notifier *event_notifier = event->child;
-
- /* Remove from event list. */
- cds_list_del(&event_notifier->priv->node);
- /* Remove from event hash table. */
- cds_hlist_del(&event_notifier->priv->hlist);
-
- free(event_notifier->priv);
- free(event_notifier->parent);
- free(event_notifier);
- break;
- }
- default:
- abort();
- }
-}
-
-static
-void _lttng_enum_destroy(struct lttng_enum *_enum)
-{
- cds_list_del(&_enum->node);
- cds_hlist_del(&_enum->hlist);
- free(_enum);
-}
-
-void lttng_ust_abi_events_exit(void)
-{
- struct lttng_ust_session_private *session_priv, *tmpsession_priv;
-
- cds_list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, node)
- lttng_session_destroy(session_priv->pub);
-}
-
-/*
- * Enabler management.
- */
-struct lttng_event_enabler *lttng_event_enabler_create(
- enum lttng_enabler_format_type format_type,
- struct lttng_ust_abi_event *event_param,
- struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_event_enabler *event_enabler;
-
- event_enabler = zmalloc(sizeof(*event_enabler));
- if (!event_enabler)
- return NULL;
- event_enabler->base.format_type = format_type;
- CDS_INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
- CDS_INIT_LIST_HEAD(&event_enabler->base.excluder_head);
- memcpy(&event_enabler->base.event_param, event_param,
- sizeof(event_enabler->base.event_param));
- event_enabler->chan = chan;
- /* ctx left NULL */
- event_enabler->base.enabled = 0;
- cds_list_add(&event_enabler->node, &event_enabler->chan->parent->session->priv->enablers_head);
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
-
- return event_enabler;
-}
-
-struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
- struct lttng_event_notifier_group *event_notifier_group,
- enum lttng_enabler_format_type format_type,
- struct lttng_ust_abi_event_notifier *event_notifier_param)
-{
- struct lttng_event_notifier_enabler *event_notifier_enabler;
-
- event_notifier_enabler = zmalloc(sizeof(*event_notifier_enabler));
- if (!event_notifier_enabler)
- return NULL;
- event_notifier_enabler->base.format_type = format_type;
- CDS_INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
- CDS_INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
- CDS_INIT_LIST_HEAD(&event_notifier_enabler->base.excluder_head);
-
- event_notifier_enabler->user_token = event_notifier_param->event.token;
- event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
- event_notifier_enabler->num_captures = 0;
-
- memcpy(&event_notifier_enabler->base.event_param.name,
- event_notifier_param->event.name,
- sizeof(event_notifier_enabler->base.event_param.name));
- event_notifier_enabler->base.event_param.instrumentation =
- event_notifier_param->event.instrumentation;
- event_notifier_enabler->base.event_param.loglevel =
- event_notifier_param->event.loglevel;
- event_notifier_enabler->base.event_param.loglevel_type =
- event_notifier_param->event.loglevel_type;
-
- event_notifier_enabler->base.enabled = 0;
- event_notifier_enabler->group = event_notifier_group;
-
- cds_list_add(&event_notifier_enabler->node,
- &event_notifier_group->enablers_head);
-
- lttng_event_notifier_group_sync_enablers(event_notifier_group);
-
- return event_notifier_enabler;
-}
-
-int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
-{
- lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
-
- return 0;
-}
-
-int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
-{
- lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
-
- return 0;
-}
-
-static
-void _lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
- struct lttng_ust_bytecode_node **bytecode)
-{
- (*bytecode)->enabler = enabler;
- cds_list_add_tail(&(*bytecode)->node, &enabler->filter_bytecode_head);
- /* Take ownership of bytecode */
- *bytecode = NULL;
-}
-
-int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
- struct lttng_ust_bytecode_node **bytecode)
-{
- _lttng_enabler_attach_filter_bytecode(
- lttng_event_enabler_as_enabler(event_enabler), bytecode);
-
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
- return 0;
-}
-
-static
-void _lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
- struct lttng_ust_excluder_node **excluder)
-{
- (*excluder)->enabler = enabler;
- cds_list_add_tail(&(*excluder)->node, &enabler->excluder_head);
- /* Take ownership of excluder */
- *excluder = NULL;
-}
-
-int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *event_enabler,
- struct lttng_ust_excluder_node **excluder)
-{
- _lttng_enabler_attach_exclusion(
- lttng_event_enabler_as_enabler(event_enabler), excluder);
-
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
- return 0;
-}
-
-int lttng_event_notifier_enabler_enable(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
-
- return 0;
-}
-
-int lttng_event_notifier_enabler_disable(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
-
- return 0;
-}
-
-int lttng_event_notifier_enabler_attach_filter_bytecode(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_bytecode_node **bytecode)
-{
- _lttng_enabler_attach_filter_bytecode(
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
- bytecode);
-
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
- return 0;
-}
-
-int lttng_event_notifier_enabler_attach_capture_bytecode(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_bytecode_node **bytecode)
-{
- (*bytecode)->enabler = lttng_event_notifier_enabler_as_enabler(
- event_notifier_enabler);
- cds_list_add_tail(&(*bytecode)->node,
- &event_notifier_enabler->capture_bytecode_head);
- /* Take ownership of bytecode */
- *bytecode = NULL;
- event_notifier_enabler->num_captures++;
-
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
- return 0;
-}
-
-int lttng_event_notifier_enabler_attach_exclusion(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_excluder_node **excluder)
-{
- _lttng_enabler_attach_exclusion(
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
- excluder);
-
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
- return 0;
-}
-
-int lttng_attach_context(struct lttng_ust_abi_context *context_param,
- union lttng_ust_abi_args *uargs,
- struct lttng_ust_ctx **ctx, struct lttng_ust_session *session)
-{
- /*
- * We cannot attach a context after trace has been started for a
- * session because the metadata does not allow expressing this
- * information outside of the original channel scope.
- */
- if (session->priv->been_active)
- return -EPERM;
-
- switch (context_param->ctx) {
- case LTTNG_UST_ABI_CONTEXT_PTHREAD_ID:
- return lttng_add_pthread_id_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
- {
- struct lttng_ust_abi_perf_counter_ctx *perf_ctx_param;
-
- perf_ctx_param = &context_param->u.perf_counter;
- return lttng_add_perf_counter_to_ctx(
- perf_ctx_param->type,
- perf_ctx_param->config,
- perf_ctx_param->name,
- ctx);
- }
- case LTTNG_UST_ABI_CONTEXT_VTID:
- return lttng_add_vtid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VPID:
- return lttng_add_vpid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_PROCNAME:
- return lttng_add_procname_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_IP:
- return lttng_add_ip_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_CPU_ID:
- return lttng_add_cpu_id_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
- return lttng_ust_add_app_context_to_ctx_rcu(uargs->app_context.ctxname,
- ctx);
- case LTTNG_UST_ABI_CONTEXT_CGROUP_NS:
- return lttng_add_cgroup_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_IPC_NS:
- return lttng_add_ipc_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_MNT_NS:
- return lttng_add_mnt_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_NET_NS:
- return lttng_add_net_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_PID_NS:
- return lttng_add_pid_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_TIME_NS:
- return lttng_add_time_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_USER_NS:
- return lttng_add_user_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_UTS_NS:
- return lttng_add_uts_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VUID:
- return lttng_add_vuid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VEUID:
- return lttng_add_veuid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VSUID:
- return lttng_add_vsuid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VGID:
- return lttng_add_vgid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VEGID:
- return lttng_add_vegid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VSGID:
- return lttng_add_vsgid_to_ctx(ctx);
- default:
- return -EINVAL;
- }
-}
-
-int lttng_event_enabler_attach_context(
- struct lttng_event_enabler *enabler __attribute__((unused)),
- struct lttng_ust_abi_context *context_param __attribute__((unused)))
-{
- return -ENOSYS;
-}
-
-void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
-{
- if (!event_enabler) {
- return;
- }
- cds_list_del(&event_enabler->node);
-
- lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
-
- lttng_destroy_context(event_enabler->ctx);
- free(event_enabler);
-}
-
-/*
- * lttng_session_sync_event_enablers should be called just before starting a
- * session.
- */
-static
-void lttng_session_sync_event_enablers(struct lttng_ust_session *session)
-{
- struct lttng_event_enabler *event_enabler;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
-
- cds_list_for_each_entry(event_enabler, &session->priv->enablers_head, node)
- lttng_event_enabler_ref_event_recorders(event_enabler);
- /*
- * For each event, if at least one of its enablers is enabled,
- * and its channel and session transient states are enabled, we
- * enable the event, else we disable it.
- */
- cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
- struct lttng_enabler_ref *enabler_ref;
- struct lttng_ust_bytecode_runtime *runtime;
- int enabled = 0, has_enablers_without_filter_bytecode = 0;
- int nr_filters = 0;
-
- /* Enable events */
- cds_list_for_each_entry(enabler_ref,
- &event_recorder_priv->parent.enablers_ref_head, node) {
- if (enabler_ref->ref->enabled) {
- enabled = 1;
- break;
- }
- }
- /*
- * Enabled state is based on union of enablers, with
-		 * intersection of session and channel transient enable
- * states.
- */
- enabled = enabled && session->priv->tstate && event_recorder_priv->pub->chan->priv->parent.tstate;
-
- CMM_STORE_SHARED(event_recorder_priv->pub->parent->enabled, enabled);
- /*
- * Sync tracepoint registration with event enabled
- * state.
- */
- if (enabled) {
- if (!event_recorder_priv->parent.registered)
- register_event(event_recorder_priv->parent.pub);
- } else {
- if (event_recorder_priv->parent.registered)
- unregister_event(event_recorder_priv->parent.pub);
- }
-
-		/* Check if the event has enabled enablers without filter bytecode. */
- cds_list_for_each_entry(enabler_ref,
- &event_recorder_priv->parent.enablers_ref_head, node) {
- if (enabler_ref->ref->enabled
- && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
- has_enablers_without_filter_bytecode = 1;
- break;
- }
- }
- event_recorder_priv->parent.has_enablers_without_filter_bytecode =
- has_enablers_without_filter_bytecode;
-
- /* Enable filters */
- cds_list_for_each_entry(runtime,
- &event_recorder_priv->parent.filter_bytecode_runtime_head, node) {
- lttng_bytecode_sync_state(runtime);
- nr_filters++;
- }
- CMM_STORE_SHARED(event_recorder_priv->parent.pub->eval_filter,
- !(has_enablers_without_filter_bytecode || !nr_filters));
- }
- lttng_ust_tp_probe_prune_release_queue();
-}
-
-/* Support for event notifiers was introduced in probe provider major version 2. */
-static
-bool lttng_ust_probe_supports_event_notifier(const struct lttng_ust_probe_desc *probe_desc)
-{
- return probe_desc->major >= 2;
-}
-
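-/*
- * For each probe event matching the given enabler, create the
- * corresponding event notifier in the event notifier group if it does
- * not already exist.
- */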
-static
-void lttng_create_event_notifier_if_missing(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
- struct lttng_ust_registered_probe *reg_probe;
- struct cds_list_head *probe_list;
- int i;
-
- probe_list = lttng_get_probe_list_head();
-
- cds_list_for_each_entry(reg_probe, probe_list, head) {
- const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
-
- for (i = 0; i < probe_desc->nr_events; i++) {
- int ret;
- bool found = false;
- const struct lttng_ust_event_desc *desc;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
-
- desc = probe_desc->event_desc[i];
-
- if (!lttng_desc_match_enabler(desc,
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
- continue;
-
- /*
- * Given the current event_notifier group, get the bucket that
- * the target event_notifier would be if it was already
- * created.
- */
- head = borrow_hash_table_bucket(
- event_notifier_group->event_notifiers_ht.table,
- LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, desc);
-
- cds_hlist_for_each_entry(event_notifier_priv, node, head, hlist) {
- /*
- * Check if event_notifier already exists by checking
- * if the event_notifier and enabler share the same
- * description and id.
- */
- if (event_notifier_priv->parent.desc == desc &&
- event_notifier_priv->parent.user_token == event_notifier_enabler->user_token) {
- found = true;
- break;
- }
- }
-
- if (found)
- continue;
-
- /* Check that the probe supports event notifiers, else report the error. */
- if (!lttng_ust_probe_supports_event_notifier(probe_desc)) {
- ERR("Probe \"%s\" contains event \"%s:%s\" which matches an enabled event notifier, "
- "but its version (%u.%u) is too old and does not implement event notifiers. "
- "It needs to be recompiled against a newer version of LTTng-UST, otherwise "
- "this event will not generate any notification.",
- probe_desc->provider_name,
- probe_desc->provider_name, desc->event_name,
- probe_desc->major,
- probe_desc->minor);
- continue;
- }
- /*
-			 * We need to create an event_notifier for this event probe.
- */
- ret = lttng_event_notifier_create(desc,
- event_notifier_enabler->user_token,
- event_notifier_enabler->error_counter_index,
- event_notifier_group);
- if (ret) {
- DBG("Unable to create event_notifier \"%s:%s\", error %d\n",
- probe_desc->provider_name,
- probe_desc->event_desc[i]->event_name, ret);
- }
- }
- }
-}
-
-/*
- * Create event_notifiers associated with an event_notifier enabler (if not already present).
- */
-static
-int lttng_event_notifier_enabler_ref_event_notifiers(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
-
- /*
-	 * Only try to create event_notifiers for enablers that are enabled;
-	 * the user might still be attaching a filter or exclusion to the
-	 * event_notifier_enabler.
- */
- if (!lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled)
- goto end;
-
- /* First, ensure that probe event_notifiers are created for this enabler. */
- lttng_create_event_notifier_if_missing(event_notifier_enabler);
-
- /* Link the created event_notifier with its associated enabler. */
- cds_list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
- struct lttng_enabler_ref *enabler_ref;
-
- if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier_priv->pub))
- continue;
-
- enabler_ref = lttng_enabler_ref(&event_notifier_priv->parent.enablers_ref_head,
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
- if (!enabler_ref) {
- /*
- * If no backward ref, create it.
- * Add backward ref from event_notifier to enabler.
- */
- enabler_ref = zmalloc(sizeof(*enabler_ref));
- if (!enabler_ref)
- return -ENOMEM;
-
- enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
- event_notifier_enabler);
- cds_list_add(&enabler_ref->node,
- &event_notifier_priv->parent.enablers_ref_head);
- }
-
- /*
- * Link filter bytecodes if not linked yet.
- */
- lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
- &event_notifier_group->ctx,
- &event_notifier_priv->parent.filter_bytecode_runtime_head,
-				&lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
-
- /*
- * Link capture bytecodes if not linked yet.
- */
- lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
- &event_notifier_group->ctx, &event_notifier_priv->capture_bytecode_runtime_head,
- &event_notifier_enabler->capture_bytecode_head);
-
- event_notifier_priv->num_captures = event_notifier_enabler->num_captures;
- }
-end:
- return 0;
-}
-
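-/*
- * lttng_event_notifier_group_sync_enablers should be called after each
- * modification to the group's enablers, to synchronize the event
- * notifiers (enabled state, filters, captures) with their enablers.
- */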
-static
-void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
-{
- struct lttng_event_notifier_enabler *event_notifier_enabler;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
-
- cds_list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
- lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);
-
- /*
- * For each event_notifier, if at least one of its enablers is enabled,
- * we enable the event_notifier, else we disable it.
- */
- cds_list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
- struct lttng_enabler_ref *enabler_ref;
- struct lttng_ust_bytecode_runtime *runtime;
- int enabled = 0, has_enablers_without_filter_bytecode = 0;
- int nr_filters = 0, nr_captures = 0;
-
- /* Enable event_notifiers */
- cds_list_for_each_entry(enabler_ref,
- &event_notifier_priv->parent.enablers_ref_head, node) {
- if (enabler_ref->ref->enabled) {
- enabled = 1;
- break;
- }
- }
-
- CMM_STORE_SHARED(event_notifier_priv->pub->parent->enabled, enabled);
- /*
- * Sync tracepoint registration with event_notifier enabled
- * state.
- */
- if (enabled) {
- if (!event_notifier_priv->parent.registered)
- register_event(event_notifier_priv->parent.pub);
- } else {
- if (event_notifier_priv->parent.registered)
- unregister_event(event_notifier_priv->parent.pub);
- }
-
- /* Check if has enablers without bytecode enabled */
- cds_list_for_each_entry(enabler_ref,
- &event_notifier_priv->parent.enablers_ref_head, node) {
- if (enabler_ref->ref->enabled
- && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
- has_enablers_without_filter_bytecode = 1;
- break;
- }
- }
- event_notifier_priv->parent.has_enablers_without_filter_bytecode =
- has_enablers_without_filter_bytecode;
-
- /* Enable filters */
- cds_list_for_each_entry(runtime,
- &event_notifier_priv->parent.filter_bytecode_runtime_head, node) {
- lttng_bytecode_sync_state(runtime);
- nr_filters++;
- }
- CMM_STORE_SHARED(event_notifier_priv->parent.pub->eval_filter,
- !(has_enablers_without_filter_bytecode || !nr_filters));
-
- /* Enable captures. */
- cds_list_for_each_entry(runtime,
- &event_notifier_priv->capture_bytecode_runtime_head, node) {
- lttng_bytecode_sync_state(runtime);
- nr_captures++;
- }
- CMM_STORE_SHARED(event_notifier_priv->pub->eval_capture,
- !!nr_captures);
- }
- lttng_ust_tp_probe_prune_release_queue();
-}
-
-/*
- * Apply enablers to session events, adding events to the session if
- * need be. This is required after each modification applied to an
- * active session, and right before session "start".
- * "Lazy" sync means we only sync if required.
- */
-static
-void lttng_session_lazy_sync_event_enablers(struct lttng_ust_session *session)
-{
- /* We can skip if session is not active */
- if (!session->active)
- return;
- lttng_session_sync_event_enablers(session);
-}
-
-/*
- * Update all sessions with the given app context.
- * Called with ust lock held.
- * This is invoked when an application context gets loaded/unloaded. It
- * ensures the context callbacks are in sync with the application
- * context (either app context callbacks, or dummy callbacks).
- */
-void lttng_ust_context_set_session_provider(const char *name,
- size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan),
- void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
- void *priv)
-{
- struct lttng_ust_session_private *session_priv;
-
- cds_list_for_each_entry(session_priv, &sessions, node) {
- struct lttng_ust_channel_buffer_private *chan;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
- int ret;
-
- ret = lttng_ust_context_set_provider_rcu(&session_priv->ctx,
- name, get_size, record, get_value, priv);
- if (ret)
- abort();
- cds_list_for_each_entry(chan, &session_priv->chan_head, node) {
- ret = lttng_ust_context_set_provider_rcu(&chan->ctx,
- name, get_size, record, get_value, priv);
- if (ret)
- abort();
- }
- cds_list_for_each_entry(event_recorder_priv, &session_priv->events_head, node) {
- ret = lttng_ust_context_set_provider_rcu(&event_recorder_priv->ctx,
- name, get_size, record, get_value, priv);
- if (ret)
- abort();
- }
- }
-}
-
-/*
- * Update all event_notifier groups with the given app context.
- * Called with ust lock held.
- * This is invoked when an application context gets loaded/unloaded. It
- * ensures the context callbacks are in sync with the application
- * context (either app context callbacks, or dummy callbacks).
- */
-void lttng_ust_context_set_event_notifier_group_provider(const char *name,
- size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan),
- void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
- void *priv)
-{
- struct lttng_event_notifier_group *event_notifier_group;
-
- cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
- int ret;
-
- ret = lttng_ust_context_set_provider_rcu(
- &event_notifier_group->ctx,
- name, get_size, record, get_value, priv);
- if (ret)
- abort();
- }
-}
-
-int lttng_ust_session_uuid_validate(struct lttng_ust_session *session,
- unsigned char *uuid)
-{
- if (!session)
- return 0;
- /* Compare UUID with session. */
- if (session->priv->uuid_set) {
- if (memcmp(session->priv->uuid, uuid, LTTNG_UST_UUID_LEN)) {
- return -1;
- }
- } else {
- memcpy(session->priv->uuid, uuid, LTTNG_UST_UUID_LEN);
- session->priv->uuid_set = true;
- }
- return 0;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <error.h>
-#include <dlfcn.h>
-#include <stdlib.h>
-#include "common/logging.h"
-#include <lttng/ust-getcpu.h>
-#include <urcu/system.h>
-#include <urcu/arch.h>
-
-#include "getenv.h"
-#include "common/ringbuffer/getcpu.h"
-
-int (*lttng_get_cpu)(void);
-
-static
-void *getcpu_handle;
-
-int lttng_ust_getcpu_override(int (*getcpu)(void))
-{
- CMM_STORE_SHARED(lttng_get_cpu, getcpu);
- return 0;
-}
-
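-/*
- * Load the getcpu override plugin named by the LTTNG_UST_GETCPU_PLUGIN
- * environment variable, if any, and call its
- * lttng_ust_getcpu_plugin_init() entry point.
- */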
-void lttng_ust_getcpu_init(void)
-{
- const char *libname;
- void (*libinit)(void);
-
- if (getcpu_handle)
- return;
- libname = lttng_ust_getenv("LTTNG_UST_GETCPU_PLUGIN");
- if (!libname)
- return;
- getcpu_handle = dlopen(libname, RTLD_NOW);
- if (!getcpu_handle) {
- PERROR("Cannot load LTTng UST getcpu override library %s",
- libname);
- return;
- }
- dlerror();
- libinit = (void (*)(void)) dlsym(getcpu_handle,
- "lttng_ust_getcpu_plugin_init");
- if (!libinit) {
- PERROR("Cannot find LTTng UST getcpu override library %s initialization function lttng_ust_getcpu_plugin_init()",
- libname);
- return;
- }
- libinit();
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng hash table helpers.
- */
-
-#ifndef _LTTNG_HASH_HELPER_H
-#define _LTTNG_HASH_HELPER_H
-
-#include <assert.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <urcu/compiler.h>
-
-/*
- * Hash function
- * Source: http://burtleburtle.net/bob/c/lookup3.c
- * Originally Public Domain
- */
-
-#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
-
-#define mix(a, b, c) \
-do { \
- a -= c; a ^= rot(c, 4); c += b; \
- b -= a; b ^= rot(a, 6); a += c; \
- c -= b; c ^= rot(b, 8); b += a; \
- a -= c; a ^= rot(c, 16); c += b; \
- b -= a; b ^= rot(a, 19); a += c; \
- c -= b; c ^= rot(b, 4); b += a; \
-} while (0)
-
-#define final(a, b, c) \
-{ \
- c ^= b; c -= rot(b, 14); \
- a ^= c; a -= rot(c, 11); \
- b ^= a; b -= rot(a, 25); \
- c ^= b; c -= rot(b, 16); \
- a ^= c; a -= rot(c, 4);\
- b ^= a; b -= rot(a, 14); \
- c ^= b; c -= rot(b, 24); \
-}
-
-static inline
-uint32_t lttng_hash_u32(const uint32_t *k, size_t length, uint32_t initval)
- __attribute__((unused));
-static inline
-uint32_t lttng_hash_u32(
- const uint32_t *k, /* the key, an array of uint32_t values */
- size_t length, /* the length of the key, in uint32_ts */
- uint32_t initval) /* the previous hash, or an arbitrary value */
-{
- uint32_t a, b, c;
-
- /* Set up the internal state */
- a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;
-
- /*----------------------------------------- handle most of the key */
- while (length > 3) {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a, b, c);
- length -= 3;
- k += 3;
- }
-
- /*----------------------------------- handle the last 3 uint32_t's */
- switch (length) { /* all the case statements fall through */
- case 3: c += k[2];
- case 2: b += k[1];
- case 1: a += k[0];
- final(a, b, c);
- case 0: /* case 0: nothing left to add */
- break;
- }
- /*---------------------------------------------- report the result */
- return c;
-}
-
-static inline
-void lttng_hashword2(
- const uint32_t *k, /* the key, an array of uint32_t values */
- size_t length, /* the length of the key, in uint32_ts */
- uint32_t *pc, /* IN: seed OUT: primary hash value */
- uint32_t *pb) /* IN: more seed OUT: secondary hash value */
-{
- uint32_t a, b, c;
-
- /* Set up the internal state */
- a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
- c += *pb;
-
- /*----------------------------------------- handle most of the key */
- while (length > 3) {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a, b, c);
- length -= 3;
- k += 3;
- }
-
- /*----------------------------------- handle the last 3 uint32_t's */
- switch (length) { /* all the case statements fall through */
- case 3: c += k[2]; /* fall through */
- case 2: b += k[1]; /* fall through */
- case 1: a += k[0];
- final(a, b, c); /* fall through */
- case 0: /* case 0: nothing left to add */
- break;
- }
- /*---------------------------------------------- report the result */
- *pc = c;
- *pb = b;
-}
-
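-/*
- * Hash a pointer-sized key (the pointer value itself) into an unsigned
- * long, selecting the 32-bit or 64-bit code path based on the
- * architecture word size.
- */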
-#if (CAA_BITS_PER_LONG == 32)
-static inline
-unsigned long lttng_hash_mix(const void *_key, size_t length, unsigned long seed)
-{
- unsigned int key = (unsigned int) _key;
-
- assert(length == sizeof(unsigned int));
- return lttng_hash_u32(&key, 1, seed);
-}
-#else
-static inline
-unsigned long lttng_hash_mix(const void *_key, size_t length, unsigned long seed)
-{
- union {
- uint64_t v64;
- uint32_t v32[2];
- } v;
- union {
- uint64_t v64;
- uint32_t v32[2];
- } key;
-
- assert(length == sizeof(unsigned long));
- v.v64 = (uint64_t) seed;
- key.v64 = (uint64_t) _key;
- lttng_hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);
- return v.v64;
-}
-#endif
-
-#endif /* _LTTNG_HASH_HELPER_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright 2010-2012 (C) Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Holds LTTng probes registry.
- */
-
-#define _LGPL_SOURCE
-#include <string.h>
-#include <errno.h>
-#include <urcu/list.h>
-#include <urcu/hlist.h>
-#include <lttng/ust-events.h>
-#include <lttng/tracepoint.h>
-#include "tracepoint-internal.h"
-#include <assert.h>
-#include "common/macros.h"
-#include <ctype.h>
-
-#include "lttng-tracer-core.h"
-#include "jhash.h"
-#include "error.h"
-#include "ust-events-internal.h"
-
-/*
- * probe list is protected by ust_lock()/ust_unlock().
- */
-static CDS_LIST_HEAD(_probe_list);
-
-/*
- * List of probes registered but not yet processed.
- */
-static CDS_LIST_HEAD(lazy_probe_init);
-
-/*
- * lazy_nesting counter ensures we don't trigger lazy probe registration
- * fixup while we are performing the fixup. It is protected by the ust
- * mutex.
- */
-static int lazy_nesting;
-
-/*
- * Validate that each event within the probe provider refers to the
- * right probe, and that the resulting name is not too long.
- */
-static
-bool check_event_provider(const struct lttng_ust_probe_desc *probe_desc)
-{
- int i;
-
- for (i = 0; i < probe_desc->nr_events; i++) {
- const struct lttng_ust_event_desc *event_desc = probe_desc->event_desc[i];
-
- if (event_desc->probe_desc != probe_desc) {
- ERR("Error registering probe provider '%s'. Event '%s:%s' refers to the wrong provider descriptor.",
- probe_desc->provider_name, probe_desc->provider_name, event_desc->event_name);
- return false; /* provider mismatch */
- }
- if (!lttng_ust_validate_event_name(event_desc)) {
- ERR("Error registering probe provider '%s'. Event '%s:%s' name is too long.",
- probe_desc->provider_name, probe_desc->provider_name, event_desc->event_name);
-			return false; /* event name too long */
- }
- }
- return true;
-}
-
-/*
- * Called under ust lock.
- */
-static
-void lttng_lazy_probe_register(struct lttng_ust_registered_probe *reg_probe)
-{
- struct lttng_ust_registered_probe *iter;
- struct cds_list_head *probe_list;
-
- /*
- * The provider ensures there are no duplicate event names.
- * Duplicated TRACEPOINT_EVENT event names would generate a
- * compile-time error due to duplicated symbol names.
- */
-
- /*
- * We sort the providers by struct lttng_ust_probe_desc pointer
- * address.
- */
- probe_list = &_probe_list;
- cds_list_for_each_entry_reverse(iter, probe_list, head) {
- BUG_ON(iter == reg_probe); /* Should never be in the list twice */
- if (iter < reg_probe) {
- /* We belong to the location right after iter. */
-			cds_list_add(&reg_probe->head, &iter->head);
- goto probe_added;
- }
- }
- /* We should be added at the head of the list */
-	cds_list_add(&reg_probe->head, probe_list);
-probe_added:
- DBG("just registered probe %s containing %u events",
- reg_probe->desc->provider_name, reg_probe->desc->nr_events);
-}
-
-/*
- * Called under ust lock.
- */
-static
-void fixup_lazy_probes(void)
-{
- struct lttng_ust_registered_probe *iter, *tmp;
- int ret;
-
- lazy_nesting++;
- cds_list_for_each_entry_safe(iter, tmp,
- &lazy_probe_init, lazy_init_head) {
- lttng_lazy_probe_register(iter);
- iter->lazy = 0;
- cds_list_del(&iter->lazy_init_head);
- }
- ret = lttng_fix_pending_events();
- assert(!ret);
- lazy_nesting--;
-}
-
-/*
- * Called under ust lock.
- */
-struct cds_list_head *lttng_get_probe_list_head(void)
-{
- if (!lazy_nesting && !cds_list_empty(&lazy_probe_init))
- fixup_lazy_probes();
- return &_probe_list;
-}
-
-static
-int check_provider_version(const struct lttng_ust_probe_desc *desc)
-{
- /*
- * Check tracepoint provider version compatibility.
- */
- if (desc->major <= LTTNG_UST_PROVIDER_MAJOR) {
- DBG("Provider \"%s\" accepted, version %u.%u is compatible "
- "with LTTng UST provider version %u.%u.",
- desc->provider_name, desc->major, desc->minor,
- LTTNG_UST_PROVIDER_MAJOR,
- LTTNG_UST_PROVIDER_MINOR);
- if (desc->major < LTTNG_UST_PROVIDER_MAJOR) {
- DBG("However, some LTTng UST features might not be "
- "available for this provider unless it is "
- "recompiled against a more recent LTTng UST.");
- }
- return 1; /* accept */
- } else {
- ERR("Provider \"%s\" rejected, version %u.%u is incompatible "
- "with LTTng UST provider version %u.%u. Please upgrade "
- "LTTng UST.",
- desc->provider_name, desc->major, desc->minor,
- LTTNG_UST_PROVIDER_MAJOR,
- LTTNG_UST_PROVIDER_MINOR);
- return 0; /* reject */
- }
-}
-
-struct lttng_ust_registered_probe *lttng_ust_probe_register(const struct lttng_ust_probe_desc *desc)
-{
- struct lttng_ust_registered_probe *reg_probe = NULL;
-
- lttng_ust_fixup_tls();
-
- /*
- * If version mismatch, don't register, but don't trigger assert
- * on caller. The version check just prints an error.
- */
- if (!check_provider_version(desc))
- return NULL;
- if (!check_event_provider(desc))
- return NULL;
-
- ust_lock_nocheck();
-
- reg_probe = zmalloc(sizeof(struct lttng_ust_registered_probe));
- if (!reg_probe)
- goto end;
- reg_probe->desc = desc;
-	cds_list_add(&reg_probe->lazy_init_head, &lazy_probe_init);
- reg_probe->lazy = 1;
-
- DBG("adding probe %s containing %u events to lazy registration list",
- desc->provider_name, desc->nr_events);
- /*
- * If there is at least one active session, we need to register
-	 * the probe immediately: event registration cannot be delayed
-	 * because the events are needed right away.
- */
- if (lttng_session_active())
- fixup_lazy_probes();
-
- lttng_fix_pending_event_notifiers();
-end:
- ust_unlock();
- return reg_probe;
-}
-
-void lttng_ust_probe_unregister(struct lttng_ust_registered_probe *reg_probe)
-{
- lttng_ust_fixup_tls();
-
- if (!reg_probe)
- return;
- if (!check_provider_version(reg_probe->desc))
- return;
-
- ust_lock_nocheck();
- if (!reg_probe->lazy)
-		cds_list_del(&reg_probe->head);
- else
-		cds_list_del(&reg_probe->lazy_init_head);
-
- lttng_probe_provider_unregister_events(reg_probe->desc);
- DBG("just unregistered probes of provider %s", reg_probe->desc->provider_name);
- ust_unlock();
- free(reg_probe);
-}
-
-void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list)
-{
- struct tp_list_entry *list_entry, *tmp;
-
- cds_list_for_each_entry_safe(list_entry, tmp, &list->head, head) {
- cds_list_del(&list_entry->head);
- free(list_entry);
- }
-}
-
-/*
- * called with UST lock held.
- */
-int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list)
-{
- struct lttng_ust_registered_probe *reg_probe;
- struct cds_list_head *probe_list;
- int i;
-
- probe_list = lttng_get_probe_list_head();
- CDS_INIT_LIST_HEAD(&list->head);
- cds_list_for_each_entry(reg_probe, probe_list, head) {
- const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
-
- for (i = 0; i < probe_desc->nr_events; i++) {
- const struct lttng_ust_event_desc *event_desc =
- probe_desc->event_desc[i];
- struct tp_list_entry *list_entry;
-
- /* Skip event if name is too long. */
- if (!lttng_ust_validate_event_name(event_desc))
- continue;
- list_entry = zmalloc(sizeof(*list_entry));
- if (!list_entry)
- goto err_nomem;
- cds_list_add(&list_entry->head, &list->head);
- lttng_ust_format_event_name(event_desc, list_entry->tp.name);
- if (!event_desc->loglevel) {
- list_entry->tp.loglevel = TRACE_DEFAULT;
- } else {
- list_entry->tp.loglevel = *(*event_desc->loglevel);
- }
- }
- }
- if (cds_list_empty(&list->head))
- list->iter = NULL;
- else
- list->iter =
- cds_list_first_entry(&list->head, struct tp_list_entry, head);
- return 0;
-
-err_nomem:
- lttng_probes_prune_event_list(list);
- return -ENOMEM;
-}
-
-/*
- * Return current iteration position, advance internal iterator to next.
- * Return NULL if end of list.
- */
-struct lttng_ust_abi_tracepoint_iter *
- lttng_ust_tracepoint_list_get_iter_next(struct lttng_ust_tracepoint_list *list)
-{
- struct tp_list_entry *entry;
-
- if (!list->iter)
- return NULL;
- entry = list->iter;
- if (entry->head.next == &list->head)
- list->iter = NULL;
- else
- list->iter = cds_list_entry(entry->head.next,
- struct tp_list_entry, head);
- return &entry->tp;
-}
-
-void lttng_probes_prune_field_list(struct lttng_ust_field_list *list)
-{
- struct tp_field_list_entry *list_entry, *tmp;
-
- cds_list_for_each_entry_safe(list_entry, tmp, &list->head, head) {
- cds_list_del(&list_entry->head);
- free(list_entry);
- }
-}
-
-/*
- * called with UST lock held.
- */
-int lttng_probes_get_field_list(struct lttng_ust_field_list *list)
-{
- struct lttng_ust_registered_probe *reg_probe;
- struct cds_list_head *probe_list;
- int i;
-
- probe_list = lttng_get_probe_list_head();
- CDS_INIT_LIST_HEAD(&list->head);
- cds_list_for_each_entry(reg_probe, probe_list, head) {
- const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
-
- for (i = 0; i < probe_desc->nr_events; i++) {
- const struct lttng_ust_event_desc *event_desc =
- probe_desc->event_desc[i];
- int j;
-
- if (event_desc->nr_fields == 0) {
- /* Events without fields. */
- struct tp_field_list_entry *list_entry;
-
- /* Skip event if name is too long. */
- if (!lttng_ust_validate_event_name(event_desc))
- continue;
- list_entry = zmalloc(sizeof(*list_entry));
- if (!list_entry)
- goto err_nomem;
- cds_list_add(&list_entry->head, &list->head);
- lttng_ust_format_event_name(event_desc, list_entry->field.event_name);
- list_entry->field.field_name[0] = '\0';
- list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
- if (!event_desc->loglevel) {
- list_entry->field.loglevel = TRACE_DEFAULT;
- } else {
- list_entry->field.loglevel = *(*event_desc->loglevel);
- }
- list_entry->field.nowrite = 1;
- }
-
- for (j = 0; j < event_desc->nr_fields; j++) {
- const struct lttng_ust_event_field *event_field =
- event_desc->fields[j];
- struct tp_field_list_entry *list_entry;
-
- /* Skip event if name is too long. */
- if (!lttng_ust_validate_event_name(event_desc))
- continue;
- list_entry = zmalloc(sizeof(*list_entry));
- if (!list_entry)
- goto err_nomem;
- cds_list_add(&list_entry->head, &list->head);
- lttng_ust_format_event_name(event_desc, list_entry->field.event_name);
- strncpy(list_entry->field.field_name,
- event_field->name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- list_entry->field.field_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- switch (event_field->type->type) {
- case lttng_ust_type_integer:
- list_entry->field.type = LTTNG_UST_ABI_FIELD_INTEGER;
- break;
- case lttng_ust_type_string:
- list_entry->field.type = LTTNG_UST_ABI_FIELD_STRING;
- break;
- case lttng_ust_type_array:
- if (lttng_ust_get_type_array(event_field->type)->encoding == lttng_ust_string_encoding_none)
- list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
- else
- list_entry->field.type = LTTNG_UST_ABI_FIELD_STRING;
- break;
- case lttng_ust_type_sequence:
- if (lttng_ust_get_type_sequence(event_field->type)->encoding == lttng_ust_string_encoding_none)
- list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
- else
- list_entry->field.type = LTTNG_UST_ABI_FIELD_STRING;
- break;
- case lttng_ust_type_float:
- list_entry->field.type = LTTNG_UST_ABI_FIELD_FLOAT;
- break;
- case lttng_ust_type_enum:
- list_entry->field.type = LTTNG_UST_ABI_FIELD_ENUM;
- break;
- default:
- list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
- }
- if (!event_desc->loglevel) {
- list_entry->field.loglevel = TRACE_DEFAULT;
- } else {
- list_entry->field.loglevel = *(*event_desc->loglevel);
- }
- list_entry->field.nowrite = event_field->nowrite;
- }
- }
- }
- if (cds_list_empty(&list->head))
- list->iter = NULL;
- else
- list->iter =
- cds_list_first_entry(&list->head,
- struct tp_field_list_entry, head);
- return 0;
-
-err_nomem:
- lttng_probes_prune_field_list(list);
- return -ENOMEM;
-}
-
-/*
- * Return current iteration position, advance internal iterator to next.
- * Return NULL if end of list.
- */
-struct lttng_ust_abi_field_iter *
- lttng_ust_field_list_get_iter_next(struct lttng_ust_field_list *list)
-{
- struct tp_field_list_entry *entry;
-
- if (!list->iter)
- return NULL;
- entry = list->iter;
- if (entry->head.next == &list->head)
- list->iter = NULL;
- else
- list->iter = cds_list_entry(entry->head.next,
- struct tp_field_list_entry, head);
- return &entry->field;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_RB_CLIENT_H
-#define _LTTNG_RB_CLIENT_H
-
-#include <stdint.h>
-#include "common/ringbuffer/ringbuffer-config.h"
-
-struct lttng_ust_client_lib_ring_buffer_client_cb {
- struct lttng_ust_lib_ring_buffer_client_cb parent;
-
- int (*timestamp_begin) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *timestamp_begin);
- int (*timestamp_end) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *timestamp_end);
- int (*events_discarded) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *events_discarded);
- int (*content_size) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *content_size);
- int (*packet_size) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *packet_size);
- int (*stream_id) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *stream_id);
- int (*current_timestamp) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *ts);
- int (*sequence_number) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan, uint64_t *seq);
- int (*instance_id) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan, uint64_t *id);
-};
-
-/*
- * The ring buffer clients init/exit symbols are private ABI for
- * liblttng-ust-ctl, which is why they are not hidden.
- */
-void lttng_ust_ring_buffer_clients_init(void);
-void lttng_ust_ring_buffer_clients_exit(void);
-
-void lttng_ring_buffer_client_overwrite_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_client_overwrite_rt_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_client_discard_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_client_discard_rt_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_metadata_client_init(void)
- __attribute__((visibility("hidden")));
-
-
-void lttng_ring_buffer_client_overwrite_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_client_overwrite_rt_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_client_discard_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_client_discard_rt_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_metadata_client_exit(void)
- __attribute__((visibility("hidden")));
-
-
-void lttng_ust_fixup_ring_buffer_client_overwrite_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_fixup_ring_buffer_client_overwrite_rt_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_fixup_ring_buffer_client_discard_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_fixup_ring_buffer_client_discard_rt_tls(void)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_RB_CLIENT_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer client (discard mode) for RT.
- */
-
-#define _LGPL_SOURCE
-#include "lttng-tracer.h"
-#include "lttng-rb-clients.h"
-
-#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-#define RING_BUFFER_MODE_TEMPLATE_STRING "discard-rt"
-#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
- lttng_ust_fixup_ring_buffer_client_discard_rt_tls
-#define RING_BUFFER_MODE_TEMPLATE_INIT \
- lttng_ring_buffer_client_discard_rt_init
-#define RING_BUFFER_MODE_TEMPLATE_EXIT \
- lttng_ring_buffer_client_discard_rt_exit
-#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_DISCARD_RT
-#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_TIMER
-#include "lttng-ring-buffer-client-template.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer client (discard mode).
- */
-
-#define _LGPL_SOURCE
-#include "lttng-tracer.h"
-#include "lttng-rb-clients.h"
-
-#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-#define RING_BUFFER_MODE_TEMPLATE_STRING "discard"
-#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
- lttng_ust_fixup_ring_buffer_client_discard_tls
-#define RING_BUFFER_MODE_TEMPLATE_INIT \
- lttng_ring_buffer_client_discard_init
-#define RING_BUFFER_MODE_TEMPLATE_EXIT \
- lttng_ring_buffer_client_discard_exit
-#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_DISCARD
-#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_WRITER
-#include "lttng-ring-buffer-client-template.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer client (overwrite mode) for RT.
- */
-
-#define _LGPL_SOURCE
-#include "lttng-tracer.h"
-#include "lttng-rb-clients.h"
-
-#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
-#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite-rt"
-#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
- lttng_ust_fixup_ring_buffer_client_overwrite_rt_tls
-#define RING_BUFFER_MODE_TEMPLATE_INIT \
- lttng_ring_buffer_client_overwrite_rt_init
-#define RING_BUFFER_MODE_TEMPLATE_EXIT \
- lttng_ring_buffer_client_overwrite_rt_exit
-#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_OVERWRITE_RT
-#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_TIMER
-#include "lttng-ring-buffer-client-template.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer client (overwrite mode).
- */
-
-#define _LGPL_SOURCE
-#include "lttng-tracer.h"
-#include "lttng-rb-clients.h"
-
-#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
-#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite"
-#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
- lttng_ust_fixup_ring_buffer_client_overwrite_tls
-#define RING_BUFFER_MODE_TEMPLATE_INIT \
- lttng_ring_buffer_client_overwrite_init
-#define RING_BUFFER_MODE_TEMPLATE_EXIT \
- lttng_ring_buffer_client_overwrite_exit
-#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_OVERWRITE
-#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_WRITER
-#include "lttng-ring-buffer-client-template.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer client template.
- */
-
-#include <limits.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include <ust-events-internal.h>
-#include <lttng/urcu/pointer.h>
-#include "common/bitfield.h"
-#include "common/align.h"
-#include "clock.h"
-#include "context-internal.h"
-#include "lttng-tracer.h"
-#include "common/ringbuffer/frontend_types.h"
-#include <urcu/tls-compat.h>
-
-#define LTTNG_COMPACT_EVENT_BITS 5
-#define LTTNG_COMPACT_TSC_BITS 27
-
-/*
- * Keep the natural field alignment for _each field_ within this structure if
- * you ever add/remove a field from this header. Packed attribute is not used
- * because gcc generates poor code on at least powerpc and mips. Don't ever
- * let gcc add padding between the structure elements.
- */
-
-struct packet_header {
- /* Trace packet header */
- uint32_t magic; /*
- * Trace magic number.
- * contains endianness information.
- */
- uint8_t uuid[LTTNG_UST_UUID_LEN];
- uint32_t stream_id;
- uint64_t stream_instance_id;
-
- struct {
- /* Stream packet context */
- uint64_t timestamp_begin; /* Cycle count at subbuffer start */
- uint64_t timestamp_end; /* Cycle count at subbuffer end */
- uint64_t content_size; /* Size of data in subbuffer */
- uint64_t packet_size; /* Subbuffer size (include padding) */
- uint64_t packet_seq_num; /* Packet sequence number */
- unsigned long events_discarded; /*
- * Events lost in this subbuffer since
- * the beginning of the trace.
- * (may overflow)
- */
- uint32_t cpu_id; /* CPU id associated with stream */
- uint8_t header_end; /* End of header */
- } ctx;
-};
-
-struct lttng_client_ctx {
- size_t packet_context_len;
- size_t event_context_len;
- struct lttng_ust_ctx *chan_ctx;
- struct lttng_ust_ctx *event_ctx;
-};
-
-/*
- * Indexed by lib_ring_buffer_nesting_count().
- */
-typedef struct lttng_ust_lib_ring_buffer_ctx_private private_ctx_stack_t[LIB_RING_BUFFER_MAX_NESTING];
-static DEFINE_URCU_TLS(private_ctx_stack_t, private_ctx_stack);
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(private_ctx_stack)));
-}
-
-static inline uint64_t lib_ring_buffer_clock_read(
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
-{
- return trace_clock_read64();
-}
-
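-/*
- * Helpers to compute the aligned size of the channel/event context
- * fields and to record them into the ring buffer.
- */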
-static inline
-size_t ctx_get_aligned_size(size_t offset, struct lttng_ust_ctx *ctx,
- size_t ctx_len)
-{
- size_t orig_offset = offset;
-
- if (caa_likely(!ctx))
- return 0;
- offset += lttng_ust_lib_ring_buffer_align(offset, ctx->largest_align);
- offset += ctx_len;
- return offset - orig_offset;
-}
-
-static inline
-void ctx_get_struct_size(struct lttng_ust_ctx *ctx, size_t *ctx_len)
-{
- int i;
- size_t offset = 0;
-
- if (caa_likely(!ctx)) {
- *ctx_len = 0;
- return;
- }
- for (i = 0; i < ctx->nr_fields; i++)
- offset += ctx->fields[i].get_size(ctx->fields[i].priv, offset);
- *ctx_len = offset;
-}
-
-static inline
-void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
- struct lttng_ust_channel_buffer *chan,
- struct lttng_ust_ctx *ctx)
-{
- int i;
-
- if (caa_likely(!ctx))
- return;
- lttng_ust_lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
- for (i = 0; i < ctx->nr_fields; i++)
- ctx->fields[i].record(ctx->fields[i].priv, bufctx, chan);
-}
-
-/*
- * record_header_size - Calculate the header size and padding necessary.
- * @config: ring buffer instance configuration
- * @chan: channel
- * @offset: offset in the write buffer
- * @pre_header_padding: padding to add before the header (output)
- * @ctx: reservation context
- *
- * Returns the event header size (including padding).
- *
- * The payload must itself determine its own alignment from the biggest type it
- * contains.
- */
-static __inline__
-size_t record_header_size(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan,
- size_t offset,
- size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_client_ctx *client_ctx)
-{
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
- size_t orig_offset = offset;
- size_t padding;
-
- switch (lttng_chan->priv->header_type) {
- case 1: /* compact */
- padding = lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
- offset += padding;
- if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- offset += sizeof(uint32_t); /* id and timestamp */
- } else {
- /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
- offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
- /* Align extended struct on largest member */
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
- offset += sizeof(uint32_t); /* id */
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
- offset += sizeof(uint64_t); /* timestamp */
- }
- break;
- case 2: /* large */
- padding = lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
- offset += padding;
- offset += sizeof(uint16_t);
- if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
- offset += sizeof(uint32_t); /* timestamp */
- } else {
- /* Align extended struct on largest member */
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
- offset += sizeof(uint32_t); /* id */
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
- offset += sizeof(uint64_t); /* timestamp */
- }
- break;
- default:
- padding = 0;
- WARN_ON_ONCE(1);
- }
- offset += ctx_get_aligned_size(offset, client_ctx->chan_ctx,
- client_ctx->packet_context_len);
- offset += ctx_get_aligned_size(offset, client_ctx->event_ctx,
- client_ctx->event_context_len);
- *pre_header_padding = padding;
- return offset - orig_offset;
-}
-
-#include "common/ringbuffer/api.h"
-#include "lttng-rb-clients.h"
-
-static
-void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_client_ctx *client_ctx,
- uint32_t event_id);
-
-/*
- * lttng_write_event_header
- *
- * Writes the event header to the offset (already aligned on 32-bits).
- *
- * @config: ring buffer instance configuration
- * @ctx: reservation context
- * @event_id: event ID
- */
-static __inline__
-void lttng_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_client_ctx *client_ctx,
- uint32_t event_id)
-{
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->priv->chan);
-
- if (caa_unlikely(ctx->priv->rflags))
- goto slow_path;
-
- switch (lttng_chan->priv->header_type) {
- case 1: /* compact */
- {
- uint32_t id_time = 0;
-
- bt_bitfield_write(&id_time, uint32_t,
- 0,
- LTTNG_COMPACT_EVENT_BITS,
- event_id);
- bt_bitfield_write(&id_time, uint32_t,
- LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS,
- ctx->priv->tsc);
- lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
- break;
- }
- case 2: /* large */
- {
- uint32_t timestamp = (uint32_t) ctx->priv->tsc;
- uint16_t id = event_id;
-
- lib_ring_buffer_write(config, ctx, &id, sizeof(id));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint32_t));
-		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
- break;
- }
- default:
- WARN_ON_ONCE(1);
- }
-
- ctx_record(ctx, lttng_chan, client_ctx->chan_ctx);
- ctx_record(ctx, lttng_chan, client_ctx->event_ctx);
- lttng_ust_lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
-
- return;
-
-slow_path:
- lttng_write_event_header_slow(config, ctx, client_ctx, event_id);
-}
-
-static
-void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_client_ctx *client_ctx,
- uint32_t event_id)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->priv->chan);
-
- switch (lttng_chan->priv->header_type) {
- case 1: /* compact */
- if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- uint32_t id_time = 0;
-
- bt_bitfield_write(&id_time, uint32_t,
- 0,
- LTTNG_COMPACT_EVENT_BITS,
- event_id);
- bt_bitfield_write(&id_time, uint32_t,
- LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS,
- ctx_private->tsc);
- lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
- } else {
- uint8_t id = 0;
- uint64_t timestamp = ctx_private->tsc;
-
- bt_bitfield_write(&id, uint8_t,
- 0,
- LTTNG_COMPACT_EVENT_BITS,
- 31);
- lib_ring_buffer_write(config, ctx, &id, sizeof(id));
- /* Align extended struct on largest member */
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
- lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
-			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
- }
- break;
- case 2: /* large */
- {
- if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- uint32_t timestamp = (uint32_t) ctx_private->tsc;
- uint16_t id = event_id;
-
- lib_ring_buffer_write(config, ctx, &id, sizeof(id));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint32_t));
-			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
- } else {
- uint16_t id = 65535;
- uint64_t timestamp = ctx_private->tsc;
-
- lib_ring_buffer_write(config, ctx, &id, sizeof(id));
- /* Align extended struct on largest member */
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
- lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
-			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
- }
- break;
- }
- default:
- WARN_ON_ONCE(1);
- }
- ctx_record(ctx, lttng_chan, client_ctx->chan_ctx);
- ctx_record(ctx, lttng_chan, client_ctx->event_ctx);
- lttng_ust_lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
-}
-
-static const struct lttng_ust_lib_ring_buffer_config client_config;
-
-static uint64_t client_ring_buffer_clock_read(struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return lib_ring_buffer_clock_read(chan);
-}
-
-static
-size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- size_t offset,
- size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- void *client_ctx)
-{
- return record_header_size(config, chan, offset,
- pre_header_padding, ctx, client_ctx);
-}
-
-/**
- * client_packet_header_size - called on buffer-switch to a new sub-buffer
- *
- * Return header size without padding after the structure. Don't use packed
- * structure because gcc generates inefficient code on some architectures
- * (powerpc, mips..)
- */
-static size_t client_packet_header_size(void)
-{
- return offsetof(struct packet_header, ctx.header_end);
-}
-
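-/*
- * client_buffer_begin - called on buffer-switch to a new sub-buffer:
- * initialize the packet header for the new packet.
- */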
-static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
- unsigned int subbuf_idx,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
- struct packet_header *header =
- (struct packet_header *)
- lib_ring_buffer_offset_address(&buf->backend,
- subbuf_idx * chan->backend.subbuf_size,
- handle);
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
- uint64_t cnt = shmp_index(handle, buf->backend.buf_cnt, subbuf_idx)->seq_cnt;
-
- assert(header);
- if (!header)
- return;
- header->magic = CTF_MAGIC_NUMBER;
- memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
- header->stream_id = lttng_chan->priv->id;
- header->stream_instance_id = buf->backend.cpu;
- header->ctx.timestamp_begin = tsc;
- header->ctx.timestamp_end = 0;
- header->ctx.content_size = ~0ULL; /* for debugging */
- header->ctx.packet_size = ~0ULL;
- header->ctx.packet_seq_num = chan->backend.num_subbuf * cnt + subbuf_idx;
- header->ctx.events_discarded = 0;
- header->ctx.cpu_id = buf->backend.cpu;
-}
-
-/*
- * offset is assumed to never be 0 here: never deliver a completely empty
- * subbuffer. data_size is between 1 and subbuf_size.
- */
-static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
- unsigned int subbuf_idx, unsigned long data_size,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
- struct packet_header *header =
- (struct packet_header *)
- lib_ring_buffer_offset_address(&buf->backend,
- subbuf_idx * chan->backend.subbuf_size,
- handle);
- unsigned long records_lost = 0;
-
- assert(header);
- if (!header)
- return;
- header->ctx.timestamp_end = tsc;
- header->ctx.content_size =
- (uint64_t) data_size * CHAR_BIT; /* in bits */
- header->ctx.packet_size =
- (uint64_t) LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
-
- records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
- records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
- records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
- header->ctx.events_discarded = records_lost;
-}
-
-static int client_buffer_create(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- void *priv __attribute__((unused)),
- int cpu __attribute__((unused)),
- const char *name __attribute__((unused)),
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
- return 0;
-}
-
-static void client_buffer_finalize(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- void *priv __attribute__((unused)),
- int cpu __attribute__((unused)),
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
-}
-
-static void client_content_size_field(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- size_t *offset, size_t *length)
-{
- *offset = offsetof(struct packet_header, ctx.content_size);
- *length = sizeof(((struct packet_header *) NULL)->ctx.content_size);
-}
-
-static void client_packet_size_field(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- size_t *offset, size_t *length)
-{
- *offset = offsetof(struct packet_header, ctx.packet_size);
- *length = sizeof(((struct packet_header *) NULL)->ctx.packet_size);
-}
-
-static struct packet_header *client_packet_header(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
-{
- return lib_ring_buffer_read_offset_address(&buf->backend, 0, handle);
-}
-
-static int client_timestamp_begin(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *timestamp_begin)
-{
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct packet_header *header;
-
- header = client_packet_header(buf, handle);
- if (!header)
- return -1;
- *timestamp_begin = header->ctx.timestamp_begin;
- return 0;
-}
-
-static int client_timestamp_end(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *timestamp_end)
-{
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct packet_header *header;
-
- header = client_packet_header(buf, handle);
- if (!header)
- return -1;
- *timestamp_end = header->ctx.timestamp_end;
- return 0;
-}
-
-static int client_events_discarded(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *events_discarded)
-{
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct packet_header *header;
-
- header = client_packet_header(buf, handle);
- if (!header)
- return -1;
- *events_discarded = header->ctx.events_discarded;
- return 0;
-}
-
-static int client_content_size(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *content_size)
-{
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct packet_header *header;
-
- header = client_packet_header(buf, handle);
- if (!header)
- return -1;
- *content_size = header->ctx.content_size;
- return 0;
-}
-
-static int client_packet_size(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *packet_size)
-{
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct packet_header *header;
-
- header = client_packet_header(buf, handle);
- if (!header)
- return -1;
- *packet_size = header->ctx.packet_size;
- return 0;
-}
-
-static int client_stream_id(struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *stream_id)
-{
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
-
- *stream_id = lttng_chan->priv->id;
-
- return 0;
-}
-
-static int client_current_timestamp(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *ts)
-{
- *ts = client_ring_buffer_clock_read(chan);
-
- return 0;
-}
-
-static int client_sequence_number(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *seq)
-{
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct packet_header *header;
-
- header = client_packet_header(buf, handle);
- if (!header)
- return -1;
- *seq = header->ctx.packet_seq_num;
- return 0;
-}
-
-static int client_instance_id(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
- uint64_t *id)
-{
- *id = buf->backend.cpu;
-
- return 0;
-}
-
-static const
-struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
- .parent = {
- .ring_buffer_clock_read = client_ring_buffer_clock_read,
- .record_header_size = client_record_header_size,
- .subbuffer_header_size = client_packet_header_size,
- .buffer_begin = client_buffer_begin,
- .buffer_end = client_buffer_end,
- .buffer_create = client_buffer_create,
- .buffer_finalize = client_buffer_finalize,
- .content_size_field = client_content_size_field,
- .packet_size_field = client_packet_size_field,
- },
- .timestamp_begin = client_timestamp_begin,
- .timestamp_end = client_timestamp_end,
- .events_discarded = client_events_discarded,
- .content_size = client_content_size,
- .packet_size = client_packet_size,
- .stream_id = client_stream_id,
- .current_timestamp = client_current_timestamp,
- .sequence_number = client_sequence_number,
- .instance_id = client_instance_id,
-};
-
-static const struct lttng_ust_lib_ring_buffer_config client_config = {
- .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
- .cb.record_header_size = client_record_header_size,
- .cb.subbuffer_header_size = client_packet_header_size,
- .cb.buffer_begin = client_buffer_begin,
- .cb.buffer_end = client_buffer_end,
- .cb.buffer_create = client_buffer_create,
- .cb.buffer_finalize = client_buffer_finalize,
- .cb.content_size_field = client_content_size_field,
- .cb.packet_size_field = client_packet_size_field,
-
- .tsc_bits = LTTNG_COMPACT_TSC_BITS,
- .alloc = RING_BUFFER_ALLOC_PER_CPU,
- .sync = RING_BUFFER_SYNC_GLOBAL,
- .mode = RING_BUFFER_MODE_TEMPLATE,
- .backend = RING_BUFFER_PAGE,
- .output = RING_BUFFER_MMAP,
- .oops = RING_BUFFER_OOPS_CONSISTENCY,
- .ipi = RING_BUFFER_NO_IPI_BARRIER,
- .wakeup = LTTNG_CLIENT_WAKEUP,
- .client_type = LTTNG_CLIENT_TYPE,
-
- .cb_ptr = &client_cb.parent,
-};
-
-static
-struct lttng_ust_channel_buffer *_channel_create(const char *name,
- void *buf_addr,
- size_t subbuf_size, size_t num_subbuf,
- unsigned int switch_timer_interval,
- unsigned int read_timer_interval,
- unsigned char *uuid,
- uint32_t chan_id,
- const int *stream_fds, int nr_stream_fds,
- int64_t blocking_timeout)
-{
- struct lttng_ust_abi_channel_config chan_priv_init;
- struct lttng_ust_shm_handle *handle;
- struct lttng_ust_channel_buffer *lttng_chan_buf;
-
- lttng_chan_buf = lttng_ust_alloc_channel_buffer();
- if (!lttng_chan_buf)
- return NULL;
- memcpy(lttng_chan_buf->priv->uuid, uuid, LTTNG_UST_UUID_LEN);
- lttng_chan_buf->priv->id = chan_id;
-
- memset(&chan_priv_init, 0, sizeof(chan_priv_init));
- memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
- chan_priv_init.id = chan_id;
-
- handle = channel_create(&client_config, name,
- __alignof__(struct lttng_ust_abi_channel_config),
- sizeof(struct lttng_ust_abi_channel_config),
- &chan_priv_init,
- lttng_chan_buf, buf_addr, subbuf_size, num_subbuf,
- switch_timer_interval, read_timer_interval,
- stream_fds, nr_stream_fds, blocking_timeout);
- if (!handle)
- goto error;
- lttng_chan_buf->priv->rb_chan = shmp(handle, handle->chan);
- return lttng_chan_buf;
-
-error:
- lttng_ust_free_channel_common(lttng_chan_buf->parent);
- return NULL;
-}
-
-static
-void lttng_channel_destroy(struct lttng_ust_channel_buffer *lttng_chan_buf)
-{
- channel_destroy(lttng_chan_buf->priv->rb_chan, lttng_chan_buf->priv->rb_chan->handle, 1);
- lttng_ust_free_channel_common(lttng_chan_buf->parent);
-}
-
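-/*
- * lttng_event_reserve - reserve ring buffer space for an event record
- * and write its event header. The ring buffer nesting count is
- * incremented here and decremented by the matching lttng_event_commit.
- */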
-static
-int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx)
-{
- struct lttng_ust_event_recorder *event_recorder = ctx->client_priv;
- struct lttng_ust_channel_buffer *lttng_chan = event_recorder->chan;
- struct lttng_client_ctx client_ctx;
- int ret, nesting;
- struct lttng_ust_lib_ring_buffer_ctx_private *private_ctx;
- uint32_t event_id;
-
- event_id = event_recorder->priv->id;
- client_ctx.chan_ctx = lttng_ust_rcu_dereference(lttng_chan->priv->ctx);
- client_ctx.event_ctx = lttng_ust_rcu_dereference(event_recorder->priv->ctx);
- /* Compute internal size of context structures. */
- ctx_get_struct_size(client_ctx.chan_ctx, &client_ctx.packet_context_len);
- ctx_get_struct_size(client_ctx.event_ctx, &client_ctx.event_context_len);
-
- nesting = lib_ring_buffer_nesting_inc(&client_config);
- if (nesting < 0)
- return -EPERM;
-
- private_ctx = &URCU_TLS(private_ctx_stack)[nesting];
- memset(private_ctx, 0, sizeof(*private_ctx));
- private_ctx->pub = ctx;
- private_ctx->chan = lttng_chan->priv->rb_chan;
-
- ctx->priv = private_ctx;
-
- switch (lttng_chan->priv->header_type) {
- case 1: /* compact */
- if (event_id > 30)
- private_ctx->rflags |= LTTNG_RFLAG_EXTENDED;
- break;
- case 2: /* large */
- if (event_id > 65534)
- private_ctx->rflags |= LTTNG_RFLAG_EXTENDED;
- break;
- default:
- WARN_ON_ONCE(1);
- }
-
- ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
- if (caa_unlikely(ret))
- goto put;
- if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
- &private_ctx->backend_pages)) {
- ret = -EPERM;
- goto put;
- }
- lttng_write_event_header(&client_config, ctx, &client_ctx, event_id);
- return 0;
-put:
- lib_ring_buffer_nesting_dec(&client_config);
- return ret;
-}
-
-static
-void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
-{
- lib_ring_buffer_commit(&client_config, ctx);
- lib_ring_buffer_nesting_dec(&client_config);
-}
-
-static
-void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const void *src, size_t len, size_t alignment)
-{
- lttng_ust_lib_ring_buffer_align_ctx(ctx, alignment);
- lib_ring_buffer_write(&client_config, ctx, src, len);
-}
-
-static
-void lttng_event_strcpy(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const char *src, size_t len)
-{
- lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
-}
-
-static
-void lttng_event_pstrcpy_pad(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const char *src, size_t len)
-{
- lib_ring_buffer_pstrcpy(&client_config, ctx, src, len, '\0');
-}
-
-static
-int lttng_is_finalized(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
-
- return lib_ring_buffer_channel_is_finalized(rb_chan);
-}
-
-static
-int lttng_is_disabled(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
-
- return lib_ring_buffer_channel_is_disabled(rb_chan);
-}
-
-static
-int lttng_flush_buffer(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
- struct lttng_ust_lib_ring_buffer *buf;
- int cpu;
-
- for_each_channel_cpu(cpu, rb_chan) {
- int shm_fd, wait_fd, wakeup_fd;
- uint64_t memory_map_size;
-
- buf = channel_get_ring_buffer(&client_config, rb_chan,
- cpu, rb_chan->handle, &shm_fd, &wait_fd,
- &wakeup_fd, &memory_map_size);
- lib_ring_buffer_switch(&client_config, buf,
- SWITCH_ACTIVE, rb_chan->handle);
- }
- return 0;
-}
-
-static struct lttng_transport lttng_relay_transport = {
- .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
- .ops = {
- .struct_size = sizeof(struct lttng_ust_channel_buffer_ops),
- .priv = __LTTNG_COMPOUND_LITERAL(struct lttng_ust_channel_buffer_ops_private, {
- .pub = &lttng_relay_transport.ops,
- .channel_create = _channel_create,
- .channel_destroy = lttng_channel_destroy,
- .packet_avail_size = NULL, /* Would be racy anyway */
- .is_finalized = lttng_is_finalized,
- .is_disabled = lttng_is_disabled,
- .flush_buffer = lttng_flush_buffer,
- }),
- .event_reserve = lttng_event_reserve,
- .event_commit = lttng_event_commit,
- .event_write = lttng_event_write,
- .event_strcpy = lttng_event_strcpy,
- .event_pstrcpy_pad = lttng_event_pstrcpy_pad,
- },
- .client_config = &client_config,
-};
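For orientation, a minimal sketch of how the probe side is expected to drive the ops registered above. The helper name is hypothetical, and it assumes the context has already been initialized by the tracepoint probe with the event recorder as client_priv plus the payload size and alignment; only the reserve/write/commit calls come from the transport definition above.

static
void example_record_u32(struct lttng_ust_channel_buffer *chan,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t payload)
{
	/* Reserve space and write the event header on success. */
	if (chan->ops->event_reserve(ctx))
		return;		/* Buffer full or nesting limit reached. */
	/* Copy the payload at its natural alignment. */
	chan->ops->event_write(ctx, &payload, sizeof(payload),
			__alignof__(payload));
	/* Publish the record and drop the nesting count. */
	chan->ops->event_commit(ctx);
}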
-
-void RING_BUFFER_MODE_TEMPLATE_INIT(void)
-{
- DBG("LTT : ltt ring buffer client \"%s\" init\n",
- "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
- lttng_transport_register(&lttng_relay_transport);
-}
-
-void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
-{
- DBG("LTT : ltt ring buffer client \"%s\" exit\n",
- "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
- lttng_transport_unregister(&lttng_relay_transport);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer client template.
- */
-
-#include <limits.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include <ust-events-internal.h>
-#include "common/bitfield.h"
-#include "common/align.h"
-#include "lttng-tracer.h"
-#include "common/ringbuffer/frontend_types.h"
-#include <urcu/tls-compat.h>
-
-struct metadata_packet_header {
- uint32_t magic; /* 0x75D11D57 */
- uint8_t uuid[LTTNG_UST_UUID_LEN]; /* Unique Universal Identifier */
- uint32_t checksum; /* 0 if unused */
- uint32_t content_size; /* in bits */
- uint32_t packet_size; /* in bits */
- uint8_t compression_scheme; /* 0 if unused */
- uint8_t encryption_scheme; /* 0 if unused */
- uint8_t checksum_scheme; /* 0 if unused */
- uint8_t major; /* CTF spec major version number */
- uint8_t minor; /* CTF spec minor version number */
- uint8_t header_end[0];
-};
-
-struct metadata_record_header {
- uint8_t header_end[0]; /* End of header */
-};
-
-static const struct lttng_ust_lib_ring_buffer_config client_config;
-
-/* No nested use supported for metadata ring buffer. */
-static DEFINE_URCU_TLS(struct lttng_ust_lib_ring_buffer_ctx_private, private_ctx);
-
-static inline uint64_t lib_ring_buffer_clock_read(
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
-{
- return 0;
-}
-
-static inline
-size_t record_header_size(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
- size_t offset __attribute__((unused)),
- size_t *pre_header_padding __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
- void *client_ctx __attribute__((unused)))
-{
- return 0;
-}
-
-#include "common/ringbuffer/api.h"
-#include "lttng-rb-clients.h"
-
-static uint64_t client_ring_buffer_clock_read(
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
-{
- return 0;
-}
-
-static
-size_t client_record_header_size(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
- size_t offset __attribute__((unused)),
- size_t *pre_header_padding __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
- void *client_ctx __attribute__((unused)))
-{
- return 0;
-}
-
-/**
- * client_packet_header_size - called on buffer-switch to a new sub-buffer
- *
- * Return header size without padding after the structure. Don't use packed
- * structure because gcc generates inefficient code on some architectures
- * (powerpc, mips..)
- */
-static size_t client_packet_header_size(void)
-{
- return offsetof(struct metadata_packet_header, header_end);
-}
-
-static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf,
- uint64_t tsc __attribute__((unused)),
- unsigned int subbuf_idx,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
- struct metadata_packet_header *header =
- (struct metadata_packet_header *)
- lib_ring_buffer_offset_address(&buf->backend,
- subbuf_idx * chan->backend.subbuf_size,
- handle);
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
-
- assert(header);
- if (!header)
- return;
- header->magic = TSDL_MAGIC_NUMBER;
- memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
- header->checksum = 0; /* 0 if unused */
- header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
- header->packet_size = 0xFFFFFFFF; /* in bits, for debugging */
- header->compression_scheme = 0; /* 0 if unused */
- header->encryption_scheme = 0; /* 0 if unused */
- header->checksum_scheme = 0; /* 0 if unused */
- header->major = CTF_SPEC_MAJOR;
- header->minor = CTF_SPEC_MINOR;
-}
-
-/*
- * offset is assumed to never be 0 here: never deliver a completely empty
- * subbuffer. data_size is between 1 and subbuf_size.
- */
-static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf,
- uint64_t tsc __attribute__((unused)),
- unsigned int subbuf_idx, unsigned long data_size,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
- struct metadata_packet_header *header =
- (struct metadata_packet_header *)
- lib_ring_buffer_offset_address(&buf->backend,
- subbuf_idx * chan->backend.subbuf_size,
- handle);
- unsigned long records_lost = 0;
-
- assert(header);
- if (!header)
- return;
- header->content_size = data_size * CHAR_BIT; /* in bits */
- header->packet_size = LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
- /*
- * We do not care about the records lost count, because the metadata
- * channel waits and retries.
- */
- (void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
- records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
- records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
- WARN_ON_ONCE(records_lost != 0);
-}
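To make the bit/byte bookkeeping concrete: assuming 4 KiB pages, a metadata sub-buffer delivered with data_size = 5000 bytes is stamped with content_size = 5000 * 8 = 40000 bits and packet_size = LTTNG_UST_PAGE_ALIGN(5000) * 8 = 8192 * 8 = 65536 bits.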
-
-static int client_buffer_create(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- void *priv __attribute__((unused)),
- int cpu __attribute__((unused)),
- const char *name __attribute__((unused)),
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
- return 0;
-}
-
-static void client_buffer_finalize(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- void *priv __attribute__((unused)),
- int cpu __attribute__((unused)),
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
-}
-
-static const
-struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
- .parent = {
- .ring_buffer_clock_read = client_ring_buffer_clock_read,
- .record_header_size = client_record_header_size,
- .subbuffer_header_size = client_packet_header_size,
- .buffer_begin = client_buffer_begin,
- .buffer_end = client_buffer_end,
- .buffer_create = client_buffer_create,
- .buffer_finalize = client_buffer_finalize,
- },
-};
-
-static const struct lttng_ust_lib_ring_buffer_config client_config = {
- .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
- .cb.record_header_size = client_record_header_size,
- .cb.subbuffer_header_size = client_packet_header_size,
- .cb.buffer_begin = client_buffer_begin,
- .cb.buffer_end = client_buffer_end,
- .cb.buffer_create = client_buffer_create,
- .cb.buffer_finalize = client_buffer_finalize,
-
- .tsc_bits = 0,
- .alloc = RING_BUFFER_ALLOC_GLOBAL,
- .sync = RING_BUFFER_SYNC_GLOBAL,
- .mode = RING_BUFFER_MODE_TEMPLATE,
- .backend = RING_BUFFER_PAGE,
- .output = RING_BUFFER_MMAP,
- .oops = RING_BUFFER_OOPS_CONSISTENCY,
- .ipi = RING_BUFFER_NO_IPI_BARRIER,
- .wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
- .client_type = LTTNG_CLIENT_TYPE,
-
- .cb_ptr = &client_cb.parent,
-};
-
-static
-struct lttng_ust_channel_buffer *_channel_create(const char *name,
- void *buf_addr,
- size_t subbuf_size, size_t num_subbuf,
- unsigned int switch_timer_interval,
- unsigned int read_timer_interval,
- unsigned char *uuid,
- uint32_t chan_id,
- const int *stream_fds, int nr_stream_fds,
- int64_t blocking_timeout)
-{
- struct lttng_ust_abi_channel_config chan_priv_init;
- struct lttng_ust_shm_handle *handle;
- struct lttng_ust_channel_buffer *lttng_chan_buf;
-
- lttng_chan_buf = lttng_ust_alloc_channel_buffer();
- if (!lttng_chan_buf)
- return NULL;
- memcpy(lttng_chan_buf->priv->uuid, uuid, LTTNG_UST_UUID_LEN);
- lttng_chan_buf->priv->id = chan_id;
-
- memset(&chan_priv_init, 0, sizeof(chan_priv_init));
- memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
- chan_priv_init.id = chan_id;
-
- handle = channel_create(&client_config, name,
- __alignof__(struct lttng_ust_channel_buffer),
- sizeof(struct lttng_ust_channel_buffer),
- &chan_priv_init,
- lttng_chan_buf, buf_addr, subbuf_size, num_subbuf,
- switch_timer_interval, read_timer_interval,
- stream_fds, nr_stream_fds, blocking_timeout);
- if (!handle)
- goto error;
- lttng_chan_buf->priv->rb_chan = shmp(handle, handle->chan);
- return lttng_chan_buf;
-
-error:
- lttng_ust_free_channel_common(lttng_chan_buf->parent);
- return NULL;
-}
-
-static
-void lttng_channel_destroy(struct lttng_ust_channel_buffer *lttng_chan_buf)
-{
- channel_destroy(lttng_chan_buf->priv->rb_chan, lttng_chan_buf->priv->rb_chan->handle, 1);
- lttng_ust_free_channel_common(lttng_chan_buf->parent);
-}
-
-static
-int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx)
-{
- int ret;
-
- memset(&URCU_TLS(private_ctx), 0, sizeof(struct lttng_ust_lib_ring_buffer_ctx_private));
- URCU_TLS(private_ctx).pub = ctx;
- URCU_TLS(private_ctx).chan = ctx->client_priv;
- ctx->priv = &URCU_TLS(private_ctx);
- ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
- if (ret)
- return ret;
- if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
- &ctx->priv->backend_pages))
- return -EPERM;
- return 0;
-}
-
-static
-void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
-{
- lib_ring_buffer_commit(&client_config, ctx);
-}
-
-static
-void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const void *src, size_t len, size_t alignment)
-{
- lttng_ust_lib_ring_buffer_align_ctx(ctx, alignment);
- lib_ring_buffer_write(&client_config, ctx, src, len);
-}
-
-static
-size_t lttng_packet_avail_size(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
- unsigned long o_begin;
- struct lttng_ust_lib_ring_buffer *buf;
-
- buf = shmp(rb_chan->handle, rb_chan->backend.buf[0].shmp); /* Only for global buffer ! */
- o_begin = v_read(&client_config, &buf->offset);
- if (subbuf_offset(o_begin, rb_chan) != 0) {
- return rb_chan->backend.subbuf_size - subbuf_offset(o_begin, rb_chan);
- } else {
- return rb_chan->backend.subbuf_size - subbuf_offset(o_begin, rb_chan)
- - sizeof(struct metadata_packet_header);
- }
-}
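For example, with 4 KiB sub-buffers: when the write position sits exactly at the start of a sub-buffer, the packet header still has to be written, so sizeof(struct metadata_packet_header) (40 bytes including tail padding on common ABIs) is subtracted and 4056 bytes are reported as available; at any other offset the header is already accounted for and the remaining bytes of the sub-buffer are reported as-is.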
-
-static
-int lttng_is_finalized(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
-
- return lib_ring_buffer_channel_is_finalized(rb_chan);
-}
-
-static
-int lttng_is_disabled(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
-
- return lib_ring_buffer_channel_is_disabled(rb_chan);
-}
-
-static
-int lttng_flush_buffer(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
- struct lttng_ust_lib_ring_buffer *buf;
- int shm_fd, wait_fd, wakeup_fd;
- uint64_t memory_map_size;
-
- buf = channel_get_ring_buffer(&client_config, rb_chan,
- 0, rb_chan->handle, &shm_fd, &wait_fd, &wakeup_fd,
- &memory_map_size);
- lib_ring_buffer_switch(&client_config, buf,
- SWITCH_ACTIVE, rb_chan->handle);
- return 0;
-}
-
-static struct lttng_transport lttng_relay_transport = {
- .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
- .ops = {
- .struct_size = sizeof(struct lttng_ust_channel_buffer_ops),
-
- .priv = __LTTNG_COMPOUND_LITERAL(struct lttng_ust_channel_buffer_ops_private, {
- .pub = &lttng_relay_transport.ops,
- .channel_create = _channel_create,
- .channel_destroy = lttng_channel_destroy,
- .packet_avail_size = lttng_packet_avail_size,
- .is_finalized = lttng_is_finalized,
- .is_disabled = lttng_is_disabled,
- .flush_buffer = lttng_flush_buffer,
- }),
- .event_reserve = lttng_event_reserve,
- .event_commit = lttng_event_commit,
- .event_write = lttng_event_write,
- },
- .client_config = &client_config,
-};
-
-void RING_BUFFER_MODE_TEMPLATE_INIT(void)
-{
- DBG("LTT : ltt ring buffer client \"%s\" init\n",
- "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
- lttng_transport_register(&lttng_relay_transport);
-}
-
-void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
-{
- DBG("LTT : ltt ring buffer client \"%s\" exit\n",
- "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
- lttng_transport_unregister(&lttng_relay_transport);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer metadata client.
- */
-
-#define _LGPL_SOURCE
-#include "lttng-tracer.h"
-
-#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata"
-#define RING_BUFFER_MODE_TEMPLATE_INIT \
- lttng_ring_buffer_metadata_client_init
-#define RING_BUFFER_MODE_TEMPLATE_EXIT \
- lttng_ring_buffer_metadata_client_exit
-#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_METADATA
-#include "lttng-ring-buffer-metadata-client-template.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This contains the core definitions for the Linux Trace Toolkit.
- */
-
-#ifndef _LTTNG_TRACER_CORE_H
-#define _LTTNG_TRACER_CORE_H
-
-#include <stddef.h>
-#include <urcu/arch.h>
-#include <urcu/list.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include "common/logging.h"
-
-/*
- * The longest possible namespace proc path is with the cgroup ns
- * and the maximum theoretical Linux PID of 536870912:
- *
- * /proc/self/task/536870912/ns/cgroup
- */
-#define LTTNG_PROC_NS_PATH_MAX 40
-
-struct lttng_ust_session;
-struct lttng_ust_channel_buffer;
-struct lttng_ust_ctx_field;
-struct lttng_ust_lib_ring_buffer_ctx;
-struct lttng_ust_ctx_value;
-struct lttng_ust_event_recorder;
-struct lttng_ust_event_notifier;
-struct lttng_ust_notification_ctx;
-
-int ust_lock(void) __attribute__ ((warn_unused_result))
- __attribute__((visibility("hidden")));
-
-void ust_lock_nocheck(void)
- __attribute__((visibility("hidden")));
-
-void ust_unlock(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_fixup_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_event_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_vtid_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_procname_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_cgroup_ns_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_ipc_ns_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_net_ns_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_time_ns_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_uts_ns_tls(void)
- __attribute__((visibility("hidden")));
-
-const char *lttng_ust_obj_get_name(int id)
- __attribute__((visibility("hidden")));
-
-int lttng_get_notify_socket(void *owner)
- __attribute__((visibility("hidden")));
-
-char* lttng_ust_sockinfo_get_procname(void *owner)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_sockinfo_session_enabled(void *owner)
- __attribute__((visibility("hidden")));
-
-ssize_t lttng_ust_read(int fd, void *buf, size_t len)
- __attribute__((visibility("hidden")));
-
-size_t lttng_ust_dummy_get_size(void *priv, size_t offset)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_dummy_record(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_dummy_get_value(void *priv, struct lttng_ust_ctx_value *value)
- __attribute__((visibility("hidden")));
-
-void lttng_event_notifier_notification_send(
- struct lttng_ust_event_notifier *event_notifier,
- const char *stack_data,
- struct lttng_ust_notification_ctx *notif_ctx)
- __attribute__((visibility("hidden")));
-
-struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_transport_register(struct lttng_counter_transport *transport)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
- __attribute__((visibility("hidden")));
-
-#ifdef HAVE_LINUX_PERF_EVENT_H
-void lttng_ust_fixup_perf_counter_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_perf_lock(void)
- __attribute__((visibility("hidden")));
-
-void lttng_perf_unlock(void)
- __attribute__((visibility("hidden")));
-#else /* #ifdef HAVE_LINUX_PERF_EVENT_H */
-static inline
-void lttng_ust_fixup_perf_counter_tls(void)
-{
-}
-static inline
-void lttng_perf_lock(void)
-{
-}
-static inline
-void lttng_perf_unlock(void)
-{
-}
-#endif /* #else #ifdef HAVE_LINUX_PERF_EVENT_H */
-
-#endif /* _LTTNG_TRACER_CORE_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This contains the definitions for the Linux Trace Toolkit tracer.
- *
- * Ported to userspace by Pierre-Marc Fournier.
- */
-
-#ifndef _LTTNG_TRACER_H
-#define _LTTNG_TRACER_H
-
-#include <stdarg.h>
-#include <stdint.h>
-#include <lttng/ust-events.h>
-#include "lttng-tracer-core.h"
-
-/* Tracer properties */
-#define CTF_MAGIC_NUMBER 0xC1FC1FC1
-#define TSDL_MAGIC_NUMBER 0x75D11D57
-
-/* CTF specification version followed */
-#define CTF_SPEC_MAJOR 1
-#define CTF_SPEC_MINOR 8
-
-/*
- * Number of milliseconds to retry before failing metadata writes on buffer full
- * condition. (10 seconds)
- */
-#define LTTNG_METADATA_TIMEOUT_MSEC 10000
-
-#define LTTNG_RFLAG_EXTENDED RING_BUFFER_RFLAG_END
-#define LTTNG_RFLAG_END (LTTNG_RFLAG_EXTENDED << 1)
-
-/*
- * LTTng client type enumeration. Used by the consumer to map the
- * callbacks from its own address space.
- */
-enum lttng_client_types {
- LTTNG_CLIENT_METADATA = 0,
- LTTNG_CLIENT_DISCARD = 1,
- LTTNG_CLIENT_OVERWRITE = 2,
- LTTNG_CLIENT_DISCARD_RT = 3,
- LTTNG_CLIENT_OVERWRITE_RT = 4,
- LTTNG_NR_CLIENT_TYPES,
-};
-
-#endif /* _LTTNG_TRACER_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST ABI
- *
- * Mimic system calls for:
- * - session creation, returns an object descriptor or failure.
- * - channel creation, returns an object descriptor or failure.
- * - Operates on a session object descriptor
- * - Takes all channel options as parameters.
- * - stream get, returns an object descriptor or failure.
- * - Operates on a channel object descriptor.
- * - stream notifier get, returns an object descriptor or failure.
- * - Operates on a channel object descriptor.
- * - event creation, returns an object descriptor or failure.
- * - Operates on a channel object descriptor
- * - Takes an event name as parameter
- * - Takes an instrumentation source as parameter
- * - e.g. tracepoints, dynamic_probes...
- * - Takes instrumentation source specific arguments.
- */
-
-#define _LGPL_SOURCE
-#include <fcntl.h>
-#include <stdint.h>
-#include <unistd.h>
-
-#include <urcu/compiler.h>
-#include <urcu/list.h>
-
-#include <lttng/tracepoint.h>
-#include <lttng/ust-abi.h>
-#include <lttng/ust-error.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-version.h>
-
-#include "common/ust-fd.h"
-#include "common/logging.h"
-
-#include "common/ringbuffer/frontend_types.h"
-#include "common/ringbuffer/frontend.h"
-#include "common/ringbuffer/shm.h"
-#include "common/counter/counter.h"
-#include "tracepoint-internal.h"
-#include "lttng-tracer.h"
-#include "string-utils.h"
-#include "ust-events-internal.h"
-#include "context-internal.h"
-#include "common/macros.h"
-
-#define OBJ_NAME_LEN 16
-
-static int lttng_ust_abi_close_in_progress;
-
-static
-int lttng_abi_tracepoint_list(void *owner);
-static
-int lttng_abi_tracepoint_field_list(void *owner);
-
-/*
- * Object descriptor table. Should be protected from concurrent access
- * by the caller.
- */
-
-struct lttng_ust_abi_obj {
- union {
- struct {
- void *private_data;
- const struct lttng_ust_abi_objd_ops *ops;
- int f_count;
- int owner_ref; /* has ref from owner */
- void *owner;
- char name[OBJ_NAME_LEN];
- } s;
- int freelist_next; /* offset freelist. end is -1. */
- } u;
-};
-
-struct lttng_ust_abi_objd_table {
- struct lttng_ust_abi_obj *array;
- unsigned int len, allocated_len;
- int freelist_head; /* offset freelist head. end is -1 */
-};
-
-static struct lttng_ust_abi_objd_table objd_table = {
- .freelist_head = -1,
-};
-
-static
-int objd_alloc(void *private_data, const struct lttng_ust_abi_objd_ops *ops,
- void *owner, const char *name)
-{
- struct lttng_ust_abi_obj *obj;
-
- if (objd_table.freelist_head != -1) {
- obj = &objd_table.array[objd_table.freelist_head];
- objd_table.freelist_head = obj->u.freelist_next;
- goto end;
- }
-
- if (objd_table.len >= objd_table.allocated_len) {
- unsigned int new_allocated_len, old_allocated_len;
- struct lttng_ust_abi_obj *new_table, *old_table;
-
- old_allocated_len = objd_table.allocated_len;
- old_table = objd_table.array;
- if (!old_allocated_len)
- new_allocated_len = 1;
- else
- new_allocated_len = old_allocated_len << 1;
- new_table = zmalloc(sizeof(struct lttng_ust_abi_obj) * new_allocated_len);
- if (!new_table)
- return -ENOMEM;
- memcpy(new_table, old_table,
- sizeof(struct lttng_ust_abi_obj) * old_allocated_len);
- free(old_table);
- objd_table.array = new_table;
- objd_table.allocated_len = new_allocated_len;
- }
- obj = &objd_table.array[objd_table.len];
- objd_table.len++;
-end:
- obj->u.s.private_data = private_data;
- obj->u.s.ops = ops;
- obj->u.s.f_count = 2; /* count == 1 : object is allocated */
- /* count == 2 : allocated + hold ref */
- obj->u.s.owner_ref = 1; /* One owner reference */
- obj->u.s.owner = owner;
- strncpy(obj->u.s.name, name, OBJ_NAME_LEN);
- obj->u.s.name[OBJ_NAME_LEN - 1] = '\0';
- return obj - objd_table.array;
-}
-
-static
-struct lttng_ust_abi_obj *_objd_get(int id)
-{
- if (id >= objd_table.len)
- return NULL;
- if (!objd_table.array[id].u.s.f_count)
- return NULL;
- return &objd_table.array[id];
-}
-
-static
-void *objd_private(int id)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
- assert(obj);
- return obj->u.s.private_data;
-}
-
-static
-void objd_set_private(int id, void *private_data)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
- assert(obj);
- obj->u.s.private_data = private_data;
-}
-
-const struct lttng_ust_abi_objd_ops *lttng_ust_abi_objd_ops(int id)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
-
- if (!obj)
- return NULL;
- return obj->u.s.ops;
-}
-
-static
-void objd_free(int id)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
-
- assert(obj);
- obj->u.freelist_next = objd_table.freelist_head;
- objd_table.freelist_head = obj - objd_table.array;
- assert(obj->u.s.f_count == 1);
- obj->u.s.f_count = 0; /* deallocated */
-}
-
-static
-void objd_ref(int id)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
- assert(obj != NULL);
- obj->u.s.f_count++;
-}
-
-int lttng_ust_abi_objd_unref(int id, int is_owner)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
-
- if (!obj)
- return -EINVAL;
- if (obj->u.s.f_count == 1) {
- ERR("Reference counting error\n");
- return -EINVAL;
- }
- if (is_owner) {
- if (!obj->u.s.owner_ref) {
- ERR("Error decrementing owner reference");
- return -EINVAL;
- }
- obj->u.s.owner_ref--;
- }
- if ((--obj->u.s.f_count) == 1) {
- const struct lttng_ust_abi_objd_ops *ops = lttng_ust_abi_objd_ops(id);
-
- if (ops->release)
- ops->release(id);
- objd_free(id);
- }
- return 0;
-}
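A minimal sketch of the descriptor life cycle implemented by the functions above; the helper and its ops argument are hypothetical, and the reference-count values follow the comments in objd_alloc():

static
void objd_lifecycle_example(const struct lttng_ust_abi_objd_ops *ops,
		void *owner)
{
	int id;

	id = objd_alloc(NULL, ops, owner, "example");
	if (id < 0)
		return;
	/* f_count == 2: allocated + owner reference. */
	objd_ref(id);				/* f_count == 3 */
	(void) lttng_ust_abi_objd_unref(id, 0);	/* back to 2 */
	(void) lttng_ust_abi_objd_unref(id, 1);	/*
						 * Drops the owner reference,
						 * calls ops->release() and
						 * returns the slot to the
						 * freelist.
						 */
}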
-
-static
-void objd_table_destroy(void)
-{
- int i;
-
- for (i = 0; i < objd_table.allocated_len; i++) {
- struct lttng_ust_abi_obj *obj;
-
- obj = _objd_get(i);
- if (!obj)
- continue;
- if (!obj->u.s.owner_ref)
- continue; /* only unref owner ref. */
- (void) lttng_ust_abi_objd_unref(i, 1);
- }
- free(objd_table.array);
- objd_table.array = NULL;
- objd_table.len = 0;
- objd_table.allocated_len = 0;
- objd_table.freelist_head = -1;
-}
-
-const char *lttng_ust_obj_get_name(int id)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
-
- if (!obj)
- return NULL;
- return obj->u.s.name;
-}
-
-void lttng_ust_abi_objd_table_owner_cleanup(void *owner)
-{
- int i;
-
- for (i = 0; i < objd_table.allocated_len; i++) {
- struct lttng_ust_abi_obj *obj;
-
- obj = _objd_get(i);
- if (!obj)
- continue;
- if (!obj->u.s.owner)
- continue; /* skip root handles */
- if (!obj->u.s.owner_ref)
- continue; /* only unref owner ref. */
- if (obj->u.s.owner == owner)
- (void) lttng_ust_abi_objd_unref(i, 1);
- }
-}
-
-/*
- * This is LTTng's own personal way to create an ABI for sessiond.
- * We send commands over a socket.
- */
-
-static const struct lttng_ust_abi_objd_ops lttng_ops;
-static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_ops;
-static const struct lttng_ust_abi_objd_ops lttng_session_ops;
-static const struct lttng_ust_abi_objd_ops lttng_channel_ops;
-static const struct lttng_ust_abi_objd_ops lttng_event_enabler_ops;
-static const struct lttng_ust_abi_objd_ops lttng_event_notifier_enabler_ops;
-static const struct lttng_ust_abi_objd_ops lttng_tracepoint_list_ops;
-static const struct lttng_ust_abi_objd_ops lttng_tracepoint_field_list_ops;
-
-int lttng_abi_create_root_handle(void)
-{
- int root_handle;
-
- /* root handles have NULL owners */
- root_handle = objd_alloc(NULL, &lttng_ops, NULL, "root");
- return root_handle;
-}
-
-static
-int lttng_is_channel_ready(struct lttng_ust_channel_buffer *lttng_chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- unsigned int nr_streams, exp_streams;
-
- chan = lttng_chan->priv->rb_chan;
- nr_streams = channel_handle_get_nr_streams(lttng_chan->priv->rb_chan->handle);
- exp_streams = chan->nr_streams;
- return nr_streams == exp_streams;
-}
-
-static
-int lttng_abi_create_session(void *owner)
-{
- struct lttng_ust_session *session;
- int session_objd, ret;
-
- session = lttng_session_create();
- if (!session)
- return -ENOMEM;
- session_objd = objd_alloc(session, &lttng_session_ops, owner, "session");
- if (session_objd < 0) {
- ret = session_objd;
- goto objd_error;
- }
- session->priv->objd = session_objd;
- session->priv->owner = owner;
- return session_objd;
-
-objd_error:
- lttng_session_destroy(session);
- return ret;
-}
-
-static
-long lttng_abi_tracer_version(int objd __attribute__((unused)),
- struct lttng_ust_abi_tracer_version *v)
-{
- v->major = LTTNG_UST_MAJOR_VERSION;
- v->minor = LTTNG_UST_MINOR_VERSION;
- v->patchlevel = LTTNG_UST_PATCHLEVEL_VERSION;
- return 0;
-}
-
-static
-int lttng_abi_event_notifier_send_fd(void *owner, int *event_notifier_notif_fd)
-{
- struct lttng_event_notifier_group *event_notifier_group;
- int event_notifier_group_objd, ret, fd_flag;
-
- event_notifier_group = lttng_event_notifier_group_create();
- if (!event_notifier_group)
- return -ENOMEM;
-
- /*
- * Set this file descriptor as NON-BLOCKING.
- */
- fd_flag = fcntl(*event_notifier_notif_fd, F_GETFL);
-
- fd_flag |= O_NONBLOCK;
-
- ret = fcntl(*event_notifier_notif_fd, F_SETFL, fd_flag);
- if (ret) {
- ret = -errno;
- goto fd_error;
- }
-
- event_notifier_group_objd = objd_alloc(event_notifier_group,
- &lttng_event_notifier_group_ops, owner, "event_notifier_group");
- if (event_notifier_group_objd < 0) {
- ret = event_notifier_group_objd;
- goto objd_error;
- }
-
- event_notifier_group->objd = event_notifier_group_objd;
- event_notifier_group->owner = owner;
- event_notifier_group->notification_fd = *event_notifier_notif_fd;
- /* Object descriptor takes ownership of notification fd. */
- *event_notifier_notif_fd = -1;
-
- return event_notifier_group_objd;
-
-objd_error:
- lttng_event_notifier_group_destroy(event_notifier_group);
-fd_error:
- return ret;
-}
-
-static
-long lttng_abi_add_context(int objd __attribute__((unused)),
- struct lttng_ust_abi_context *context_param,
- union lttng_ust_abi_args *uargs,
- struct lttng_ust_ctx **ctx, struct lttng_ust_session *session)
-{
- return lttng_attach_context(context_param, uargs, ctx, session);
-}
-
-/**
- * lttng_cmd - lttng control through socket commands
- *
- * @objd: the object descriptor
- * @cmd: the command
- * @arg: command arg
- * @uargs: UST arguments (internal)
- * @owner: objd owner
- *
- * This descriptor implements lttng commands:
- * LTTNG_UST_ABI_SESSION
- * Returns a LTTng trace session object descriptor
- * LTTNG_UST_ABI_TRACER_VERSION
- * Returns the LTTng kernel tracer version
- * LTTNG_UST_ABI_TRACEPOINT_LIST
- * Returns a file descriptor listing available tracepoints
- * LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST
- * Returns a file descriptor listing available tracepoint fields
- * LTTNG_UST_ABI_WAIT_QUIESCENT
- * Returns after all previously running probes have completed
- *
- * The returned session will be deleted when its file descriptor is closed.
- */
-static
-long lttng_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs, void *owner)
-{
- switch (cmd) {
- case LTTNG_UST_ABI_SESSION:
- return lttng_abi_create_session(owner);
- case LTTNG_UST_ABI_TRACER_VERSION:
- return lttng_abi_tracer_version(objd,
- (struct lttng_ust_abi_tracer_version *) arg);
- case LTTNG_UST_ABI_TRACEPOINT_LIST:
- return lttng_abi_tracepoint_list(owner);
- case LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST:
- return lttng_abi_tracepoint_field_list(owner);
- case LTTNG_UST_ABI_WAIT_QUIESCENT:
- lttng_ust_urcu_synchronize_rcu();
- return 0;
- case LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE:
- return lttng_abi_event_notifier_send_fd(owner,
- &uargs->event_notifier_handle.event_notifier_notif_fd);
- default:
- return -EINVAL;
- }
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_ops = {
- .cmd = lttng_cmd,
-};
-
-static
-int lttng_abi_map_channel(int session_objd,
- struct lttng_ust_abi_channel *ust_chan,
- union lttng_ust_abi_args *uargs,
- void *owner)
-{
- struct lttng_ust_session *session = objd_private(session_objd);
- const char *transport_name;
- struct lttng_transport *transport;
- const char *chan_name;
- int chan_objd;
- struct lttng_ust_shm_handle *channel_handle;
- struct lttng_ust_abi_channel_config *lttng_chan_config;
- struct lttng_ust_channel_buffer *lttng_chan_buf;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer_config *config;
- void *chan_data;
- int wakeup_fd;
- uint64_t len;
- int ret;
- enum lttng_ust_abi_chan_type type;
-
- chan_data = uargs->channel.chan_data;
- wakeup_fd = uargs->channel.wakeup_fd;
- len = ust_chan->len;
- type = ust_chan->type;
-
- switch (type) {
- case LTTNG_UST_ABI_CHAN_PER_CPU:
- break;
- default:
- ret = -EINVAL;
- goto invalid;
- }
-
- if (session->priv->been_active) {
- ret = -EBUSY;
- goto active; /* Refuse to add channel to active session */
- }
-
- lttng_chan_buf = lttng_ust_alloc_channel_buffer();
- if (!lttng_chan_buf) {
- ret = -ENOMEM;
- goto lttng_chan_buf_error;
- }
-
- channel_handle = channel_handle_create(chan_data, len, wakeup_fd);
- if (!channel_handle) {
- ret = -EINVAL;
- goto handle_error;
- }
-
- /* Ownership of chan_data and wakeup_fd taken by channel handle. */
- uargs->channel.chan_data = NULL;
- uargs->channel.wakeup_fd = -1;
-
- chan = shmp(channel_handle, channel_handle->chan);
- assert(chan);
- chan->handle = channel_handle;
- config = &chan->backend.config;
- lttng_chan_config = channel_get_private_config(chan);
- if (!lttng_chan_config) {
- ret = -EINVAL;
- goto alloc_error;
- }
-
- if (lttng_ust_session_uuid_validate(session, lttng_chan_config->uuid)) {
- ret = -EINVAL;
- goto uuid_error;
- }
-
- /* Lookup transport name */
- switch (type) {
- case LTTNG_UST_ABI_CHAN_PER_CPU:
- if (config->output == RING_BUFFER_MMAP) {
- if (config->mode == RING_BUFFER_OVERWRITE) {
- if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER) {
- transport_name = "relay-overwrite-mmap";
- } else {
- transport_name = "relay-overwrite-rt-mmap";
- }
- } else {
- if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER) {
- transport_name = "relay-discard-mmap";
- } else {
- transport_name = "relay-discard-rt-mmap";
- }
- }
- } else {
- ret = -EINVAL;
- goto notransport;
- }
- chan_name = "channel";
- break;
- default:
- ret = -EINVAL;
- goto notransport;
- }
- transport = lttng_ust_transport_find(transport_name);
- if (!transport) {
- DBG("LTTng transport %s not found\n",
- transport_name);
- ret = -EINVAL;
- goto notransport;
- }
-
- chan_objd = objd_alloc(NULL, &lttng_channel_ops, owner, chan_name);
- if (chan_objd < 0) {
- ret = chan_objd;
- goto objd_error;
- }
-
- /* Initialize our lttng chan */
- lttng_chan_buf->parent->enabled = 1;
- lttng_chan_buf->parent->session = session;
-
- lttng_chan_buf->priv->parent.tstate = 1;
- lttng_chan_buf->priv->ctx = NULL;
- lttng_chan_buf->priv->rb_chan = chan;
-
- lttng_chan_buf->ops = &transport->ops;
-
- memcpy(&chan->backend.config,
- transport->client_config,
- sizeof(chan->backend.config));
- cds_list_add(&lttng_chan_buf->priv->node, &session->priv->chan_head);
- lttng_chan_buf->priv->header_type = 0;
- lttng_chan_buf->priv->type = type;
- /* Copy fields from lttng ust chan config. */
- lttng_chan_buf->priv->id = lttng_chan_config->id;
- memcpy(lttng_chan_buf->priv->uuid, lttng_chan_config->uuid, LTTNG_UST_UUID_LEN);
- channel_set_private(chan, lttng_chan_buf);
-
- /*
- * We tolerate no failure path after channel creation. It will stay
- * invariant for the rest of the session.
- */
- objd_set_private(chan_objd, lttng_chan_buf);
- lttng_chan_buf->priv->parent.objd = chan_objd;
- /* The channel created holds a reference on the session */
- objd_ref(session_objd);
- return chan_objd;
-
- /* error path after channel was created */
-objd_error:
-notransport:
-uuid_error:
-alloc_error:
- channel_destroy(chan, channel_handle, 0);
- lttng_ust_free_channel_common(lttng_chan_buf->parent);
- return ret;
-
-handle_error:
- lttng_ust_free_channel_common(lttng_chan_buf->parent);
-lttng_chan_buf_error:
-active:
-invalid:
- return ret;
-}
-
-/**
- * lttng_session_cmd - lttng session object command
- *
- * @obj: the object
- * @cmd: the command
- * @arg: command arg
- * @uargs: UST arguments (internal)
- * @owner: objd owner
- *
- * This descriptor implements lttng commands:
- * LTTNG_UST_ABI_CHANNEL
- * Returns a LTTng channel object descriptor
- * LTTNG_UST_ABI_ENABLE
- * Enables tracing for a session (weak enable)
- * LTTNG_UST_ABI_DISABLE
- * Disables tracing for a session (strong disable)
- *
- * The returned channel will be deleted when its file descriptor is closed.
- */
-static
-long lttng_session_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs, void *owner)
-{
- struct lttng_ust_session *session = objd_private(objd);
-
- switch (cmd) {
- case LTTNG_UST_ABI_CHANNEL:
- return lttng_abi_map_channel(objd,
- (struct lttng_ust_abi_channel *) arg,
- uargs, owner);
- case LTTNG_UST_ABI_SESSION_START:
- case LTTNG_UST_ABI_ENABLE:
- return lttng_session_enable(session);
- case LTTNG_UST_ABI_SESSION_STOP:
- case LTTNG_UST_ABI_DISABLE:
- return lttng_session_disable(session);
- case LTTNG_UST_ABI_SESSION_STATEDUMP:
- return lttng_session_statedump(session);
- case LTTNG_UST_ABI_COUNTER:
- case LTTNG_UST_ABI_COUNTER_GLOBAL:
- case LTTNG_UST_ABI_COUNTER_CPU:
- /* Not implemented yet. */
- return -EINVAL;
- default:
- return -EINVAL;
- }
-}
-
-/*
- * Called when the last file reference is dropped.
- *
- * Big fat note: channels and events are invariant for the whole session after
- * their creation. So this session destruction also destroys all channel and
- * event structures specific to this session (they are not destroyed when their
- * individual file is released).
- */
-static
-int lttng_release_session(int objd)
-{
- struct lttng_ust_session *session = objd_private(objd);
-
- if (session) {
- lttng_session_destroy(session);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_session_ops = {
- .release = lttng_release_session,
- .cmd = lttng_session_cmd,
-};
-
-static int lttng_ust_event_notifier_enabler_create(int event_notifier_group_obj,
- void *owner, struct lttng_ust_abi_event_notifier *event_notifier_param,
- enum lttng_enabler_format_type type)
-{
- struct lttng_event_notifier_group *event_notifier_group =
- objd_private(event_notifier_group_obj);
- struct lttng_event_notifier_enabler *event_notifier_enabler;
- int event_notifier_objd, ret;
-
- event_notifier_param->event.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- event_notifier_objd = objd_alloc(NULL, &lttng_event_notifier_enabler_ops, owner,
- "event_notifier enabler");
- if (event_notifier_objd < 0) {
- ret = event_notifier_objd;
- goto objd_error;
- }
-
- event_notifier_enabler = lttng_event_notifier_enabler_create(
- event_notifier_group, type, event_notifier_param);
- if (!event_notifier_enabler) {
- ret = -ENOMEM;
- goto event_notifier_error;
- }
-
- objd_set_private(event_notifier_objd, event_notifier_enabler);
- /* The event_notifier holds a reference on the event_notifier group. */
- objd_ref(event_notifier_enabler->group->objd);
-
- return event_notifier_objd;
-
-event_notifier_error:
- {
- int err;
-
- err = lttng_ust_abi_objd_unref(event_notifier_objd, 1);
- assert(!err);
- }
-objd_error:
- return ret;
-}
-
-static
-long lttng_event_notifier_enabler_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs __attribute__((unused)),
- void *owner __attribute__((unused)))
-{
- struct lttng_event_notifier_enabler *event_notifier_enabler = objd_private(objd);
- switch (cmd) {
- case LTTNG_UST_ABI_FILTER:
- return lttng_event_notifier_enabler_attach_filter_bytecode(
- event_notifier_enabler,
- (struct lttng_ust_bytecode_node **) arg);
- case LTTNG_UST_ABI_EXCLUSION:
- return lttng_event_notifier_enabler_attach_exclusion(event_notifier_enabler,
- (struct lttng_ust_excluder_node **) arg);
- case LTTNG_UST_ABI_CAPTURE:
- return lttng_event_notifier_enabler_attach_capture_bytecode(
- event_notifier_enabler,
- (struct lttng_ust_bytecode_node **) arg);
- case LTTNG_UST_ABI_ENABLE:
- return lttng_event_notifier_enabler_enable(event_notifier_enabler);
- case LTTNG_UST_ABI_DISABLE:
- return lttng_event_notifier_enabler_disable(event_notifier_enabler);
- default:
- return -EINVAL;
- }
-}
-
-/**
- * lttng_event_notifier_group_error_counter_cmd - lttng event_notifier group error counter object command
- *
- * @obj: the object
- * @cmd: the command
- * @arg: command arg
- * @uargs: UST arguments (internal)
- * @owner: objd owner
- *
- * This descriptor implements lttng commands:
- * LTTNG_UST_ABI_COUNTER_GLOBAL
- * Return negative error code on error, 0 on success.
- * LTTNG_UST_ABI_COUNTER_CPU
- * Return negative error code on error, 0 on success.
- */
-static
-long lttng_event_notifier_group_error_counter_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs, void *owner __attribute__((unused)))
-{
- int ret;
- struct lttng_counter *counter = objd_private(objd);
-
- switch (cmd) {
- case LTTNG_UST_ABI_COUNTER_GLOBAL:
- ret = -EINVAL; /* Unimplemented. */
- break;
- case LTTNG_UST_ABI_COUNTER_CPU:
- {
- struct lttng_ust_abi_counter_cpu *counter_cpu =
- (struct lttng_ust_abi_counter_cpu *)arg;
-
- ret = lttng_counter_set_cpu_shm(counter->counter,
- counter_cpu->cpu_nr, uargs->counter_shm.shm_fd);
- if (!ret) {
- /* Take ownership of the shm_fd. */
- uargs->counter_shm.shm_fd = -1;
- }
- break;
- }
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-int lttng_release_event_notifier_group_error_counter(int objd)
- __attribute__((visibility("hidden")));
-int lttng_release_event_notifier_group_error_counter(int objd)
-{
- struct lttng_counter *counter = objd_private(objd);
-
- if (counter) {
- return lttng_ust_abi_objd_unref(counter->event_notifier_group->objd, 0);
- } else {
- return -EINVAL;
- }
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_error_counter_ops = {
- .release = lttng_release_event_notifier_group_error_counter,
- .cmd = lttng_event_notifier_group_error_counter_cmd,
-};
-
-static
-int lttng_ust_event_notifier_group_create_error_counter(int event_notifier_group_objd, void *owner,
- struct lttng_ust_abi_counter_conf *error_counter_conf)
-{
- const char *counter_transport_name;
- struct lttng_event_notifier_group *event_notifier_group =
- objd_private(event_notifier_group_objd);
- struct lttng_counter *counter;
- int counter_objd, ret;
- struct lttng_counter_dimension dimensions[1];
- size_t counter_len;
-
- if (event_notifier_group->error_counter)
- return -EBUSY;
-
- if (error_counter_conf->arithmetic != LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR)
- return -EINVAL;
-
- if (error_counter_conf->number_dimensions != 1)
- return -EINVAL;
-
- switch (error_counter_conf->bitness) {
- case LTTNG_UST_ABI_COUNTER_BITNESS_64:
- counter_transport_name = "counter-per-cpu-64-modular";
- break;
- case LTTNG_UST_ABI_COUNTER_BITNESS_32:
- counter_transport_name = "counter-per-cpu-32-modular";
- break;
- default:
- return -EINVAL;
- }
-
- counter_objd = objd_alloc(NULL, &lttng_event_notifier_group_error_counter_ops, owner,
- "event_notifier group error counter");
- if (counter_objd < 0) {
- ret = counter_objd;
- goto objd_error;
- }
-
- counter_len = error_counter_conf->dimensions[0].size;
- dimensions[0].size = counter_len;
- dimensions[0].underflow_index = 0;
- dimensions[0].overflow_index = 0;
- dimensions[0].has_underflow = 0;
- dimensions[0].has_overflow = 0;
-
- counter = lttng_ust_counter_create(counter_transport_name, 1, dimensions);
- if (!counter) {
- ret = -EINVAL;
- goto create_error;
- }
-
- event_notifier_group->error_counter_len = counter_len;
- /*
- * store-release to publish error counter matches load-acquire
- * in record_error. Ensures the counter is created and the
- * error_counter_len is set before they are used.
- * Currently a full memory barrier is used, which could be
- * turned into acquire-release barriers.
- */
- cmm_smp_mb();
- CMM_STORE_SHARED(event_notifier_group->error_counter, counter);
-
- counter->objd = counter_objd;
- counter->event_notifier_group = event_notifier_group; /* owner */
-
- objd_set_private(counter_objd, counter);
- /* The error counter holds a reference on the event_notifier group. */
- objd_ref(event_notifier_group->objd);
-
- return counter_objd;
-
-create_error:
- {
- int err;
-
- err = lttng_ust_abi_objd_unref(counter_objd, 1);
- assert(!err);
- }
-objd_error:
- return ret;
-}
-
-static
-long lttng_event_notifier_group_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs, void *owner)
-{
- switch (cmd) {
- case LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE:
- {
- struct lttng_ust_abi_event_notifier *event_notifier_param =
- (struct lttng_ust_abi_event_notifier *) arg;
- if (strutils_is_star_glob_pattern(event_notifier_param->event.name)) {
- /*
- * If the event name is a star globbing pattern,
- * we create the special star globbing enabler.
- */
- return lttng_ust_event_notifier_enabler_create(objd,
- owner, event_notifier_param,
- LTTNG_ENABLER_FORMAT_STAR_GLOB);
- } else {
- return lttng_ust_event_notifier_enabler_create(objd,
- owner, event_notifier_param,
- LTTNG_ENABLER_FORMAT_EVENT);
- }
- }
- case LTTNG_UST_ABI_COUNTER:
- {
- struct lttng_ust_abi_counter_conf *counter_conf =
- (struct lttng_ust_abi_counter_conf *) uargs->counter.counter_data;
- return lttng_ust_event_notifier_group_create_error_counter(
- objd, owner, counter_conf);
- }
- default:
- return -EINVAL;
- }
-}
-
-static
-int lttng_event_notifier_enabler_release(int objd)
-{
- struct lttng_event_notifier_enabler *event_notifier_enabler = objd_private(objd);
-
- if (event_notifier_enabler)
- return lttng_ust_abi_objd_unref(event_notifier_enabler->group->objd, 0);
- return 0;
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_event_notifier_enabler_ops = {
- .release = lttng_event_notifier_enabler_release,
- .cmd = lttng_event_notifier_enabler_cmd,
-};
-
-static
-int lttng_release_event_notifier_group(int objd)
-{
- struct lttng_event_notifier_group *event_notifier_group = objd_private(objd);
-
- if (event_notifier_group) {
- lttng_event_notifier_group_destroy(event_notifier_group);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_ops = {
- .release = lttng_release_event_notifier_group,
- .cmd = lttng_event_notifier_group_cmd,
-};
-
-static
-long lttng_tracepoint_list_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs __attribute__((unused)),
- void *owner __attribute__((unused)))
-{
- struct lttng_ust_tracepoint_list *list = objd_private(objd);
- struct lttng_ust_abi_tracepoint_iter *tp =
- (struct lttng_ust_abi_tracepoint_iter *) arg;
- struct lttng_ust_abi_tracepoint_iter *iter;
-
- switch (cmd) {
- case LTTNG_UST_ABI_TRACEPOINT_LIST_GET:
- {
- iter = lttng_ust_tracepoint_list_get_iter_next(list);
- if (!iter)
- return -LTTNG_UST_ERR_NOENT;
- memcpy(tp, iter, sizeof(*tp));
- return 0;
- }
- default:
- return -EINVAL;
- }
-}
-
-static
-int lttng_abi_tracepoint_list(void *owner)
-{
- int list_objd, ret;
- struct lttng_ust_tracepoint_list *list;
-
- list_objd = objd_alloc(NULL, &lttng_tracepoint_list_ops, owner, "tp_list");
- if (list_objd < 0) {
- ret = list_objd;
- goto objd_error;
- }
- list = zmalloc(sizeof(*list));
- if (!list) {
- ret = -ENOMEM;
- goto alloc_error;
- }
- objd_set_private(list_objd, list);
-
- /* populate list by walking on all registered probes. */
- ret = lttng_probes_get_event_list(list);
- if (ret) {
- goto list_error;
- }
- return list_objd;
-
-list_error:
- free(list);
-alloc_error:
- {
- int err;
-
- err = lttng_ust_abi_objd_unref(list_objd, 1);
- assert(!err);
- }
-objd_error:
- return ret;
-}
-
-static
-int lttng_release_tracepoint_list(int objd)
-{
- struct lttng_ust_tracepoint_list *list = objd_private(objd);
-
- if (list) {
- lttng_probes_prune_event_list(list);
- free(list);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_tracepoint_list_ops = {
- .release = lttng_release_tracepoint_list,
- .cmd = lttng_tracepoint_list_cmd,
-};
-
-static
-long lttng_tracepoint_field_list_cmd(int objd, unsigned int cmd,
- unsigned long arg __attribute__((unused)), union lttng_ust_abi_args *uargs,
- void *owner __attribute__((unused)))
-{
- struct lttng_ust_field_list *list = objd_private(objd);
- struct lttng_ust_abi_field_iter *tp = &uargs->field_list.entry;
- struct lttng_ust_abi_field_iter *iter;
-
- switch (cmd) {
- case LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET:
- {
- iter = lttng_ust_field_list_get_iter_next(list);
- if (!iter)
- return -LTTNG_UST_ERR_NOENT;
- memcpy(tp, iter, sizeof(*tp));
- return 0;
- }
- default:
- return -EINVAL;
- }
-}
-
-static
-int lttng_abi_tracepoint_field_list(void *owner)
-{
- int list_objd, ret;
- struct lttng_ust_field_list *list;
-
- list_objd = objd_alloc(NULL, &lttng_tracepoint_field_list_ops, owner,
- "tp_field_list");
- if (list_objd < 0) {
- ret = list_objd;
- goto objd_error;
- }
- list = zmalloc(sizeof(*list));
- if (!list) {
- ret = -ENOMEM;
- goto alloc_error;
- }
- objd_set_private(list_objd, list);
-
- /* populate list by walking on all registered probes. */
- ret = lttng_probes_get_field_list(list);
- if (ret) {
- goto list_error;
- }
- return list_objd;
-
-list_error:
- free(list);
-alloc_error:
- {
- int err;
-
- err = lttng_ust_abi_objd_unref(list_objd, 1);
- assert(!err);
- }
-objd_error:
- return ret;
-}
-
-static
-int lttng_release_tracepoint_field_list(int objd)
-{
- struct lttng_ust_field_list *list = objd_private(objd);
-
- if (list) {
- lttng_probes_prune_field_list(list);
- free(list);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_tracepoint_field_list_ops = {
- .release = lttng_release_tracepoint_field_list,
- .cmd = lttng_tracepoint_field_list_cmd,
-};
-
-static
-int lttng_abi_map_stream(int channel_objd, struct lttng_ust_abi_stream *info,
- union lttng_ust_abi_args *uargs, void *owner __attribute__((unused)))
-{
- struct lttng_ust_channel_buffer *lttng_chan_buf = objd_private(channel_objd);
- int ret;
-
- ret = channel_handle_add_stream(lttng_chan_buf->priv->rb_chan->handle,
- uargs->stream.shm_fd, uargs->stream.wakeup_fd,
- info->stream_nr, info->len);
- if (ret)
- goto error_add_stream;
- /* Take ownership of shm_fd and wakeup_fd. */
- uargs->stream.shm_fd = -1;
- uargs->stream.wakeup_fd = -1;
-
- return 0;
-
-error_add_stream:
- return ret;
-}
-
-static
-int lttng_abi_create_event_enabler(int channel_objd,
- struct lttng_ust_abi_event *event_param,
- void *owner,
- enum lttng_enabler_format_type format_type)
-{
- struct lttng_ust_channel_buffer *channel = objd_private(channel_objd);
- struct lttng_event_enabler *enabler;
- int event_objd, ret;
-
- event_param->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- event_objd = objd_alloc(NULL, &lttng_event_enabler_ops, owner,
- "event enabler");
- if (event_objd < 0) {
- ret = event_objd;
- goto objd_error;
- }
- /*
- * We tolerate no failure path after event creation. It will stay
- * invariant for the rest of the session.
- */
- enabler = lttng_event_enabler_create(format_type, event_param, channel);
- if (!enabler) {
- ret = -ENOMEM;
- goto event_error;
- }
- objd_set_private(event_objd, enabler);
- /* The event holds a reference on the channel */
- objd_ref(channel_objd);
- return event_objd;
-
-event_error:
- {
- int err;
-
- err = lttng_ust_abi_objd_unref(event_objd, 1);
- assert(!err);
- }
-objd_error:
- return ret;
-}
-
-/**
- * lttng_channel_cmd - lttng control through object descriptors
- *
- * @objd: the object descriptor
- * @cmd: the command
- * @arg: command arg
- * @uargs: UST arguments (internal)
- * @owner: objd owner
- *
- * This object descriptor implements lttng commands:
- * LTTNG_UST_ABI_STREAM
- * Returns an event stream object descriptor or failure.
- * (typically, one event stream records events from one CPU)
- * LTTNG_UST_ABI_EVENT
- * Returns an event object descriptor or failure.
- * LTTNG_UST_ABI_CONTEXT
- * Prepend a context field to each event in the channel
- * LTTNG_UST_ABI_ENABLE
- * Enable recording for events in this channel (weak enable)
- * LTTNG_UST_ABI_DISABLE
- * Disable recording for events in this channel (strong disable)
- *
- * Channel and event file descriptors also hold a reference on the session.
- */
-static
-long lttng_channel_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs, void *owner)
-{
- struct lttng_ust_channel_buffer *lttng_chan_buf = objd_private(objd);
-
- if (cmd != LTTNG_UST_ABI_STREAM) {
- /*
- * Check if channel received all streams.
- */
- if (!lttng_is_channel_ready(lttng_chan_buf))
- return -EPERM;
- }
-
- switch (cmd) {
- case LTTNG_UST_ABI_STREAM:
- {
- struct lttng_ust_abi_stream *stream;
-
- stream = (struct lttng_ust_abi_stream *) arg;
- /* stream used as output */
- return lttng_abi_map_stream(objd, stream, uargs, owner);
- }
- case LTTNG_UST_ABI_EVENT:
- {
- struct lttng_ust_abi_event *event_param =
- (struct lttng_ust_abi_event *) arg;
-
- if (strutils_is_star_glob_pattern(event_param->name)) {
- /*
- * If the event name is a star globbing pattern,
- * we create the special star globbing enabler.
- */
- return lttng_abi_create_event_enabler(objd, event_param,
- owner, LTTNG_ENABLER_FORMAT_STAR_GLOB);
- } else {
- return lttng_abi_create_event_enabler(objd, event_param,
- owner, LTTNG_ENABLER_FORMAT_EVENT);
- }
- }
- case LTTNG_UST_ABI_CONTEXT:
- return lttng_abi_add_context(objd,
- (struct lttng_ust_abi_context *) arg, uargs,
- &lttng_chan_buf->priv->ctx,
- lttng_chan_buf->parent->session);
- case LTTNG_UST_ABI_ENABLE:
- return lttng_channel_enable(lttng_chan_buf->parent);
- case LTTNG_UST_ABI_DISABLE:
- return lttng_channel_disable(lttng_chan_buf->parent);
- case LTTNG_UST_ABI_FLUSH_BUFFER:
- return lttng_chan_buf->ops->priv->flush_buffer(lttng_chan_buf);
- default:
- return -EINVAL;
- }
-}
-
-static
-int lttng_channel_release(int objd)
-{
- struct lttng_ust_channel_buffer *lttng_chan_buf = objd_private(objd);
-
- if (lttng_chan_buf)
- return lttng_ust_abi_objd_unref(lttng_chan_buf->parent->session->priv->objd, 0);
- return 0;
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_channel_ops = {
- .release = lttng_channel_release,
- .cmd = lttng_channel_cmd,
-};
-
-/**
- * lttng_enabler_cmd - lttng control through object descriptors
- *
- * @objd: the object descriptor
- * @cmd: the command
- * @arg: command arg
- * @uargs: UST arguments (internal)
- * @owner: objd owner
- *
- * This object descriptor implements lttng commands:
- * LTTNG_UST_ABI_CONTEXT
- * Prepend a context field to each record of events of this
- * enabler.
- * LTTNG_UST_ABI_ENABLE
- * Enable recording for this enabler
- * LTTNG_UST_ABI_DISABLE
- * Disable recording for this enabler
- * LTTNG_UST_ABI_FILTER
- * Attach a filter to an enabler.
- * LTTNG_UST_ABI_EXCLUSION
- * Attach exclusions to an enabler.
- */
-static
-long lttng_event_enabler_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs __attribute__((unused)),
- void *owner __attribute__((unused)))
-{
- struct lttng_event_enabler *enabler = objd_private(objd);
-
- switch (cmd) {
- case LTTNG_UST_ABI_CONTEXT:
- return lttng_event_enabler_attach_context(enabler,
- (struct lttng_ust_abi_context *) arg);
- case LTTNG_UST_ABI_ENABLE:
- return lttng_event_enabler_enable(enabler);
- case LTTNG_UST_ABI_DISABLE:
- return lttng_event_enabler_disable(enabler);
- case LTTNG_UST_ABI_FILTER:
- {
- int ret;
-
- ret = lttng_event_enabler_attach_filter_bytecode(enabler,
- (struct lttng_ust_bytecode_node **) arg);
- if (ret)
- return ret;
- return 0;
- }
- case LTTNG_UST_ABI_EXCLUSION:
- {
- return lttng_event_enabler_attach_exclusion(enabler,
- (struct lttng_ust_excluder_node **) arg);
- }
- default:
- return -EINVAL;
- }
-}
-
-static
-int lttng_event_enabler_release(int objd)
-{
- struct lttng_event_enabler *event_enabler = objd_private(objd);
-
- if (event_enabler)
- return lttng_ust_abi_objd_unref(event_enabler->chan->priv->parent.objd, 0);
-
- return 0;
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_event_enabler_ops = {
- .release = lttng_event_enabler_release,
- .cmd = lttng_event_enabler_cmd,
-};
-
-void lttng_ust_abi_exit(void)
-{
- lttng_ust_abi_close_in_progress = 1;
- ust_lock_nocheck();
- objd_table_destroy();
- ust_unlock();
- lttng_ust_abi_close_in_progress = 0;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <dlfcn.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <errno.h>
-#include <pthread.h>
-#include <semaphore.h>
-#include <time.h>
-#include <assert.h>
-#include <signal.h>
-#include <limits.h>
-#include <urcu/uatomic.h>
-#include "futex.h"
-#include <urcu/compiler.h>
-#include <lttng/urcu/urcu-ust.h>
-
-#include <lttng/ust-utils.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-abi.h>
-#include <lttng/ust-fork.h>
-#include <lttng/ust-error.h>
-#include <lttng/ust-ctl.h>
-#include <lttng/ust-libc-wrapper.h>
-#include <lttng/ust-thread.h>
-#include <lttng/ust-tracer.h>
-#include <urcu/tls-compat.h>
-#include "common/ustcomm.h"
-#include "common/ust-fd.h"
-#include "common/logging.h"
-#include "common/macros.h"
-#include "tracepoint-internal.h"
-#include "lttng-tracer-core.h"
-#include "common/compat/pthread.h"
-#include "common/procname.h"
-#include "common/ringbuffer/rb-init.h"
-#include "lttng-ust-statedump.h"
-#include "clock.h"
-#include "common/ringbuffer/getcpu.h"
-#include "getenv.h"
-#include "ust-events-internal.h"
-#include "context-internal.h"
-#include "common/align.h"
-#include "lttng-counter-client.h"
-#include "lttng-rb-clients.h"
-
-/*
- * Has lttng ust comm constructor been called ?
- */
-static int initialized;
-
-/*
- * The ust_lock/ust_unlock lock is used as a communication thread mutex.
- * Held when handling a command, also held by fork() to deal with
- * removal of threads, and by exit path.
- *
- * The UST lock is the centralized mutex across UST tracing control and
- * probe registration.
- *
- * ust_exit_mutex must never nest in ust_mutex.
- *
- * ust_fork_mutex must never nest in ust_mutex.
- *
- * ust_mutex_nest is a per-thread nesting counter, allowing the perf
- * counter lazy initialization called by events within the statedump,
- * which traces while the ust_mutex is held.
- *
- * ust_lock nests within the dynamic loader lock (within glibc) because
- * it is taken within the library constructor.
- *
- * The ust fd tracker lock nests within the ust_mutex.
- */
-static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/* Allow nesting the ust_mutex within the same thread. */
-static DEFINE_URCU_TLS(int, ust_mutex_nest);
-
-/*
- * ust_exit_mutex protects thread_active variable wrt thread exit. It
- * cannot be done by ust_mutex because pthread_cancel(), which takes an
- * internal libc lock, cannot nest within ust_mutex.
- *
- * It never nests within a ust_mutex.
- */
-static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * ust_fork_mutex protects base address statedump tracing against forks. It
- * prevents the dynamic loader lock to be taken (by base address statedump
- * tracing) while a fork is happening, thus preventing deadlock issues with
- * the dynamic loader lock.
- */
-static pthread_mutex_t ust_fork_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/* Should the ust comm thread quit ? */
-static int lttng_ust_comm_should_quit;
-
-/*
- * This variable can be tested by applications to check whether
- * lttng-ust is loaded. They simply have to define their own
- * "lttng_ust_loaded" weak symbol, and test it. It is set to 1 by the
- * library constructor.
- */
-int lttng_ust_loaded __attribute__((weak));
-
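
As the comment above notes, an application can test at run time whether liblttng-ust is loaded by providing its own weak definition of this symbol and reading it. A minimal application-side sketch (not part of this file):

#include <stdio.h>

/* Resolves to a single copy shared with liblttng-ust; the library
 * constructor sets it to 1 when liblttng-ust is loaded, and it stays
 * 0 otherwise. */
int lttng_ust_loaded __attribute__((weak));

int main(void)
{
	if (lttng_ust_loaded)
		printf("LTTng-UST is loaded\n");
	else
		printf("LTTng-UST is not loaded\n");
	return 0;
}
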
-/*
- * Return 0 on success, -1 if should quit.
- * The lock is taken in both cases.
- * Signal-safe.
- */
-int ust_lock(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
-
- ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- if (oldstate != PTHREAD_CANCEL_ENABLE) {
- ERR("pthread_setcancelstate: unexpected oldstate");
- }
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (!URCU_TLS(ust_mutex_nest)++)
- pthread_mutex_lock(&ust_mutex);
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (lttng_ust_comm_should_quit) {
- return -1;
- } else {
- return 0;
- }
-}
-
-/*
- * ust_lock_nocheck() can be used in constructors/destructors, because
- * they are already nested within the dynamic loader lock, and therefore
- * have exclusive access against execution of liblttng-ust destructor.
- * Signal-safe.
- */
-void ust_lock_nocheck(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
-
- ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- if (oldstate != PTHREAD_CANCEL_ENABLE) {
- ERR("pthread_setcancelstate: unexpected oldstate");
- }
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (!URCU_TLS(ust_mutex_nest)++)
- pthread_mutex_lock(&ust_mutex);
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
-}
-
-/*
- * Signal-safe.
- */
-void ust_unlock(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
-
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (!--URCU_TLS(ust_mutex_nest))
- pthread_mutex_unlock(&ust_mutex);
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- if (oldstate != PTHREAD_CANCEL_DISABLE) {
- ERR("pthread_setcancelstate: unexpected oldstate");
- }
-}
-
-/*
- * Wait for any of these before continuing to the main
- * program:
- * - the register_done message from sessiond daemon
- * (will let the sessiond daemon enable sessions before main
- * starts.)
- * - sessiond daemon is not reachable.
- * - timeout (ensuring applications are resilient to session
- * daemon problems).
- */
-static sem_t constructor_wait;
-/*
- * The initial count covers both the global and the local (per-user)
- * sessiond: each contributes two decrements (registration done and
- * initial statedump done), hence an initial value of 4.
- */
-enum {
- sem_count_initial_value = 4,
-};
-
-static int sem_count = sem_count_initial_value;
-
-/*
- * Counting nesting within lttng-ust. Used to ensure that calling fork()
- * from liblttng-ust does not execute the pre/post fork handlers.
- */
-static DEFINE_URCU_TLS(int, lttng_ust_nest_count);
-
-/*
- * Info about socket and associated listener thread.
- */
-struct sock_info {
- const char *name;
- pthread_t ust_listener; /* listener thread */
- int root_handle;
- int registration_done;
- int allowed;
- int global;
- int thread_active;
-
- char sock_path[PATH_MAX];
- int socket;
- int notify_socket;
-
- char wait_shm_path[PATH_MAX];
- char *wait_shm_mmap;
- /* Keep track of lazy state dump not performed yet. */
- int statedump_pending;
- int initial_statedump_done;
- /* Keep procname for statedump */
- char procname[LTTNG_UST_ABI_PROCNAME_LEN];
-};
-
-/* Socket from app (connect) to session daemon (listen) for communication */
-struct sock_info global_apps = {
- .name = "global",
- .global = 1,
-
- .root_handle = -1,
- .registration_done = 0,
- .allowed = 0,
- .thread_active = 0,
-
- .sock_path = LTTNG_DEFAULT_RUNDIR "/" LTTNG_UST_SOCK_FILENAME,
- .socket = -1,
- .notify_socket = -1,
-
- .wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
-
- .statedump_pending = 0,
- .initial_statedump_done = 0,
- .procname[0] = '\0'
-};
-
-/* TODO: allow global_apps_sock_path override */
-
-struct sock_info local_apps = {
- .name = "local",
- .global = 0,
- .root_handle = -1,
- .registration_done = 0,
- .allowed = 0, /* Check setuid bit first */
- .thread_active = 0,
-
- .socket = -1,
- .notify_socket = -1,
-
- .statedump_pending = 0,
- .initial_statedump_done = 0,
- .procname[0] = '\0'
-};
-
-static int wait_poll_fallback;
-
-static const char *cmd_name_mapping[] = {
- [ LTTNG_UST_ABI_RELEASE ] = "Release",
- [ LTTNG_UST_ABI_SESSION ] = "Create Session",
- [ LTTNG_UST_ABI_TRACER_VERSION ] = "Get Tracer Version",
-
- [ LTTNG_UST_ABI_TRACEPOINT_LIST ] = "Create Tracepoint List",
- [ LTTNG_UST_ABI_WAIT_QUIESCENT ] = "Wait for Quiescent State",
- [ LTTNG_UST_ABI_REGISTER_DONE ] = "Registration Done",
- [ LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST ] = "Create Tracepoint Field List",
-
- [ LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE ] = "Create event notifier group",
-
- /* Session FD commands */
- [ LTTNG_UST_ABI_CHANNEL ] = "Create Channel",
- [ LTTNG_UST_ABI_SESSION_START ] = "Start Session",
- [ LTTNG_UST_ABI_SESSION_STOP ] = "Stop Session",
-
- /* Channel FD commands */
- [ LTTNG_UST_ABI_STREAM ] = "Create Stream",
- [ LTTNG_UST_ABI_EVENT ] = "Create Event",
-
- /* Event and Channel FD commands */
- [ LTTNG_UST_ABI_CONTEXT ] = "Create Context",
- [ LTTNG_UST_ABI_FLUSH_BUFFER ] = "Flush Buffer",
-
- /* Event, Channel and Session commands */
- [ LTTNG_UST_ABI_ENABLE ] = "Enable",
- [ LTTNG_UST_ABI_DISABLE ] = "Disable",
-
- /* Tracepoint list commands */
- [ LTTNG_UST_ABI_TRACEPOINT_LIST_GET ] = "List Next Tracepoint",
- [ LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET ] = "List Next Tracepoint Field",
-
- /* Event FD commands */
- [ LTTNG_UST_ABI_FILTER ] = "Create Filter",
- [ LTTNG_UST_ABI_EXCLUSION ] = "Add exclusions to event",
-
- /* Event notifier group commands */
- [ LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE ] = "Create event notifier",
-
- /* Session and event notifier group commands */
- [ LTTNG_UST_ABI_COUNTER ] = "Create Counter",
-
- /* Counter commands */
- [ LTTNG_UST_ABI_COUNTER_GLOBAL ] = "Create Counter Global",
- [ LTTNG_UST_ABI_COUNTER_CPU ] = "Create Counter CPU",
-};
-
-static const char *str_timeout;
-static int got_timeout_env;
-
-static char *get_map_shm(struct sock_info *sock_info);
-
-ssize_t lttng_ust_read(int fd, void *buf, size_t len)
-{
- ssize_t ret;
- size_t copied = 0, to_copy = len;
-
- do {
- ret = read(fd, buf + copied, to_copy);
- if (ret > 0) {
- copied += ret;
- to_copy -= ret;
- }
- } while ((ret > 0 && to_copy > 0)
- || (ret < 0 && errno == EINTR));
- if (ret > 0) {
- ret = copied;
- }
- return ret;
-}
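
lttng_ust_read() above retries on EINTR and accumulates partial reads until the requested length is reached, returning the number of bytes copied on success. A minimal hedged usage sketch (the fd and buffer are illustrative):

	char buf[256];
	ssize_t len;

	len = lttng_ust_read(fd, buf, sizeof(buf));
	if (len == sizeof(buf)) {
		/* full buffer received */
	} else if (len == 0) {
		/* end of file (possibly after a partial read) */
	} else {
		/* len < 0: read error, see errno */
	}
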
-/*
- * Returns the HOME directory path. Caller MUST NOT free(3) the returned
- * pointer.
- */
-static
-const char *get_lttng_home_dir(void)
-{
- const char *val;
-
- val = (const char *) lttng_ust_getenv("LTTNG_HOME");
- if (val != NULL) {
- return val;
- }
- return (const char *) lttng_ust_getenv("HOME");
-}
-
-/*
- * Force a read of TLS variables (this implies the TLS fixup needed for dlopen).
- */
-static
-void lttng_fixup_nest_count_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
-}
-
-static
-void lttng_fixup_ust_mutex_nest_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(ust_mutex_nest)));
-}
-
-/*
- * Fixup lttng-ust urcu TLS.
- */
-static
-void lttng_fixup_lttng_ust_urcu_tls(void)
-{
- (void) lttng_ust_urcu_read_ongoing();
-}
-
-void lttng_ust_fixup_tls(void)
-{
- lttng_fixup_lttng_ust_urcu_tls();
- lttng_fixup_ringbuffer_tls();
- lttng_fixup_vtid_tls();
- lttng_fixup_nest_count_tls();
- lttng_fixup_procname_tls();
- lttng_fixup_ust_mutex_nest_tls();
- lttng_ust_fixup_perf_counter_tls();
- lttng_ust_fixup_fd_tracker_tls();
- lttng_fixup_cgroup_ns_tls();
- lttng_fixup_ipc_ns_tls();
- lttng_fixup_net_ns_tls();
- lttng_fixup_time_ns_tls();
- lttng_fixup_uts_ns_tls();
- lttng_ust_fixup_ring_buffer_client_discard_tls();
- lttng_ust_fixup_ring_buffer_client_discard_rt_tls();
- lttng_ust_fixup_ring_buffer_client_overwrite_tls();
- lttng_ust_fixup_ring_buffer_client_overwrite_rt_tls();
-}
-
-/*
- * LTTng-UST uses Global Dynamic model TLS variables rather than IE
- * model because many versions of glibc don't preallocate a pool large
- * enough for TLS variables IE model defined in other shared libraries,
- * and causes issues when using LTTng-UST for Java tracing.
- *
- * Because of this use of Global Dynamic TLS variables, users wishing to
- * trace from signal handlers need to explicitly trigger the lazy
- * allocation of those variables for each thread before using them.
- * This can be triggered by calling lttng_ust_init_thread().
- */
-void lttng_ust_init_thread(void)
-{
- /*
- * Because those TLS variables are global dynamic, we need to
- * ensure those are initialized before a signal handler nesting over
- * this thread attempts to use them.
- */
- lttng_ust_fixup_tls();
-}
-
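
Because these TLS variables use the global-dynamic model, a thread that may record events from a signal handler should force their allocation before installing the handler. A hedged usage sketch follows; the worker function is illustrative, and the declaration of lttng_ust_init_thread() is assumed to come from <lttng/ust-thread.h>, which this file includes:

#include <lttng/ust-thread.h>	/* assumed to declare lttng_ust_init_thread() */
#include <pthread.h>

static void *worker(void *arg __attribute__((unused)))
{
	/* Force lazy TLS allocation for this thread before any signal
	 * handler nesting over it can attempt to trace. */
	lttng_ust_init_thread();

	/* ... install signal handlers and do the thread's work ... */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, worker, NULL))
		return 1;
	(void) pthread_join(tid, NULL);
	return 0;
}
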
-int lttng_get_notify_socket(void *owner)
-{
- struct sock_info *info = owner;
-
- return info->notify_socket;
-}
-
-
-char* lttng_ust_sockinfo_get_procname(void *owner)
-{
- struct sock_info *info = owner;
-
- return info->procname;
-}
-
-static
-void print_cmd(int cmd, int handle)
-{
- const char *cmd_name = "Unknown";
-
- if (cmd >= 0 && cmd < LTTNG_ARRAY_SIZE(cmd_name_mapping)
- && cmd_name_mapping[cmd]) {
- cmd_name = cmd_name_mapping[cmd];
- }
- DBG("Message Received \"%s\" (%d), Handle \"%s\" (%d)",
- cmd_name, cmd,
- lttng_ust_obj_get_name(handle), handle);
-}
-
-static
-int setup_global_apps(void)
-{
- int ret = 0;
- assert(!global_apps.wait_shm_mmap);
-
- global_apps.wait_shm_mmap = get_map_shm(&global_apps);
- if (!global_apps.wait_shm_mmap) {
- WARN("Unable to get map shm for global apps. Disabling LTTng-UST global tracing.");
- global_apps.allowed = 0;
- ret = -EIO;
- goto error;
- }
-
- global_apps.allowed = 1;
- lttng_pthread_getname_np(global_apps.procname, LTTNG_UST_ABI_PROCNAME_LEN);
-error:
- return ret;
-}
-
-static
-int setup_local_apps(void)
-{
- int ret = 0;
- const char *home_dir;
- uid_t uid;
-
- assert(!local_apps.wait_shm_mmap);
-
- uid = getuid();
- /*
- * Disallow per-user tracing for setuid binaries.
- */
- if (uid != geteuid()) {
- assert(local_apps.allowed == 0);
- ret = 0;
- goto end;
- }
- home_dir = get_lttng_home_dir();
- if (!home_dir) {
- WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
- assert(local_apps.allowed == 0);
- ret = -ENOENT;
- goto end;
- }
- local_apps.allowed = 1;
- snprintf(local_apps.sock_path, PATH_MAX, "%s/%s/%s",
- home_dir,
- LTTNG_DEFAULT_HOME_RUNDIR,
- LTTNG_UST_SOCK_FILENAME);
- snprintf(local_apps.wait_shm_path, PATH_MAX, "/%s-%u",
- LTTNG_UST_WAIT_FILENAME,
- uid);
-
- local_apps.wait_shm_mmap = get_map_shm(&local_apps);
- if (!local_apps.wait_shm_mmap) {
- WARN("Unable to get map shm for local apps. Disabling LTTng-UST per-user tracing.");
- local_apps.allowed = 0;
- ret = -EIO;
- goto end;
- }
-
- lttng_pthread_getname_np(local_apps.procname, LTTNG_UST_ABI_PROCNAME_LEN);
-end:
- return ret;
-}
-
-/*
- * Get socket timeout, in ms.
- * -1: wait forever. 0: don't wait. >0: timeout, in ms.
- */
-static
-long get_timeout(void)
-{
- long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
-
- if (!got_timeout_env) {
- str_timeout = lttng_ust_getenv("LTTNG_UST_REGISTER_TIMEOUT");
- got_timeout_env = 1;
- }
- if (str_timeout)
- constructor_delay_ms = strtol(str_timeout, NULL, 10);
- /* All negative values are considered as "-1". */
- if (constructor_delay_ms < -1)
- constructor_delay_ms = -1;
- return constructor_delay_ms;
-}
-
-/* Timeout for notify socket send and recv. */
-static
-long get_notify_sock_timeout(void)
-{
- return get_timeout();
-}
-
-/* Timeout for connecting to cmd and notify sockets. */
-static
-long get_connect_sock_timeout(void)
-{
- return get_timeout();
-}
-
-/*
- * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
- */
-static
-int get_constructor_timeout(struct timespec *constructor_timeout)
-{
- long constructor_delay_ms;
- int ret;
-
- constructor_delay_ms = get_timeout();
-
- switch (constructor_delay_ms) {
- case -1:/* fall-through */
- case 0:
- return constructor_delay_ms;
- default:
- break;
- }
-
- /*
- * If we are unable to find the current time, don't wait.
- */
- ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
- if (ret) {
- /* Don't wait. */
- return 0;
- }
- constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
- constructor_timeout->tv_nsec +=
- (constructor_delay_ms % 1000UL) * 1000000UL;
- if (constructor_timeout->tv_nsec >= 1000000000UL) {
- constructor_timeout->tv_sec++;
- constructor_timeout->tv_nsec -= 1000000000UL;
- }
- /* Timeout wait (constructor_delay_ms). */
- return 1;
-}
-
-static
-void get_allow_blocking(void)
-{
- const char *str_allow_blocking =
- lttng_ust_getenv("LTTNG_UST_ALLOW_BLOCKING");
-
- if (str_allow_blocking) {
- DBG("%s environment variable is set",
- "LTTNG_UST_ALLOW_BLOCKING");
- lttng_ust_ringbuffer_set_allow_blocking();
- }
-}
-
-static
-int register_to_sessiond(int socket, enum ustctl_socket_type type)
-{
- return ustcomm_send_reg_msg(socket,
- type,
- CAA_BITS_PER_LONG,
- lttng_ust_rb_alignof(uint8_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uint16_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uint32_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
- lttng_ust_rb_alignof(unsigned long) * CHAR_BIT);
-}
-
-static
-int send_reply(int sock, struct ustcomm_ust_reply *lur)
-{
- ssize_t len;
-
- len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
- switch (len) {
- case sizeof(*lur):
- DBG("message successfully sent");
- return 0;
- default:
- if (len == -ECONNRESET) {
- DBG("remote end closed connection");
- return 0;
- }
- if (len < 0)
- return len;
- DBG("incorrect message size: %zd", len);
- return -EINVAL;
- }
-}
-
-static
-void decrement_sem_count(unsigned int count)
-{
- int ret;
-
- assert(uatomic_read(&sem_count) >= count);
-
- if (uatomic_read(&sem_count) <= 0) {
- return;
- }
-
- ret = uatomic_add_return(&sem_count, -count);
- if (ret == 0) {
- ret = sem_post(&constructor_wait);
- assert(!ret);
- }
-}
-
-static
-int handle_register_done(struct sock_info *sock_info)
-{
- if (sock_info->registration_done)
- return 0;
- sock_info->registration_done = 1;
-
- decrement_sem_count(1);
- if (!sock_info->statedump_pending) {
- sock_info->initial_statedump_done = 1;
- decrement_sem_count(1);
- }
-
- return 0;
-}
-
-static
-int handle_register_failed(struct sock_info *sock_info)
-{
- if (sock_info->registration_done)
- return 0;
- sock_info->registration_done = 1;
- sock_info->initial_statedump_done = 1;
-
- decrement_sem_count(2);
-
- return 0;
-}
-
-/*
- * Only execute pending statedump after the constructor semaphore has
- * been posted by the current listener thread. This means statedump will
- * only be performed after the "registration done" command is received
- * from this thread's session daemon.
- *
- * This ensures we don't run into deadlock issues with the dynamic
- * loader mutex, which is held while the constructor is called and
- * waiting on the constructor semaphore. All operations requiring this
- * dynamic loader lock need to be postponed using this mechanism.
- *
- * In a scenario with two session daemons connected to the application,
- * it is possible that the first listener thread which receives the
- * registration done command issues its statedump while the dynamic
- * loader lock is still held by the application constructor waiting on
- * the semaphore. It will however be allowed to proceed when the
- * second session daemon sends the registration done command to the
- * second listener thread. This situation therefore does not produce
- * a deadlock.
- */
-static
-void handle_pending_statedump(struct sock_info *sock_info)
-{
- if (sock_info->registration_done && sock_info->statedump_pending) {
- sock_info->statedump_pending = 0;
- pthread_mutex_lock(&ust_fork_mutex);
- lttng_handle_pending_statedump(sock_info);
- pthread_mutex_unlock(&ust_fork_mutex);
-
- if (!sock_info->initial_statedump_done) {
- sock_info->initial_statedump_done = 1;
- decrement_sem_count(1);
- }
- }
-}
-
-static inline
-const char *bytecode_type_str(uint32_t cmd)
-{
- switch (cmd) {
- case LTTNG_UST_ABI_CAPTURE:
- return "capture";
- case LTTNG_UST_ABI_FILTER:
- return "filter";
- default:
- abort();
- }
-}
-
-static
-int handle_bytecode_recv(struct sock_info *sock_info,
- int sock, struct ustcomm_ust_msg *lum)
-{
- struct lttng_ust_bytecode_node *bytecode = NULL;
- enum lttng_ust_bytecode_type type;
- const struct lttng_ust_abi_objd_ops *ops;
- uint32_t data_size, data_size_max, reloc_offset;
- uint64_t seqnum;
- ssize_t len;
- int ret = 0;
-
- switch (lum->cmd) {
- case LTTNG_UST_ABI_FILTER:
- type = LTTNG_UST_BYTECODE_TYPE_FILTER;
- data_size = lum->u.filter.data_size;
- data_size_max = LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN;
- reloc_offset = lum->u.filter.reloc_offset;
- seqnum = lum->u.filter.seqnum;
- break;
- case LTTNG_UST_ABI_CAPTURE:
- type = LTTNG_UST_BYTECODE_TYPE_CAPTURE;
- data_size = lum->u.capture.data_size;
- data_size_max = LTTNG_UST_ABI_CAPTURE_BYTECODE_MAX_LEN;
- reloc_offset = lum->u.capture.reloc_offset;
- seqnum = lum->u.capture.seqnum;
- break;
- default:
- abort();
- }
-
- if (data_size > data_size_max) {
- ERR("Bytecode %s data size is too large: %u bytes",
- bytecode_type_str(lum->cmd), data_size);
- ret = -EINVAL;
- goto end;
- }
-
- if (reloc_offset > data_size) {
- ERR("Bytecode %s reloc offset %u is not within data",
- bytecode_type_str(lum->cmd), reloc_offset);
- ret = -EINVAL;
- goto end;
- }
-
- /* Allocate the structure AND the `data[]` field. */
- bytecode = zmalloc(sizeof(*bytecode) + data_size);
- if (!bytecode) {
- ret = -ENOMEM;
- goto end;
- }
-
- bytecode->bc.len = data_size;
- bytecode->bc.reloc_offset = reloc_offset;
- bytecode->bc.seqnum = seqnum;
- bytecode->type = type;
-
- len = ustcomm_recv_unix_sock(sock, bytecode->bc.data, bytecode->bc.len);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto end;
- default:
- if (len == bytecode->bc.len) {
- DBG("Bytecode %s data received",
- bytecode_type_str(lum->cmd));
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d",
- (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection",
- sock_info->name);
- ret = len;
- goto end;
- }
- ret = len;
- goto end;
- } else {
- DBG("Incorrect %s bytecode data message size: %zd",
- bytecode_type_str(lum->cmd), len);
- ret = -EINVAL;
- goto end;
- }
- }
-
- ops = lttng_ust_abi_objd_ops(lum->handle);
- if (!ops) {
- ret = -ENOENT;
- goto end;
- }
-
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &bytecode,
- NULL, sock_info);
- else
- ret = -ENOSYS;
-
-end:
- free(bytecode);
- return ret;
-}
-
-static
-int handle_message(struct sock_info *sock_info,
- int sock, struct ustcomm_ust_msg *lum)
-{
- int ret = 0;
- const struct lttng_ust_abi_objd_ops *ops;
- struct ustcomm_ust_reply lur;
- union lttng_ust_abi_args args;
- char ctxstr[LTTNG_UST_ABI_SYM_NAME_LEN]; /* App context string. */
- ssize_t len;
-
- memset(&lur, 0, sizeof(lur));
-
- if (ust_lock()) {
- ret = -LTTNG_UST_ERR_EXITING;
- goto error;
- }
-
- ops = lttng_ust_abi_objd_ops(lum->handle);
- if (!ops) {
- ret = -ENOENT;
- goto error;
- }
-
- switch (lum->cmd) {
- case LTTNG_UST_ABI_REGISTER_DONE:
- if (lum->handle == LTTNG_UST_ABI_ROOT_HANDLE)
- ret = handle_register_done(sock_info);
- else
- ret = -EINVAL;
- break;
- case LTTNG_UST_ABI_RELEASE:
- if (lum->handle == LTTNG_UST_ABI_ROOT_HANDLE)
- ret = -EPERM;
- else
- ret = lttng_ust_abi_objd_unref(lum->handle, 1);
- break;
- case LTTNG_UST_ABI_CAPTURE:
- case LTTNG_UST_ABI_FILTER:
- ret = handle_bytecode_recv(sock_info, sock, lum);
- if (ret)
- goto error;
- break;
- case LTTNG_UST_ABI_EXCLUSION:
- {
- /* Receive exclusion names */
- struct lttng_ust_excluder_node *node;
- unsigned int count;
-
- count = lum->u.exclusion.count;
- if (count == 0) {
- /* There are no names to read */
- ret = 0;
- goto error;
- }
- node = zmalloc(sizeof(*node) +
- count * LTTNG_UST_ABI_SYM_NAME_LEN);
- if (!node) {
- ret = -ENOMEM;
- goto error;
- }
- node->excluder.count = count;
- len = ustcomm_recv_unix_sock(sock, node->excluder.names,
- count * LTTNG_UST_ABI_SYM_NAME_LEN);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- free(node);
- goto error;
- default:
- if (len == count * LTTNG_UST_ABI_SYM_NAME_LEN) {
- DBG("Exclusion data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- free(node);
- goto error;
- }
- ret = len;
- free(node);
- goto error;
- } else {
- DBG("Incorrect exclusion data message size: %zd", len);
- ret = -EINVAL;
- free(node);
- goto error;
- }
- }
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &node,
- &args, sock_info);
- else
- ret = -ENOSYS;
- free(node);
- break;
- }
- case LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE:
- {
- int event_notifier_notif_fd, close_ret;
-
- len = ustcomm_recv_event_notifier_notif_fd_from_sessiond(sock,
- &event_notifier_notif_fd);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto error;
- case 1:
- break;
- default:
- if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d",
- (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection",
- sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("Incorrect event notifier fd message size: %zd",
- len);
- ret = -EINVAL;
- goto error;
- }
- }
- args.event_notifier_handle.event_notifier_notif_fd =
- event_notifier_notif_fd;
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- if (args.event_notifier_handle.event_notifier_notif_fd >= 0) {
- lttng_ust_lock_fd_tracker();
- close_ret = close(args.event_notifier_handle.event_notifier_notif_fd);
- lttng_ust_unlock_fd_tracker();
- if (close_ret)
- PERROR("close");
- }
- break;
- }
- case LTTNG_UST_ABI_CHANNEL:
- {
- void *chan_data;
- int wakeup_fd;
-
- len = ustcomm_recv_channel_from_sessiond(sock,
- &chan_data, lum->u.channel.len,
- &wakeup_fd);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto error;
- default:
- if (len == lum->u.channel.len) {
- DBG("channel data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("incorrect channel data message size: %zd", len);
- ret = -EINVAL;
- goto error;
- }
- }
- args.channel.chan_data = chan_data;
- args.channel.wakeup_fd = wakeup_fd;
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- if (args.channel.wakeup_fd >= 0) {
- int close_ret;
-
- lttng_ust_lock_fd_tracker();
- close_ret = close(args.channel.wakeup_fd);
- lttng_ust_unlock_fd_tracker();
- args.channel.wakeup_fd = -1;
- if (close_ret)
- PERROR("close");
- }
- free(args.channel.chan_data);
- break;
- }
- case LTTNG_UST_ABI_STREAM:
- {
- int close_ret;
-
- /* Receive shm_fd, wakeup_fd */
- ret = ustcomm_recv_stream_from_sessiond(sock,
- NULL,
- &args.stream.shm_fd,
- &args.stream.wakeup_fd);
- if (ret) {
- goto error;
- }
-
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- if (args.stream.shm_fd >= 0) {
- lttng_ust_lock_fd_tracker();
- close_ret = close(args.stream.shm_fd);
- lttng_ust_unlock_fd_tracker();
- args.stream.shm_fd = -1;
- if (close_ret)
- PERROR("close");
- }
- if (args.stream.wakeup_fd >= 0) {
- lttng_ust_lock_fd_tracker();
- close_ret = close(args.stream.wakeup_fd);
- lttng_ust_unlock_fd_tracker();
- args.stream.wakeup_fd = -1;
- if (close_ret)
- PERROR("close");
- }
- break;
- }
- case LTTNG_UST_ABI_CONTEXT:
- switch (lum->u.context.ctx) {
- case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
- {
- char *p;
- size_t ctxlen, recvlen;
-
- ctxlen = strlen("$app.") + lum->u.context.u.app_ctx.provider_name_len - 1
- + strlen(":") + lum->u.context.u.app_ctx.ctx_name_len;
- if (ctxlen >= LTTNG_UST_ABI_SYM_NAME_LEN) {
- ERR("Application context string length size is too large: %zu bytes",
- ctxlen);
- ret = -EINVAL;
- goto error;
- }
- strcpy(ctxstr, "$app.");
- p = &ctxstr[strlen("$app.")];
- recvlen = ctxlen - strlen("$app.");
- len = ustcomm_recv_unix_sock(sock, p, recvlen);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto error;
- default:
- if (len == recvlen) {
- DBG("app context data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("incorrect app context data message size: %zd", len);
- ret = -EINVAL;
- goto error;
- }
- }
- /* Put : between provider and ctxname. */
- p[lum->u.context.u.app_ctx.provider_name_len - 1] = ':';
- args.app_context.ctxname = ctxstr;
- break;
- }
- default:
- break;
- }
- if (ops->cmd) {
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- } else {
- ret = -ENOSYS;
- }
- break;
- case LTTNG_UST_ABI_COUNTER:
- {
- void *counter_data;
-
- len = ustcomm_recv_counter_from_sessiond(sock,
- &counter_data, lum->u.counter.len);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto error;
- default:
- if (len == lum->u.counter.len) {
- DBG("counter data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("incorrect counter data message size: %zd", len);
- ret = -EINVAL;
- goto error;
- }
- }
- args.counter.counter_data = counter_data;
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- free(args.counter.counter_data);
- break;
- }
- case LTTNG_UST_ABI_COUNTER_GLOBAL:
- {
- /* Receive shm_fd */
- ret = ustcomm_recv_counter_shm_from_sessiond(sock,
- &args.counter_shm.shm_fd);
- if (ret) {
- goto error;
- }
-
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- if (args.counter_shm.shm_fd >= 0) {
- int close_ret;
-
- lttng_ust_lock_fd_tracker();
- close_ret = close(args.counter_shm.shm_fd);
- lttng_ust_unlock_fd_tracker();
- args.counter_shm.shm_fd = -1;
- if (close_ret)
- PERROR("close");
- }
- break;
- }
- case LTTNG_UST_ABI_COUNTER_CPU:
- {
- /* Receive shm_fd */
- ret = ustcomm_recv_counter_shm_from_sessiond(sock,
- &args.counter_shm.shm_fd);
- if (ret) {
- goto error;
- }
-
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- if (args.counter_shm.shm_fd >= 0) {
- int close_ret;
-
- lttng_ust_lock_fd_tracker();
- close_ret = close(args.counter_shm.shm_fd);
- lttng_ust_unlock_fd_tracker();
- args.counter_shm.shm_fd = -1;
- if (close_ret)
- PERROR("close");
- }
- break;
- }
- case LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE:
- {
- /* Receive struct lttng_ust_event_notifier */
- struct lttng_ust_abi_event_notifier event_notifier;
-
- if (sizeof(event_notifier) != lum->u.event_notifier.len) {
- DBG("incorrect event notifier data message size: %u", lum->u.event_notifier.len);
- ret = -EINVAL;
- goto error;
- }
- len = ustcomm_recv_unix_sock(sock, &event_notifier, sizeof(event_notifier));
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto error;
- default:
- if (len == sizeof(event_notifier)) {
- DBG("event notifier data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("incorrect event notifier data message size: %zd", len);
- ret = -EINVAL;
- goto error;
- }
- }
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &event_notifier,
- &args, sock_info);
- else
- ret = -ENOSYS;
- break;
- }
-
- default:
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- break;
- }
-
- lur.handle = lum->handle;
- lur.cmd = lum->cmd;
- lur.ret_val = ret;
- if (ret >= 0) {
- lur.ret_code = LTTNG_UST_OK;
- } else {
- /*
- * Use -LTTNG_UST_ERR as wildcard for UST internal
- * errors that are not caused by the transport, except if
- * we already have a more precise error message to
- * report.
- */
- if (ret > -LTTNG_UST_ERR) {
- /* Translate code to UST error. */
- switch (ret) {
- case -EEXIST:
- lur.ret_code = -LTTNG_UST_ERR_EXIST;
- break;
- case -EINVAL:
- lur.ret_code = -LTTNG_UST_ERR_INVAL;
- break;
- case -ENOENT:
- lur.ret_code = -LTTNG_UST_ERR_NOENT;
- break;
- case -EPERM:
- lur.ret_code = -LTTNG_UST_ERR_PERM;
- break;
- case -ENOSYS:
- lur.ret_code = -LTTNG_UST_ERR_NOSYS;
- break;
- default:
- lur.ret_code = -LTTNG_UST_ERR;
- break;
- }
- } else {
- lur.ret_code = ret;
- }
- }
- if (ret >= 0) {
- switch (lum->cmd) {
- case LTTNG_UST_ABI_TRACER_VERSION:
- lur.u.version = lum->u.version;
- break;
- case LTTNG_UST_ABI_TRACEPOINT_LIST_GET:
- memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
- break;
- }
- }
- DBG("Return value: %d", lur.ret_val);
-
- ust_unlock();
-
- /*
- * Perform delayed statedump operations outside of the UST
- * lock. We need to take the dynamic loader lock before we take
- * the UST lock internally within handle_pending_statedump().
- */
- handle_pending_statedump(sock_info);
-
- if (ust_lock()) {
- ret = -LTTNG_UST_ERR_EXITING;
- goto error;
- }
-
- ret = send_reply(sock, &lur);
- if (ret < 0) {
- DBG("error sending reply");
- goto error;
- }
-
- /*
- * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field
- * after the reply.
- */
- if (lur.ret_code == LTTNG_UST_OK) {
- switch (lum->cmd) {
- case LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET:
- len = ustcomm_send_unix_sock(sock,
- &args.field_list.entry,
- sizeof(args.field_list.entry));
- if (len < 0) {
- ret = len;
- goto error;
- }
- if (len != sizeof(args.field_list.entry)) {
- ret = -EINVAL;
- goto error;
- }
- }
- }
-
-error:
- ust_unlock();
-
- return ret;
-}
-
-static
-void cleanup_sock_info(struct sock_info *sock_info, int exiting)
-{
- int ret;
-
- if (sock_info->root_handle != -1) {
- ret = lttng_ust_abi_objd_unref(sock_info->root_handle, 1);
- if (ret) {
- ERR("Error unref root handle");
- }
- sock_info->root_handle = -1;
- }
- sock_info->registration_done = 0;
- sock_info->initial_statedump_done = 0;
-
- /*
- * wait_shm_mmap, socket and notify socket are used by listener
- * threads outside of the ust lock, so we cannot tear them down
- * ourselves, because we cannot join on these threads. Leave
- * responsibility of cleaning up these resources to the OS
- * process exit.
- */
- if (exiting)
- return;
-
- if (sock_info->socket != -1) {
- ret = ustcomm_close_unix_sock(sock_info->socket);
- if (ret) {
- ERR("Error closing ust cmd socket");
- }
- sock_info->socket = -1;
- }
- if (sock_info->notify_socket != -1) {
- ret = ustcomm_close_unix_sock(sock_info->notify_socket);
- if (ret) {
- ERR("Error closing ust notify socket");
- }
- sock_info->notify_socket = -1;
- }
- if (sock_info->wait_shm_mmap) {
- long page_size;
-
- page_size = LTTNG_UST_PAGE_SIZE;
- if (page_size <= 0) {
- if (!page_size) {
- errno = EINVAL;
- }
- PERROR("Error in sysconf(_SC_PAGE_SIZE)");
- } else {
- ret = munmap(sock_info->wait_shm_mmap, page_size);
- if (ret) {
- ERR("Error unmapping wait shm");
- }
- }
- sock_info->wait_shm_mmap = NULL;
- }
-}
-
-/*
- * Using fork to set umask in the child process (not multi-thread safe).
- * We deal with the shm_open vs ftruncate race (happening when the
- * sessiond owns the shm and does not let everybody modify it, to ensure
- * safety against shm_unlink) by simply letting the mmap fail and
- * retrying after a few seconds.
- * For global shm, everybody has rw access to it until the sessiond
- * starts.
- */
-static
-int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
-{
- int wait_shm_fd, ret;
- pid_t pid;
-
- /*
- * Try to open read-only.
- */
- wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
- if (wait_shm_fd >= 0) {
- int32_t tmp_read;
- ssize_t len;
- size_t bytes_read = 0;
-
- /*
- * Try to read the fd. If unable to do so, try opening
- * it in write mode.
- */
- do {
- len = read(wait_shm_fd,
- &((char *) &tmp_read)[bytes_read],
- sizeof(tmp_read) - bytes_read);
- if (len > 0) {
- bytes_read += len;
- }
- } while ((len < 0 && errno == EINTR)
- || (len > 0 && bytes_read < sizeof(tmp_read)));
- if (bytes_read != sizeof(tmp_read)) {
- ret = close(wait_shm_fd);
- if (ret) {
- ERR("close wait_shm_fd");
- }
- goto open_write;
- }
- goto end;
- } else if (wait_shm_fd < 0 && errno != ENOENT) {
- /*
- * Read-only open did not work, and it's not because the
- * entry was not present. It's a failure that prohibits
- * using shm.
- */
- ERR("Error opening shm %s", sock_info->wait_shm_path);
- goto end;
- }
-
-open_write:
- /*
- * If the open failed because the file did not exist, or because
- * the file was not truncated yet, try creating it ourselves.
- */
- URCU_TLS(lttng_ust_nest_count)++;
- pid = fork();
- URCU_TLS(lttng_ust_nest_count)--;
- if (pid > 0) {
- int status;
-
- /*
- * Parent: wait for child to return, in which case the
- * shared memory map will have been created.
- */
- pid = wait(&status);
- if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
- wait_shm_fd = -1;
- goto end;
- }
- /*
- * Try to open read-only again after creation.
- */
- wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
- if (wait_shm_fd < 0) {
- /*
- * Read-only open did not work. It's a failure
- * that prohibits using shm.
- */
- ERR("Error opening shm %s", sock_info->wait_shm_path);
- goto end;
- }
- goto end;
- } else if (pid == 0) {
- int create_mode;
-
- /* Child */
- create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
- if (sock_info->global)
- create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
- /*
- * We're alone in a child process, so we can modify the
- * process-wide umask.
- */
- umask(~create_mode);
- /*
- * Try creating shm (or get rw access).
- * We don't do an exclusive open, because we allow other
- * processes to create+ftruncate it concurrently.
- */
- wait_shm_fd = shm_open(sock_info->wait_shm_path,
- O_RDWR | O_CREAT, create_mode);
- if (wait_shm_fd >= 0) {
- ret = ftruncate(wait_shm_fd, mmap_size);
- if (ret) {
- PERROR("ftruncate");
- _exit(EXIT_FAILURE);
- }
- _exit(EXIT_SUCCESS);
- }
- /*
- * For local shm, we need to have rw access to accept
- * opening it: this means the local sessiond will be
- * able to wake us up. For global shm, we open it even
- * if rw access is not granted, because the root.root
- * sessiond will be able to override all rights and wake
- * us up.
- */
- if (!sock_info->global && errno != EACCES) {
- ERR("Error opening shm %s", sock_info->wait_shm_path);
- _exit(EXIT_FAILURE);
- }
- /*
- * The shm exists, but we cannot open it RW. Report
- * success.
- */
- _exit(EXIT_SUCCESS);
- } else {
- return -1;
- }
-end:
- if (wait_shm_fd >= 0 && !sock_info->global) {
- struct stat statbuf;
-
- /*
- * Ensure that our user is the owner of the shm file for
- * local shm. If we do not own the file, it means our
- * sessiond will not have access to wake us up (there is
- * probably a rogue process trying to fake our
- * sessiond). Fallback to polling method in this case.
- */
- ret = fstat(wait_shm_fd, &statbuf);
- if (ret) {
- PERROR("fstat");
- goto error_close;
- }
- if (statbuf.st_uid != getuid())
- goto error_close;
- }
- return wait_shm_fd;
-
-error_close:
- ret = close(wait_shm_fd);
- if (ret) {
- PERROR("Error closing fd");
- }
- return -1;
-}
-
-static
-char *get_map_shm(struct sock_info *sock_info)
-{
- long page_size;
- int wait_shm_fd, ret;
- char *wait_shm_mmap;
-
- page_size = sysconf(_SC_PAGE_SIZE);
- if (page_size <= 0) {
- if (!page_size) {
- errno = EINVAL;
- }
- PERROR("Error in sysconf(_SC_PAGE_SIZE)");
- goto error;
- }
-
- lttng_ust_lock_fd_tracker();
- wait_shm_fd = get_wait_shm(sock_info, page_size);
- if (wait_shm_fd < 0) {
- lttng_ust_unlock_fd_tracker();
- goto error;
- }
-
- ret = lttng_ust_add_fd_to_tracker(wait_shm_fd);
- if (ret < 0) {
- ret = close(wait_shm_fd);
- if (ret) {
- PERROR("Error closing fd");
- }
- lttng_ust_unlock_fd_tracker();
- goto error;
- }
-
- wait_shm_fd = ret;
- lttng_ust_unlock_fd_tracker();
-
- wait_shm_mmap = mmap(NULL, page_size, PROT_READ,
- MAP_SHARED, wait_shm_fd, 0);
-
- /* close shm fd immediately after taking the mmap reference */
- lttng_ust_lock_fd_tracker();
- ret = close(wait_shm_fd);
- if (!ret) {
- lttng_ust_delete_fd_from_tracker(wait_shm_fd);
- } else {
- PERROR("Error closing fd");
- }
- lttng_ust_unlock_fd_tracker();
-
- if (wait_shm_mmap == MAP_FAILED) {
- DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
- goto error;
- }
- return wait_shm_mmap;
-
-error:
- return NULL;
-}
-
-static
-void wait_for_sessiond(struct sock_info *sock_info)
-{
- /* Use ust_lock to check if we should quit. */
- if (ust_lock()) {
- goto quit;
- }
- if (wait_poll_fallback) {
- goto error;
- }
- ust_unlock();
-
- assert(sock_info->wait_shm_mmap);
-
- DBG("Waiting for %s apps sessiond", sock_info->name);
- /* Wait for futex wakeup */
- if (uatomic_read((int32_t *) sock_info->wait_shm_mmap))
- goto end_wait;
-
- while (lttng_ust_futex_async((int32_t *) sock_info->wait_shm_mmap,
- FUTEX_WAIT, 0, NULL, NULL, 0)) {
- switch (errno) {
- case EWOULDBLOCK:
- /* Value already changed. */
- goto end_wait;
- case EINTR:
- /* Retry if interrupted by signal. */
- break; /* Get out of switch. */
- case EFAULT:
- wait_poll_fallback = 1;
- DBG(
-"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
-"do not support FUTEX_WAKE on read-only memory mappings correctly. "
-"Please upgrade your kernel "
-"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
-"mainline). LTTng-UST will use polling mode fallback.");
- if (ust_err_debug_enabled())
- PERROR("futex");
- goto end_wait;
- }
- }
-end_wait:
- return;
-
-quit:
- ust_unlock();
- return;
-
-error:
- ust_unlock();
- return;
-}
-
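
For context, the wake-up this function waits for happens on the other side of the wait shm: the session daemon stores a non-zero value in the shared 32-bit word and issues FUTEX_WAKE, which is why a non-zero value read here (or EWOULDBLOCK) means "already woken". A rough, hedged sketch of that side of the protocol (not code from this project; the function name and details are illustrative):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>

/* Illustrative only: mark the wait word and wake every waiter
 * blocked in FUTEX_WAIT on it. */
static void wake_ust_waiters(int32_t *wait_shm_word)
{
	__atomic_store_n(wait_shm_word, 1, __ATOMIC_SEQ_CST);
	(void) syscall(SYS_futex, wait_shm_word, FUTEX_WAKE,
			INT_MAX, NULL, NULL, 0);
}
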
-/*
- * This thread does not allocate any resource, except within
- * handle_message, within mutex protection. This mutex protects against
- * fork and exit.
- * The other moment it allocates resources is at socket connection, which
- * is also protected by the mutex.
- */
-static
-void *ust_listener_thread(void *arg)
-{
- struct sock_info *sock_info = arg;
- int sock, ret, prev_connect_failed = 0, has_waited = 0, fd;
- long timeout;
-
- lttng_ust_fixup_tls();
- /*
- * If available, add '-ust' to the end of this thread's
- * process name
- */
- ret = lttng_ust_setustprocname();
- if (ret) {
- ERR("Unable to set UST process name");
- }
-
- /* Restart trying to connect to the session daemon */
-restart:
- if (prev_connect_failed) {
- /* Wait for sessiond availability with pipe */
- wait_for_sessiond(sock_info);
- if (has_waited) {
- has_waited = 0;
- /*
- * Sleep for 5 seconds before retrying after a
- * sequence of failure / wait / failure. This
- * deals with a killed or broken session daemon.
- */
- sleep(5);
- } else {
- has_waited = 1;
- }
- prev_connect_failed = 0;
- }
-
- if (ust_lock()) {
- goto quit;
- }
-
- if (sock_info->socket != -1) {
- /* FD tracker is updated by ustcomm_close_unix_sock() */
- ret = ustcomm_close_unix_sock(sock_info->socket);
- if (ret) {
- ERR("Error closing %s ust cmd socket",
- sock_info->name);
- }
- sock_info->socket = -1;
- }
- if (sock_info->notify_socket != -1) {
- /* FD tracker is updated by ustcomm_close_unix_sock() */
- ret = ustcomm_close_unix_sock(sock_info->notify_socket);
- if (ret) {
- ERR("Error closing %s ust notify socket",
- sock_info->name);
- }
- sock_info->notify_socket = -1;
- }
-
-
- /*
- * Register. We need to perform both connect and sending
- * registration message before doing the next connect otherwise
- * we may reach unix socket connect queue max limits and block
- * on the 2nd connect while the session daemon is awaiting the
- * first connect registration message.
- */
- /* Connect cmd socket */
- lttng_ust_lock_fd_tracker();
- ret = ustcomm_connect_unix_sock(sock_info->sock_path,
- get_connect_sock_timeout());
- if (ret < 0) {
- lttng_ust_unlock_fd_tracker();
- DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
- prev_connect_failed = 1;
-
- /*
- * If we cannot find the sessiond daemon, don't delay
- * constructor execution.
- */
- ret = handle_register_failed(sock_info);
- assert(!ret);
- ust_unlock();
- goto restart;
- }
- fd = ret;
- ret = lttng_ust_add_fd_to_tracker(fd);
- if (ret < 0) {
- ret = close(fd);
- if (ret) {
- PERROR("close on sock_info->socket");
- }
- ret = -1;
- lttng_ust_unlock_fd_tracker();
- ust_unlock();
- goto quit;
- }
-
- sock_info->socket = ret;
- lttng_ust_unlock_fd_tracker();
-
- ust_unlock();
- /*
- * Unlock/relock ust lock because connect is blocking (with
- * timeout). Don't delay constructors on the ust lock for too
- * long.
- */
- if (ust_lock()) {
- goto quit;
- }
-
- /*
- * Create only one root handle per listener thread for the whole
- * process lifetime, so we ensure we get the ID which is statically
- * assigned to the root handle.
- */
- if (sock_info->root_handle == -1) {
- ret = lttng_abi_create_root_handle();
- if (ret < 0) {
- ERR("Error creating root handle");
- goto quit;
- }
- sock_info->root_handle = ret;
- }
-
- ret = register_to_sessiond(sock_info->socket, USTCTL_SOCKET_CMD);
- if (ret < 0) {
- ERR("Error registering to %s ust cmd socket",
- sock_info->name);
- prev_connect_failed = 1;
- /*
- * If we cannot register to the sessiond daemon, don't
- * delay constructor execution.
- */
- ret = handle_register_failed(sock_info);
- assert(!ret);
- ust_unlock();
- goto restart;
- }
-
- ust_unlock();
- /*
- * Unlock/relock ust lock because connect is blocking (with
- * timeout). Don't delay constructors on the ust lock for too
- * long.
- */
- if (ust_lock()) {
- goto quit;
- }
-
- /* Connect notify socket */
- lttng_ust_lock_fd_tracker();
- ret = ustcomm_connect_unix_sock(sock_info->sock_path,
- get_connect_sock_timeout());
- if (ret < 0) {
- lttng_ust_unlock_fd_tracker();
- DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
- prev_connect_failed = 1;
-
- /*
- * If we cannot find the sessiond daemon, don't delay
- * constructor execution.
- */
- ret = handle_register_failed(sock_info);
- assert(!ret);
- ust_unlock();
- goto restart;
- }
-
- fd = ret;
- ret = lttng_ust_add_fd_to_tracker(fd);
- if (ret < 0) {
- ret = close(fd);
- if (ret) {
- PERROR("close on sock_info->notify_socket");
- }
- ret = -1;
- lttng_ust_unlock_fd_tracker();
- ust_unlock();
- goto quit;
- }
-
- sock_info->notify_socket = ret;
- lttng_ust_unlock_fd_tracker();
-
- ust_unlock();
- /*
- * Unlock/relock ust lock because connect is blocking (with
- * timeout). Don't delay constructors on the ust lock for too
- * long.
- */
- if (ust_lock()) {
- goto quit;
- }
-
- timeout = get_notify_sock_timeout();
- if (timeout >= 0) {
- /*
- * Give at least 10ms to sessiond to reply to
- * notifications.
- */
- if (timeout < 10)
- timeout = 10;
- ret = ustcomm_setsockopt_rcv_timeout(sock_info->notify_socket,
- timeout);
- if (ret < 0) {
- WARN("Error setting socket receive timeout");
- }
- ret = ustcomm_setsockopt_snd_timeout(sock_info->notify_socket,
- timeout);
- if (ret < 0) {
- WARN("Error setting socket send timeout");
- }
- } else if (timeout < -1) {
- WARN("Unsupported timeout value %ld", timeout);
- }
-
- ret = register_to_sessiond(sock_info->notify_socket,
- USTCTL_SOCKET_NOTIFY);
- if (ret < 0) {
- ERR("Error registering to %s ust notify socket",
- sock_info->name);
- prev_connect_failed = 1;
- /*
- * If we cannot register to the sessiond daemon, don't
- * delay constructor execution.
- */
- ret = handle_register_failed(sock_info);
- assert(!ret);
- ust_unlock();
- goto restart;
- }
- sock = sock_info->socket;
-
- ust_unlock();
-
- for (;;) {
- ssize_t len;
- struct ustcomm_ust_msg lum;
-
- len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
- switch (len) {
- case 0: /* orderly shutdown */
- DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
- if (ust_lock()) {
- goto quit;
- }
- /*
- * Either sessiond has shut down or refused us by closing the socket.
- * In either case, we don't want to delay constructor execution,
- * and we need to wait before retrying.
- */
- prev_connect_failed = 1;
- /*
- * If we cannot register to the sessiond daemon, don't
- * delay constructor execution.
- */
- ret = handle_register_failed(sock_info);
- assert(!ret);
- ust_unlock();
- goto end;
- case sizeof(lum):
- print_cmd(lum.cmd, lum.handle);
- ret = handle_message(sock_info, sock, &lum);
- if (ret) {
- ERR("Error handling message for %s socket",
- sock_info->name);
- /*
- * Close socket if protocol error is
- * detected.
- */
- goto end;
- }
- continue;
- default:
- if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- } else {
- DBG("incorrect message size (%s socket): %zd", sock_info->name, len);
- }
- if (len == -ECONNRESET) {
- DBG("%s remote end closed connection", sock_info->name);
- goto end;
- }
- goto end;
- }
-
- }
-end:
- if (ust_lock()) {
- goto quit;
- }
- /* Cleanup socket handles before trying to reconnect */
- lttng_ust_abi_objd_table_owner_cleanup(sock_info);
- ust_unlock();
- goto restart; /* try to reconnect */
-
-quit:
- ust_unlock();
-
- pthread_mutex_lock(&ust_exit_mutex);
- sock_info->thread_active = 0;
- pthread_mutex_unlock(&ust_exit_mutex);
- return NULL;
-}
-
-/*
- * Weak symbol to call when the ust malloc wrapper is not loaded.
- */
-__attribute__((weak))
-void lttng_ust_libc_wrapper_malloc_init(void)
-{
-}
-
-/*
- * sessiond monitoring thread: monitor presence of global and per-user
- * sessiond by polling the application common named pipe.
- */
-static
-void lttng_ust_init(void)
- __attribute__((constructor));
-static
-void lttng_ust_init(void)
-{
- struct timespec constructor_timeout;
- sigset_t sig_all_blocked, orig_parent_mask;
- pthread_attr_t thread_attr;
- int timeout_mode;
- int ret;
- void *handle;
-
- if (uatomic_xchg(&initialized, 1) == 1)
- return;
-
- /*
- * Fixup interdependency between TLS fixup mutex (which happens
- * to be the dynamic linker mutex) and ust_lock, taken within
- * the ust lock.
- */
- lttng_ust_fixup_tls();
-
- lttng_ust_loaded = 1;
-
- /*
- * We need to ensure that the liblttng-ust library is not unloaded to avoid
- * the unloading of code used by the ust_listener_threads as we can not
- * reliably know when they exited. To do that, manually load
- * liblttng-ust.so to increment the dynamic loader's internal refcount for
- * this library so it never becomes zero, thus never gets unloaded from the
- * address space of the process. Since we are already running in the
- * constructor of the LTTNG_UST_LIB_SONAME library, calling dlopen will
- * simply increment the refcount and no additional work is needed by the
- * dynamic loader as the shared library is already loaded in the address
- * space. As a safeguard, we use the RTLD_NODELETE flag to prevent
- * unloading of the UST library if its refcount becomes zero (which should
- * never happen). Do the return value check but discard the handle at the
- * end of the function as it's not needed.
- */
- handle = dlopen(LTTNG_UST_LIB_SONAME, RTLD_LAZY | RTLD_NODELETE);
- if (!handle) {
- ERR("dlopen of liblttng-ust shared library (%s).", LTTNG_UST_LIB_SONAME);
- }
-
- /*
- * We want precise control over the order in which we construct
- * our sub-libraries vs starting to receive commands from
- * sessiond (otherwise leading to errors when trying to create
- * sessiond before the init functions are completed).
- */
- ust_err_init();
- lttng_ust_getenv_init(); /* Needs ust_err_init() to be completed. */
- lttng_ust_tp_init();
- lttng_ust_init_fd_tracker();
- lttng_ust_clock_init();
- lttng_ust_getcpu_init();
- lttng_ust_statedump_init();
- lttng_ust_ring_buffer_clients_init();
- lttng_ust_counter_clients_init();
- lttng_perf_counter_init();
- /*
- * Invoke ust malloc wrapper init before starting other threads.
- */
- lttng_ust_libc_wrapper_malloc_init();
-
- timeout_mode = get_constructor_timeout(&constructor_timeout);
-
- get_allow_blocking();
-
- ret = sem_init(&constructor_wait, 0, 0);
- if (ret) {
- PERROR("sem_init");
- }
-
- ret = setup_global_apps();
- if (ret) {
- assert(global_apps.allowed == 0);
- DBG("global apps setup returned %d", ret);
- }
-
- ret = setup_local_apps();
- if (ret) {
- assert(local_apps.allowed == 0);
- DBG("local apps setup returned %d", ret);
- }
-
- /* A new thread created by pthread_create inherits the signal mask
- * from the parent. To avoid any signal being received by the
- * listener thread, we block all signals temporarily in the parent,
- * while we create the listener thread.
- */
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
-
- ret = pthread_attr_init(&thread_attr);
- if (ret) {
- ERR("pthread_attr_init: %s", strerror(ret));
- }
- ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
- if (ret) {
- ERR("pthread_attr_setdetachstate: %s", strerror(ret));
- }
-
- if (global_apps.allowed) {
- pthread_mutex_lock(&ust_exit_mutex);
- ret = pthread_create(&global_apps.ust_listener, &thread_attr,
- ust_listener_thread, &global_apps);
- if (ret) {
- ERR("pthread_create global: %s", strerror(ret));
- }
- global_apps.thread_active = 1;
- pthread_mutex_unlock(&ust_exit_mutex);
- } else {
- handle_register_done(&global_apps);
- }
-
- if (local_apps.allowed) {
- pthread_mutex_lock(&ust_exit_mutex);
- ret = pthread_create(&local_apps.ust_listener, &thread_attr,
- ust_listener_thread, &local_apps);
- if (ret) {
- ERR("pthread_create local: %s", strerror(ret));
- }
- local_apps.thread_active = 1;
- pthread_mutex_unlock(&ust_exit_mutex);
- } else {
- handle_register_done(&local_apps);
- }
- ret = pthread_attr_destroy(&thread_attr);
- if (ret) {
- ERR("pthread_attr_destroy: %s", strerror(ret));
- }
-
- /* Restore original signal mask in parent */
- ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
-
- switch (timeout_mode) {
- case 1: /* timeout wait */
- do {
- ret = sem_timedwait(&constructor_wait,
- &constructor_timeout);
- } while (ret < 0 && errno == EINTR);
- if (ret < 0) {
- switch (errno) {
- case ETIMEDOUT:
- ERR("Timed out waiting for lttng-sessiond");
- break;
- case EINVAL:
- PERROR("sem_timedwait");
- break;
- default:
- ERR("Unexpected error \"%s\" returned by sem_timedwait",
- strerror(errno));
- }
- }
- break;
- case -1:/* wait forever */
- do {
- ret = sem_wait(&constructor_wait);
- } while (ret < 0 && errno == EINTR);
- if (ret < 0) {
- switch (errno) {
- case EINVAL:
- PERROR("sem_wait");
- break;
- default:
- ERR("Unexpected error \"%s\" returned by sem_wait",
- strerror(errno));
- }
- }
- break;
- case 0: /* no timeout */
- break;
- }
-}
-
-static
-void lttng_ust_cleanup(int exiting)
-{
- cleanup_sock_info(&global_apps, exiting);
- cleanup_sock_info(&local_apps, exiting);
- local_apps.allowed = 0;
- global_apps.allowed = 0;
- /*
-	 * The teardown steps in this function all affect data structures
-	 * accessed under the UST lock by the listener thread. This
-	 * lock, along with the lttng_ust_comm_should_quit flag, ensures
-	 * that none of these threads is accessing this data at this
-	 * point.
- */
- lttng_ust_abi_exit();
- lttng_ust_abi_events_exit();
- lttng_perf_counter_exit();
- lttng_ust_ring_buffer_clients_exit();
- lttng_ust_counter_clients_exit();
- lttng_ust_statedump_destroy();
- lttng_ust_tp_exit();
- if (!exiting) {
- /* Reinitialize values for fork */
- sem_count = sem_count_initial_value;
- lttng_ust_comm_should_quit = 0;
- initialized = 0;
- }
-}
-
-static
-void lttng_ust_exit(void)
- __attribute__((destructor));
-static
-void lttng_ust_exit(void)
-{
- int ret;
-
- /*
- * Using pthread_cancel here because:
- * A) we don't want to hang application teardown.
- * B) the thread is not allocating any resource.
- */
-
- /*
- * Require the communication thread to quit. Synchronize with
- * mutexes to ensure it is not in a mutex critical section when
- * pthread_cancel is later called.
- */
- ust_lock_nocheck();
- lttng_ust_comm_should_quit = 1;
- ust_unlock();
-
- pthread_mutex_lock(&ust_exit_mutex);
- /* cancel threads */
- if (global_apps.thread_active) {
- ret = pthread_cancel(global_apps.ust_listener);
- if (ret) {
- ERR("Error cancelling global ust listener thread: %s",
- strerror(ret));
- } else {
- global_apps.thread_active = 0;
- }
- }
- if (local_apps.thread_active) {
- ret = pthread_cancel(local_apps.ust_listener);
- if (ret) {
- ERR("Error cancelling local ust listener thread: %s",
- strerror(ret));
- } else {
- local_apps.thread_active = 0;
- }
- }
- pthread_mutex_unlock(&ust_exit_mutex);
-
- /*
- * Do NOT join threads: use of sys_futex makes it impossible to
- * join the threads without using async-cancel, but async-cancel
- * is delivered by a signal, which could hit the target thread
- * anywhere in its code path, including while the ust_lock() is
- * held, causing a deadlock for the other thread. Let the OS
-	 * clean up the threads if they are stalled in a syscall.
- */
- lttng_ust_cleanup(1);
-}
-
-static
-void ust_context_ns_reset(void)
-{
- lttng_context_pid_ns_reset();
- lttng_context_cgroup_ns_reset();
- lttng_context_ipc_ns_reset();
- lttng_context_mnt_ns_reset();
- lttng_context_net_ns_reset();
- lttng_context_user_ns_reset();
- lttng_context_time_ns_reset();
- lttng_context_uts_ns_reset();
-}
-
-static
-void ust_context_vuids_reset(void)
-{
- lttng_context_vuid_reset();
- lttng_context_veuid_reset();
- lttng_context_vsuid_reset();
-}
-
-static
-void ust_context_vgids_reset(void)
-{
- lttng_context_vgid_reset();
- lttng_context_vegid_reset();
- lttng_context_vsgid_reset();
-}
-
-/*
- * We exclude the worker threads across fork and clone (except
- * CLONE_VM), because these system calls only keep the forking thread
- * running in the child. Therefore, we don't want to call fork or clone
- * in the middle of a tracepoint or ust tracing state modification.
- * Holding this mutex protects these structures across fork and clone.
- */
-void lttng_ust_before_fork(sigset_t *save_sigset)
-{
- /*
-	 * Disable signals. This prevents the child from intervening
-	 * before it is properly set up for tracing. It is safer to
- * disable all signals, because then we know we are not breaking
- * anything by restoring the original mask.
- */
- sigset_t all_sigs;
- int ret;
-
- /* Fixup lttng-ust TLS. */
- lttng_ust_fixup_tls();
-
- if (URCU_TLS(lttng_ust_nest_count))
- return;
- /* Disable signals */
- sigfillset(&all_sigs);
- ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
- if (ret == -1) {
- PERROR("sigprocmask");
- }
-
- pthread_mutex_lock(&ust_fork_mutex);
-
- ust_lock_nocheck();
- lttng_ust_urcu_before_fork();
- lttng_ust_lock_fd_tracker();
- lttng_perf_lock();
-}
-
-static void ust_after_fork_common(sigset_t *restore_sigset)
-{
- int ret;
-
- DBG("process %d", getpid());
- lttng_perf_unlock();
- lttng_ust_unlock_fd_tracker();
- ust_unlock();
-
- pthread_mutex_unlock(&ust_fork_mutex);
-
- /* Restore signals */
- ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
- if (ret == -1) {
- PERROR("sigprocmask");
- }
-}
-
-void lttng_ust_after_fork_parent(sigset_t *restore_sigset)
-{
- if (URCU_TLS(lttng_ust_nest_count))
- return;
- DBG("process %d", getpid());
- lttng_ust_urcu_after_fork_parent();
- /* Release mutexes and reenable signals */
- ust_after_fork_common(restore_sigset);
-}
-
-/*
- * After fork, in the child, we need to clean up all the leftover state,
- * except the worker thread which already magically disappeared thanks
- * to the weird Linux fork semantics. After tidying up, we call
- * lttng_ust_init() again to start over as a new PID.
- *
- * This is meant for forks() that have tracing in the child between the
- * fork and following exec call (if there is any).
- */
-void lttng_ust_after_fork_child(sigset_t *restore_sigset)
-{
- if (URCU_TLS(lttng_ust_nest_count))
- return;
- lttng_context_vpid_reset();
- lttng_context_vtid_reset();
- lttng_ust_context_procname_reset();
- ust_context_ns_reset();
- ust_context_vuids_reset();
- ust_context_vgids_reset();
- DBG("process %d", getpid());
- /* Release urcu mutexes */
- lttng_ust_urcu_after_fork_child();
- lttng_ust_cleanup(0);
- /* Release mutexes and reenable signals */
- ust_after_fork_common(restore_sigset);
- lttng_ust_init();
-}
-
-void lttng_ust_after_setns(void)
-{
- ust_context_ns_reset();
- ust_context_vuids_reset();
- ust_context_vgids_reset();
-}
-
-void lttng_ust_after_unshare(void)
-{
- ust_context_ns_reset();
- ust_context_vuids_reset();
- ust_context_vgids_reset();
-}
-
-void lttng_ust_after_setuid(void)
-{
- ust_context_vuids_reset();
-}
-
-void lttng_ust_after_seteuid(void)
-{
- ust_context_vuids_reset();
-}
-
-void lttng_ust_after_setreuid(void)
-{
- ust_context_vuids_reset();
-}
-
-void lttng_ust_after_setresuid(void)
-{
- ust_context_vuids_reset();
-}
-
-void lttng_ust_after_setgid(void)
-{
- ust_context_vgids_reset();
-}
-
-void lttng_ust_after_setegid(void)
-{
- ust_context_vgids_reset();
-}
-
-void lttng_ust_after_setregid(void)
-{
- ust_context_vgids_reset();
-}
-
-void lttng_ust_after_setresgid(void)
-{
- ust_context_vgids_reset();
-}
-
-void lttng_ust_sockinfo_session_enabled(void *owner)
-{
- struct sock_info *sock_info = owner;
- sock_info->statedump_pending = 1;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * UST dynamic type implementation.
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <inttypes.h>
-
-#include "common/macros.h"
-#include "common/dynamic-type.h"
-
-#define ctf_enum_value(_string, _value) \
- __LTTNG_COMPOUND_LITERAL(struct lttng_ust_enum_entry, { \
- .struct_size = sizeof(struct lttng_ust_enum_entry), \
- .start = { \
- .signedness = lttng_ust_is_signed_type(__typeof__(_value)), \
- .value = lttng_ust_is_signed_type(__typeof__(_value)) ? \
- (long long) (_value) : (_value), \
- }, \
- .end = { \
- .signedness = lttng_ust_is_signed_type(__typeof__(_value)), \
- .value = lttng_ust_is_signed_type(__typeof__(_value)) ? \
- (long long) (_value) : (_value), \
- }, \
- .string = (_string), \
- }),
-
-static const struct lttng_ust_enum_entry *dt_enum[_NR_LTTNG_UST_DYNAMIC_TYPES] = {
- [LTTNG_UST_DYNAMIC_TYPE_NONE] = ctf_enum_value("_none", 0)
- [LTTNG_UST_DYNAMIC_TYPE_S8] = ctf_enum_value("_int8", 1)
- [LTTNG_UST_DYNAMIC_TYPE_S16] = ctf_enum_value("_int16", 2)
- [LTTNG_UST_DYNAMIC_TYPE_S32] = ctf_enum_value("_int32", 3)
- [LTTNG_UST_DYNAMIC_TYPE_S64] = ctf_enum_value("_int64", 4)
- [LTTNG_UST_DYNAMIC_TYPE_U8] = ctf_enum_value("_uint8", 5)
- [LTTNG_UST_DYNAMIC_TYPE_U16] = ctf_enum_value("_uint16", 6)
- [LTTNG_UST_DYNAMIC_TYPE_U32] = ctf_enum_value("_uint32", 7)
- [LTTNG_UST_DYNAMIC_TYPE_U64] = ctf_enum_value("_uint64", 8)
- [LTTNG_UST_DYNAMIC_TYPE_FLOAT] = ctf_enum_value("_float", 9)
- [LTTNG_UST_DYNAMIC_TYPE_DOUBLE] = ctf_enum_value("_double", 10)
- [LTTNG_UST_DYNAMIC_TYPE_STRING] = ctf_enum_value("_string", 11)
-};
-
-static struct lttng_ust_enum_desc dt_enum_desc = {
- .name = "dynamic_type_enum",
- .entries = dt_enum,
- .nr_entries = LTTNG_ARRAY_SIZE(dt_enum),
-};
-
-const struct lttng_ust_event_field *dt_var_fields[_NR_LTTNG_UST_DYNAMIC_TYPES] = {
- [LTTNG_UST_DYNAMIC_TYPE_NONE] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "none",
- .type = (struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(struct lttng_ust_type_struct, {
- .parent = {
- .type = lttng_ust_type_struct,
- },
- .struct_size = sizeof(struct lttng_ust_type_struct),
- .nr_fields = 0, /* empty struct */
- .alignment = 0,
- }),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_S8] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "int8",
- .type = lttng_ust_type_integer_define(int8_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_S16] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "int16",
- .type = lttng_ust_type_integer_define(int16_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_S32] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "int32",
- .type = lttng_ust_type_integer_define(int32_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_S64] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "int64",
- .type = lttng_ust_type_integer_define(int64_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_U8] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "uint8",
- .type = lttng_ust_type_integer_define(uint8_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_U16] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "uint16",
- .type = lttng_ust_type_integer_define(uint16_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_U32] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "uint32",
- .type = lttng_ust_type_integer_define(uint32_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_U64] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "uint64",
- .type = lttng_ust_type_integer_define(uint64_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_FLOAT] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "float",
- .type = lttng_ust_type_float_define(float),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_DOUBLE] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "double",
- .type = lttng_ust_type_float_define(double),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_STRING] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "string",
- .type = (struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(struct lttng_ust_type_string, {
- .parent = {
- .type = lttng_ust_type_string,
- },
- .struct_size = sizeof(struct lttng_ust_type_string),
- .encoding = lttng_ust_string_encoding_UTF8,
- }),
- .nowrite = 0,
- }),
-};
-
-static const struct lttng_ust_event_field dt_enum_field = {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = NULL,
- .type = (struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(struct lttng_ust_type_enum, {
- .parent = {
- .type = lttng_ust_type_enum,
- },
- .struct_size = sizeof(struct lttng_ust_type_enum),
- .desc = &dt_enum_desc,
- .container_type = lttng_ust_type_integer_define(char, BYTE_ORDER, 10),
- }),
- .nowrite = 0,
-};
-
-const struct lttng_ust_event_field *lttng_ust_dynamic_type_field(int64_t value)
-{
- if (value >= _NR_LTTNG_UST_DYNAMIC_TYPES || value < 0)
- return NULL;
- return dt_var_fields[value];
-}
-
-int lttng_ust_dynamic_type_choices(size_t *nr_choices, const struct lttng_ust_event_field ***choices)
-{
- *nr_choices = _NR_LTTNG_UST_DYNAMIC_TYPES;
- *choices = dt_var_fields;
- return 0;
-}
-
-const struct lttng_ust_event_field *lttng_ust_dynamic_type_tag_field(void)
-{
- return &dt_enum_field;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <fcntl.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <lttng/ust-utils.h>
-
-#include "common/elf.h"
-#include "common/ust-fd.h"
-
-#include "lttng-tracer-core.h"
-#include "lttng-ust-elf.h"
-#include "common/macros.h"
-
-#define BUF_LEN 4096
-
-#ifndef NT_GNU_BUILD_ID
-# define NT_GNU_BUILD_ID 3
-#endif
-
-/*
- * Retrieve the nth (where n is the `index` argument) phdr (program
- * header) from the given elf instance.
- *
- * A pointer to the phdr is returned on success, NULL on failure.
- */
-static
-struct lttng_ust_elf_phdr *lttng_ust_elf_get_phdr(struct lttng_ust_elf *elf,
- uint16_t index)
-{
- struct lttng_ust_elf_phdr *phdr = NULL;
- off_t offset;
-
- if (!elf) {
- goto error;
- }
-
- if (index >= elf->ehdr->e_phnum) {
- goto error;
- }
-
- phdr = zmalloc(sizeof(struct lttng_ust_elf_phdr));
- if (!phdr) {
- goto error;
- }
-
- offset = (off_t) elf->ehdr->e_phoff
- + (off_t) index * elf->ehdr->e_phentsize;
- if (lseek(elf->fd, offset, SEEK_SET) < 0) {
- goto error;
- }
-
- if (is_elf_32_bit(elf)) {
- Elf32_Phdr elf_phdr;
-
- if (lttng_ust_read(elf->fd, &elf_phdr, sizeof(elf_phdr))
- < sizeof(elf_phdr)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- bswap_phdr(elf_phdr);
- }
- copy_phdr(elf_phdr, *phdr);
- } else {
- Elf64_Phdr elf_phdr;
-
- if (lttng_ust_read(elf->fd, &elf_phdr, sizeof(elf_phdr))
- < sizeof(elf_phdr)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- bswap_phdr(elf_phdr);
- }
- copy_phdr(elf_phdr, *phdr);
- }
-
- return phdr;
-
-error:
- free(phdr);
- return NULL;
-}
-
-/*
- * Retrieve the nth (where n is the `index` argument) shdr (section
- * header) from the given elf instance.
- *
- * A pointer to the shdr is returned on success, NULL on failure.
- */
-static
-struct lttng_ust_elf_shdr *lttng_ust_elf_get_shdr(struct lttng_ust_elf *elf,
- uint16_t index)
-{
- struct lttng_ust_elf_shdr *shdr = NULL;
- off_t offset;
-
- if (!elf) {
- goto error;
- }
-
- if (index >= elf->ehdr->e_shnum) {
- goto error;
- }
-
- shdr = zmalloc(sizeof(struct lttng_ust_elf_shdr));
- if (!shdr) {
- goto error;
- }
-
- offset = (off_t) elf->ehdr->e_shoff
- + (off_t) index * elf->ehdr->e_shentsize;
- if (lseek(elf->fd, offset, SEEK_SET) < 0) {
- goto error;
- }
-
- if (is_elf_32_bit(elf)) {
- Elf32_Shdr elf_shdr;
-
- if (lttng_ust_read(elf->fd, &elf_shdr, sizeof(elf_shdr))
- < sizeof(elf_shdr)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- bswap_shdr(elf_shdr);
- }
- copy_shdr(elf_shdr, *shdr);
- } else {
- Elf64_Shdr elf_shdr;
-
- if (lttng_ust_read(elf->fd, &elf_shdr, sizeof(elf_shdr))
- < sizeof(elf_shdr)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- bswap_shdr(elf_shdr);
- }
- copy_shdr(elf_shdr, *shdr);
- }
-
- return shdr;
-
-error:
- free(shdr);
- return NULL;
-}
-
-/*
- * Look up a section's name from a given offset (usually from an shdr's
- * sh_name value) in bytes relative to the beginning of the section
- * names string table.
- *
- * If no name is found, NULL is returned.
- */
-static
-char *lttng_ust_elf_get_section_name(struct lttng_ust_elf *elf, off_t offset)
-{
- char *name = NULL;
- size_t len = 0, to_read; /* len does not include \0 */
-
- if (!elf) {
- goto error;
- }
-
- if (offset >= elf->section_names_size) {
- goto error;
- }
-
- if (lseek(elf->fd, elf->section_names_offset + offset, SEEK_SET) < 0) {
- goto error;
- }
-
- to_read = elf->section_names_size - offset;
-
- /* Find first \0 after or at current location, remember len. */
- for (;;) {
- char buf[BUF_LEN];
- ssize_t read_len;
- size_t i;
-
- if (!to_read) {
- goto error;
- }
- read_len = lttng_ust_read(elf->fd, buf,
- min_t(size_t, BUF_LEN, to_read));
- if (read_len <= 0) {
- goto error;
- }
- for (i = 0; i < read_len; i++) {
- if (buf[i] == '\0') {
- len += i;
- goto end;
- }
- }
- len += read_len;
- to_read -= read_len;
- }
-end:
- name = zmalloc(sizeof(char) * (len + 1)); /* + 1 for \0 */
- if (!name) {
- goto error;
- }
- if (lseek(elf->fd, elf->section_names_offset + offset,
- SEEK_SET) < 0) {
- goto error;
- }
- if (lttng_ust_read(elf->fd, name, len + 1) < len + 1) {
- goto error;
- }
-
- return name;
-
-error:
- free(name);
- return NULL;
-}
-
-/*
- * Create an instance of lttng_ust_elf for the ELF file located at
- * `path`.
- *
- * Return a pointer to the instance on success, NULL on failure.
- */
-struct lttng_ust_elf *lttng_ust_elf_create(const char *path)
-{
- uint8_t e_ident[EI_NIDENT];
- struct lttng_ust_elf_shdr *section_names_shdr;
- struct lttng_ust_elf *elf = NULL;
- int ret, fd;
-
- elf = zmalloc(sizeof(struct lttng_ust_elf));
- if (!elf) {
- goto error;
- }
-
- /* Initialize fd field to -1. 0 is a valid fd number */
- elf->fd = -1;
-
- elf->path = strdup(path);
- if (!elf->path) {
- goto error;
- }
-
- lttng_ust_lock_fd_tracker();
- fd = open(elf->path, O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- lttng_ust_unlock_fd_tracker();
- goto error;
- }
-
- ret = lttng_ust_add_fd_to_tracker(fd);
- if (ret < 0) {
- ret = close(fd);
- if (ret) {
- PERROR("close on elf->fd");
- }
- ret = -1;
- lttng_ust_unlock_fd_tracker();
- goto error;
- }
- elf->fd = ret;
- lttng_ust_unlock_fd_tracker();
-
- if (lttng_ust_read(elf->fd, e_ident, EI_NIDENT) < EI_NIDENT) {
- goto error;
- }
- elf->bitness = e_ident[EI_CLASS];
- elf->endianness = e_ident[EI_DATA];
-
- if (lseek(elf->fd, 0, SEEK_SET) < 0) {
- goto error;
- }
-
- elf->ehdr = zmalloc(sizeof(struct lttng_ust_elf_ehdr));
- if (!elf->ehdr) {
- goto error;
- }
-
- if (is_elf_32_bit(elf)) {
- Elf32_Ehdr elf_ehdr;
-
- if (lttng_ust_read(elf->fd, &elf_ehdr, sizeof(elf_ehdr))
- < sizeof(elf_ehdr)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- bswap_ehdr(elf_ehdr);
- }
- copy_ehdr(elf_ehdr, *(elf->ehdr));
- } else {
- Elf64_Ehdr elf_ehdr;
-
- if (lttng_ust_read(elf->fd, &elf_ehdr, sizeof(elf_ehdr))
- < sizeof(elf_ehdr)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- bswap_ehdr(elf_ehdr);
- }
- copy_ehdr(elf_ehdr, *(elf->ehdr));
- }
-
- section_names_shdr = lttng_ust_elf_get_shdr(elf, elf->ehdr->e_shstrndx);
- if (!section_names_shdr) {
- goto error;
- }
-
- elf->section_names_offset = section_names_shdr->sh_offset;
- elf->section_names_size = section_names_shdr->sh_size;
-
- free(section_names_shdr);
- return elf;
-
-error:
- lttng_ust_elf_destroy(elf);
- return NULL;
-}
-
-/*
- * Test whether the ELF file is position independent code (PIC)
- */
-uint8_t lttng_ust_elf_is_pic(struct lttng_ust_elf *elf)
-{
- /*
-	 * PIC has an e_type value of ET_DYN, see ELF specification
- * version 1.1 p. 1-3.
- */
- return elf->ehdr->e_type == ET_DYN;
-}
-
-/*
- * Destroy the given lttng_ust_elf instance.
- */
-void lttng_ust_elf_destroy(struct lttng_ust_elf *elf)
-{
- int ret;
-
- if (!elf) {
- return;
- }
-
- if (elf->fd >= 0) {
- lttng_ust_lock_fd_tracker();
- ret = close(elf->fd);
- if (!ret) {
- lttng_ust_delete_fd_from_tracker(elf->fd);
- } else {
- PERROR("close");
- abort();
- }
- lttng_ust_unlock_fd_tracker();
- }
-
- free(elf->ehdr);
- free(elf->path);
- free(elf);
-}
-
-/*
- * Compute the total in-memory size of the ELF file, in bytes.
- *
- * Returns 0 if successful, -1 if not. On success, the memory size is
- * returned through the out parameter `memsz`.
- */
-int lttng_ust_elf_get_memsz(struct lttng_ust_elf *elf, uint64_t *memsz)
-{
- uint16_t i;
- uint64_t low_addr = UINT64_MAX, high_addr = 0;
-
- if (!elf || !memsz) {
- goto error;
- }
-
- for (i = 0; i < elf->ehdr->e_phnum; ++i) {
- struct lttng_ust_elf_phdr *phdr;
-
- phdr = lttng_ust_elf_get_phdr(elf, i);
- if (!phdr) {
- goto error;
- }
-
- /*
- * Only PT_LOAD segments contribute to memsz. Skip
- * other segments.
- */
- if (phdr->p_type != PT_LOAD) {
- goto next_loop;
- }
-
- low_addr = min_t(uint64_t, low_addr, phdr->p_vaddr);
- high_addr = max_t(uint64_t, high_addr,
- phdr->p_vaddr + phdr->p_memsz);
- next_loop:
- free(phdr);
- }
-
- if (high_addr < low_addr) {
- /* No PT_LOAD segments or corrupted data. */
- goto error;
- }
-
- *memsz = high_addr - low_addr;
- return 0;
-error:
- return -1;
-}
-
-/*
- * Internal method used to try and get the build_id from a PT_NOTE
- * segment ranging from `offset` to `segment_end`.
- *
- * If the function returns successfully, the out parameter `found`
- * indicates whether the build id information was present in the
- * segment or not. If `found` is not 0, the out parameters `build_id`
- * and `length` will both have been set with the retrieved
- * information.
- *
- * Returns 0 on success, -1 if an error occurred.
- */
-static
-int lttng_ust_elf_get_build_id_from_segment(
- struct lttng_ust_elf *elf, uint8_t **build_id, size_t *length,
- off_t offset, off_t segment_end)
-{
- uint8_t *_build_id = NULL; /* Silence old gcc warning. */
- size_t _length = 0; /* Silence old gcc warning. */
-
- while (offset < segment_end) {
- struct lttng_ust_elf_nhdr nhdr;
- size_t read_len;
-
- /* Align start of note entry */
- offset += lttng_ust_offset_align(offset, ELF_NOTE_ENTRY_ALIGN);
- if (offset >= segment_end) {
- break;
- }
- /*
-		 * We seek manually because, if the note isn't the
-		 * build id, the data following the header will not
-		 * have been read.
- */
- if (lseek(elf->fd, offset, SEEK_SET) < 0) {
- goto error;
- }
- if (lttng_ust_read(elf->fd, &nhdr, sizeof(nhdr))
- < sizeof(nhdr)) {
- goto error;
- }
-
- if (!is_elf_native_endian(elf)) {
- nhdr.n_namesz = bswap_32(nhdr.n_namesz);
- nhdr.n_descsz = bswap_32(nhdr.n_descsz);
- nhdr.n_type = bswap_32(nhdr.n_type);
- }
-
- offset += sizeof(nhdr) + nhdr.n_namesz;
- /* Align start of desc entry */
- offset += lttng_ust_offset_align(offset, ELF_NOTE_DESC_ALIGN);
-
- if (nhdr.n_type != NT_GNU_BUILD_ID) {
- /*
- * Ignore non build id notes but still
- * increase the offset.
- */
- offset += nhdr.n_descsz;
- continue;
- }
-
- _length = nhdr.n_descsz;
- _build_id = zmalloc(sizeof(uint8_t) * _length);
- if (!_build_id) {
- goto error;
- }
-
- if (lseek(elf->fd, offset, SEEK_SET) < 0) {
- goto error;
- }
- read_len = sizeof(*_build_id) * _length;
- if (lttng_ust_read(elf->fd, _build_id, read_len) < read_len) {
- goto error;
- }
-
- break;
- }
-
- if (_build_id) {
- *build_id = _build_id;
- *length = _length;
- }
-
- return 0;
-error:
- free(_build_id);
- return -1;
-}
-
-/*
- * Retrieve a build ID (an array of bytes) from the corresponding
- * section in the ELF file. The length of the build ID can be either
- * 16 or 20 bytes depending on the method used to generate it, hence
- * the length out parameter.
- *
- * If the function returns successfully, the out parameter `found`
- * indicates whether the build id information was present in the ELF
- * file or not. If `found` is not 0, the out parameters `build_id` and
- * `length` will both have been set with the retrieved information.
- *
- * Returns 0 on success, -1 if an error occurred.
- */
-int lttng_ust_elf_get_build_id(struct lttng_ust_elf *elf, uint8_t **build_id,
- size_t *length, int *found)
-{
- uint16_t i;
- uint8_t *_build_id = NULL; /* Silence old gcc warning. */
- size_t _length = 0; /* Silence old gcc warning. */
-
- if (!elf || !build_id || !length || !found) {
- goto error;
- }
-
- for (i = 0; i < elf->ehdr->e_phnum; ++i) {
- off_t offset, segment_end;
- struct lttng_ust_elf_phdr *phdr;
- int ret = 0;
-
- phdr = lttng_ust_elf_get_phdr(elf, i);
- if (!phdr) {
- goto error;
- }
-
- /* Build ID will be contained in a PT_NOTE segment. */
- if (phdr->p_type != PT_NOTE) {
- goto next_loop;
- }
-
- offset = phdr->p_offset;
- segment_end = offset + phdr->p_filesz;
- ret = lttng_ust_elf_get_build_id_from_segment(
- elf, &_build_id, &_length, offset, segment_end);
- next_loop:
- free(phdr);
- if (ret) {
- goto error;
- }
- if (_build_id) {
- break;
- }
- }
-
- if (_build_id) {
- *build_id = _build_id;
- *length = _length;
- *found = 1;
- } else {
- *found = 0;
- }
-
- return 0;
-error:
- free(_build_id);
- return -1;
-}
-
-/*
- * Try to retrieve filename and CRC from given ELF section `shdr`.
- *
- * If the function returns successfully, the out parameter `found`
- * indicates whether the debug link information was present in the ELF
- * section or not. If `found` is not 0, the out parameters `filename` and
- * `crc` will both have been set with the retrieved information.
- *
- * Returns 0 on success, -1 if an error occurred.
- */
-static
-int lttng_ust_elf_get_debug_link_from_section(struct lttng_ust_elf *elf,
- char **filename, uint32_t *crc,
- struct lttng_ust_elf_shdr *shdr)
-{
- char *_filename = NULL; /* Silence old gcc warning. */
- size_t filename_len;
- char *section_name = NULL;
- uint32_t _crc = 0; /* Silence old gcc warning. */
-
- if (!elf || !filename || !crc || !shdr) {
- goto error;
- }
-
- /*
-	 * The .gnu_debuglink section is of type SHT_PROGBITS;
- * skip the other sections.
- */
- if (shdr->sh_type != SHT_PROGBITS) {
- goto end;
- }
-
- section_name = lttng_ust_elf_get_section_name(elf,
- shdr->sh_name);
- if (!section_name) {
- goto end;
- }
- if (strcmp(section_name, ".gnu_debuglink")) {
- goto end;
- }
-
- /*
- * The length of the filename is the sh_size excluding the CRC
- * which comes after it in the section.
- */
- _filename = zmalloc(sizeof(char) * (shdr->sh_size - ELF_CRC_SIZE));
- if (!_filename) {
- goto error;
- }
- if (lseek(elf->fd, shdr->sh_offset, SEEK_SET) < 0) {
- goto error;
- }
- filename_len = sizeof(*_filename) * (shdr->sh_size - ELF_CRC_SIZE);
- if (lttng_ust_read(elf->fd, _filename, filename_len) < filename_len) {
- goto error;
- }
- if (lttng_ust_read(elf->fd, &_crc, sizeof(_crc)) < sizeof(_crc)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- _crc = bswap_32(_crc);
- }
-
-end:
- free(section_name);
- if (_filename) {
- *filename = _filename;
- *crc = _crc;
- }
-
- return 0;
-
-error:
- free(_filename);
- free(section_name);
- return -1;
-}
-
-/*
- * Retrieve filename and CRC from ELF's .gnu_debuglink section, if any.
- *
- * If the function returns successfully, the out parameter `found`
- * indicates whether the debug link information was present in the ELF
- * file or not. If `found` is not 0, the out parameters `filename` and
- * `crc` will both have been set with the retrieved information.
- *
- * Returns 0 on success, -1 if an error occurred.
- */
-int lttng_ust_elf_get_debug_link(struct lttng_ust_elf *elf, char **filename,
- uint32_t *crc, int *found)
-{
- int ret;
- uint16_t i;
- char *_filename = NULL; /* Silence old gcc warning. */
- uint32_t _crc = 0; /* Silence old gcc warning. */
-
- if (!elf || !filename || !crc || !found) {
- goto error;
- }
-
- for (i = 0; i < elf->ehdr->e_shnum; ++i) {
- struct lttng_ust_elf_shdr *shdr = NULL;
-
- shdr = lttng_ust_elf_get_shdr(elf, i);
- if (!shdr) {
- goto error;
- }
-
- ret = lttng_ust_elf_get_debug_link_from_section(
- elf, &_filename, &_crc, shdr);
- free(shdr);
-
- if (ret) {
- goto error;
- }
- if (_filename) {
- break;
- }
- }
-
- if (_filename) {
- *filename = _filename;
- *crc = _crc;
- *found = 1;
- } else {
- *found = 0;
- }
-
- return 0;
-
-error:
- free(_filename);
- return -1;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- */
-
-#ifndef _LIB_LTTNG_UST_ELF_H
-#define _LIB_LTTNG_UST_ELF_H
-
-#include <elf.h>
-#include <lttng/ust-endian.h>
-
-/*
- * Determine native endianness in order to convert when reading an ELF
- * file if there is a mismatch.
- */
-#if BYTE_ORDER == LITTLE_ENDIAN
-#define NATIVE_ELF_ENDIANNESS ELFDATA2LSB
-#else
-#define NATIVE_ELF_ENDIANNESS ELFDATA2MSB
-#endif
-
-/*
- * The size in bytes of the debug link CRC as contained in an ELF
- * section.
- */
-#define ELF_CRC_SIZE 4
-/*
- * ELF notes are aligned on 4 bytes. ref: ELF specification version
- * 1.1 p. 2-5.
- */
-#define ELF_NOTE_ENTRY_ALIGN 4
-/*
- * Within an ELF note, the `desc` field is also aligned on 4
- * bytes. ref: ELF specification version 1.1 p. 2-5.
- */
-#define ELF_NOTE_DESC_ALIGN 4
-
-#define bswap(x) \
- do { \
- switch (sizeof(x)) { \
- case 8: \
- x = bswap_64(x); \
- break; \
- case 4: \
- x = bswap_32(x); \
- break; \
- case 2: \
- x = bswap_16(x); \
- break; \
- case 1: \
- break; \
- default: \
- abort(); \
- } \
- } while (0)
-
-#define bswap_phdr(phdr) \
- do { \
- bswap((phdr).p_type); \
- bswap((phdr).p_offset); \
- bswap((phdr).p_filesz); \
- bswap((phdr).p_memsz); \
- bswap((phdr).p_align); \
- bswap((phdr).p_vaddr); \
- } while (0)
-
-#define bswap_shdr(shdr) \
- do { \
- bswap((shdr).sh_name); \
- bswap((shdr).sh_type); \
- bswap((shdr).sh_flags); \
- bswap((shdr).sh_addr); \
- bswap((shdr).sh_offset); \
- bswap((shdr).sh_size); \
- bswap((shdr).sh_link); \
- bswap((shdr).sh_info); \
- bswap((shdr).sh_addralign); \
- bswap((shdr).sh_entsize); \
- } while (0)
-
-#define bswap_ehdr(ehdr) \
- do { \
- bswap((ehdr).e_type); \
- bswap((ehdr).e_machine); \
- bswap((ehdr).e_version); \
- bswap((ehdr).e_entry); \
- bswap((ehdr).e_phoff); \
- bswap((ehdr).e_shoff); \
- bswap((ehdr).e_flags); \
- bswap((ehdr).e_ehsize); \
- bswap((ehdr).e_phentsize); \
- bswap((ehdr).e_phnum); \
- bswap((ehdr).e_shentsize); \
- bswap((ehdr).e_shnum); \
- bswap((ehdr).e_shstrndx); \
- } while (0)
-
-#define copy_phdr(src_phdr, dst_phdr) \
- do { \
- (dst_phdr).p_type = (src_phdr).p_type; \
- (dst_phdr).p_offset = (src_phdr).p_offset; \
- (dst_phdr).p_filesz = (src_phdr).p_filesz; \
- (dst_phdr).p_memsz = (src_phdr).p_memsz; \
- (dst_phdr).p_align = (src_phdr).p_align; \
- (dst_phdr).p_vaddr = (src_phdr).p_vaddr; \
- } while (0)
-
-#define copy_shdr(src_shdr, dst_shdr) \
- do { \
- (dst_shdr).sh_name = (src_shdr).sh_name; \
- (dst_shdr).sh_type = (src_shdr).sh_type; \
- (dst_shdr).sh_flags = (src_shdr).sh_flags; \
- (dst_shdr).sh_addr = (src_shdr).sh_addr; \
- (dst_shdr).sh_offset = (src_shdr).sh_offset; \
- (dst_shdr).sh_size = (src_shdr).sh_size; \
- (dst_shdr).sh_link = (src_shdr).sh_link; \
- (dst_shdr).sh_info = (src_shdr).sh_info; \
- (dst_shdr).sh_addralign = (src_shdr).sh_addralign; \
- (dst_shdr).sh_entsize = (src_shdr).sh_entsize; \
- } while (0)
-
-#define copy_ehdr(src_ehdr, dst_ehdr) \
- do { \
- (dst_ehdr).e_type = (src_ehdr).e_type; \
- (dst_ehdr).e_machine = (src_ehdr).e_machine; \
- (dst_ehdr).e_version = (src_ehdr).e_version; \
- (dst_ehdr).e_entry = (src_ehdr).e_entry; \
- (dst_ehdr).e_phoff = (src_ehdr).e_phoff; \
- (dst_ehdr).e_shoff = (src_ehdr).e_shoff; \
- (dst_ehdr).e_flags = (src_ehdr).e_flags; \
- (dst_ehdr).e_ehsize = (src_ehdr).e_ehsize; \
- (dst_ehdr).e_phentsize = (src_ehdr).e_phentsize; \
- (dst_ehdr).e_phnum = (src_ehdr).e_phnum; \
- (dst_ehdr).e_shentsize = (src_ehdr).e_shentsize; \
- (dst_ehdr).e_shnum = (src_ehdr).e_shnum; \
- (dst_ehdr).e_shstrndx = (src_ehdr).e_shstrndx; \
- } while (0)
-
-static inline
-int is_elf_32_bit(struct lttng_ust_elf *elf)
-{
- return elf->bitness == ELFCLASS32;
-}
-
-static inline
-int is_elf_native_endian(struct lttng_ust_elf *elf)
-{
- return elf->endianness == NATIVE_ELF_ENDIANNESS;
-}
-
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_statedump
-
-#if !defined(_TRACEPOINT_LTTNG_UST_STATEDUMP_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_LTTNG_UST_STATEDUMP_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stddef.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-
-#define LTTNG_UST_STATEDUMP_PROVIDER
-#include <lttng/tracepoint.h>
-
-TRACEPOINT_EVENT(lttng_ust_statedump, start,
- TP_ARGS(struct lttng_ust_session *, session),
- TP_FIELDS(
- ctf_unused(session)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_statedump, bin_info,
- TP_ARGS(
- struct lttng_ust_session *, session,
- void *, baddr,
- const char*, path,
- uint64_t, memsz,
- uint8_t, is_pic,
- uint8_t, has_build_id,
- uint8_t, has_debug_link
- ),
- TP_FIELDS(
- ctf_unused(session)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_integer(uint64_t, memsz, memsz)
- ctf_string(path, path)
- ctf_integer(uint8_t, is_pic, is_pic)
- ctf_integer(uint8_t, has_build_id, has_build_id)
- ctf_integer(uint8_t, has_debug_link, has_debug_link)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_statedump, build_id,
- TP_ARGS(
- struct lttng_ust_session *, session,
- void *, baddr,
- uint8_t *, build_id,
- size_t, build_id_len
- ),
- TP_FIELDS(
- ctf_unused(session)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_sequence_hex(uint8_t, build_id, build_id,
- size_t, build_id_len)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_statedump, debug_link,
- TP_ARGS(
- struct lttng_ust_session *, session,
- void *, baddr,
- char *, filename,
- uint32_t, crc
- ),
- TP_FIELDS(
- ctf_unused(session)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_integer(uint32_t, crc, crc)
- ctf_string(filename, filename)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_statedump, procname,
- TP_ARGS(
- struct lttng_ust_session *, session,
- char *, name
- ),
- TP_FIELDS(
- ctf_unused(session)
- ctf_array_text(char, procname, name, LTTNG_UST_ABI_PROCNAME_LEN)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_statedump, end,
- TP_ARGS(struct lttng_ust_session *, session),
- TP_FIELDS(
- ctf_unused(session)
- )
-)
-
-#endif /* _TRACEPOINT_LTTNG_UST_STATEDUMP_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./lttng-ust-statedump-provider.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
-
-#ifdef __cplusplus
-}
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <link.h>
-#include <limits.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "common/elf.h"
-#include "common/macros.h"
-#include "lttng-tracer-core.h"
-#include "lttng-ust-statedump.h"
-#include "jhash.h"
-#include "getenv.h"
-#include "ust-events-internal.h"
-
-#define TRACEPOINT_DEFINE
-#include "ust_lib.h" /* Only define. */
-
-#define TRACEPOINT_CREATE_PROBES
-#define TP_SESSION_CHECK
-#include "lttng-ust-statedump-provider.h" /* Define and create probes. */
-
-struct dl_iterate_data {
- int exec_found;
- bool first;
- bool cancel;
-};
-
-struct bin_info_data {
- void *base_addr_ptr;
- char resolved_path[PATH_MAX];
- char *dbg_file;
- uint8_t *build_id;
- uint64_t memsz;
- size_t build_id_len;
- int vdso;
- uint32_t crc;
- uint8_t is_pic;
- uint8_t has_build_id;
- uint8_t has_debug_link;
-};
-
-struct lttng_ust_dl_node {
- struct bin_info_data bin_data;
- struct cds_hlist_node node;
- bool traced;
- bool marked;
-};
-
-#define UST_DL_STATE_HASH_BITS 8
-#define UST_DL_STATE_TABLE_SIZE (1 << UST_DL_STATE_HASH_BITS)
-struct cds_hlist_head dl_state_table[UST_DL_STATE_TABLE_SIZE];
-
-typedef void (*tracepoint_cb)(struct lttng_ust_session *session, void *priv);
-
-static
-struct lttng_ust_dl_node *alloc_dl_node(const struct bin_info_data *bin_data)
-{
- struct lttng_ust_dl_node *e;
-
- e = zmalloc(sizeof(struct lttng_ust_dl_node));
- if (!e)
- return NULL;
- if (bin_data->dbg_file) {
- e->bin_data.dbg_file = strdup(bin_data->dbg_file);
- if (!e->bin_data.dbg_file)
- goto error;
- }
- if (bin_data->build_id) {
- e->bin_data.build_id = zmalloc(bin_data->build_id_len);
- if (!e->bin_data.build_id)
- goto error;
- memcpy(e->bin_data.build_id, bin_data->build_id,
- bin_data->build_id_len);
- }
- e->bin_data.base_addr_ptr = bin_data->base_addr_ptr;
- memcpy(e->bin_data.resolved_path, bin_data->resolved_path, PATH_MAX);
- e->bin_data.memsz = bin_data->memsz;
- e->bin_data.build_id_len = bin_data->build_id_len;
- e->bin_data.vdso = bin_data->vdso;
- e->bin_data.crc = bin_data->crc;
- e->bin_data.is_pic = bin_data->is_pic;
- e->bin_data.has_build_id = bin_data->has_build_id;
- e->bin_data.has_debug_link = bin_data->has_debug_link;
- return e;
-
-error:
- free(e->bin_data.build_id);
- free(e->bin_data.dbg_file);
- free(e);
- return NULL;
-}
-
-static
-void free_dl_node(struct lttng_ust_dl_node *e)
-{
- free(e->bin_data.build_id);
- free(e->bin_data.dbg_file);
- free(e);
-}
-
-/* Return 0 if same, nonzero if not. */
-static
-int compare_bin_data(const struct bin_info_data *a,
- const struct bin_info_data *b)
-{
- if (a->base_addr_ptr != b->base_addr_ptr)
- return -1;
- if (strcmp(a->resolved_path, b->resolved_path) != 0)
- return -1;
- if (a->dbg_file && !b->dbg_file)
- return -1;
- if (!a->dbg_file && b->dbg_file)
- return -1;
- if (a->dbg_file && strcmp(a->dbg_file, b->dbg_file) != 0)
- return -1;
- if (a->build_id && !b->build_id)
- return -1;
- if (!a->build_id && b->build_id)
- return -1;
- if (a->build_id_len != b->build_id_len)
- return -1;
- if (a->build_id &&
- memcmp(a->build_id, b->build_id, a->build_id_len) != 0)
- return -1;
- if (a->memsz != b->memsz)
- return -1;
- if (a->vdso != b->vdso)
- return -1;
- if (a->crc != b->crc)
- return -1;
- if (a->is_pic != b->is_pic)
- return -1;
- if (a->has_build_id != b->has_build_id)
- return -1;
- if (a->has_debug_link != b->has_debug_link)
- return -1;
- return 0;
-}
-
-static
-struct lttng_ust_dl_node *find_or_create_dl_node(struct bin_info_data *bin_data)
-{
- struct cds_hlist_head *head;
- struct lttng_ust_dl_node *e;
- unsigned int hash;
- bool found = false;
-
- hash = jhash(&bin_data->base_addr_ptr,
- sizeof(bin_data->base_addr_ptr), 0);
- head = &dl_state_table[hash & (UST_DL_STATE_TABLE_SIZE - 1)];
- cds_hlist_for_each_entry_2(e, head, node) {
- if (compare_bin_data(&e->bin_data, bin_data) != 0)
- continue;
- found = true;
- break;
- }
- if (!found) {
- /* Create */
- e = alloc_dl_node(bin_data);
- if (!e)
- return NULL;
- cds_hlist_add_head(&e->node, head);
- }
- return e;
-}
-
-static
-void remove_dl_node(struct lttng_ust_dl_node *e)
-{
- cds_hlist_del(&e->node);
-}
-
-/*
- * Trace statedump event into all sessions owned by the caller thread
- * for which statedump is pending.
- */
-static
-void trace_statedump_event(tracepoint_cb tp_cb, void *owner, void *priv)
-{
- struct cds_list_head *sessionsp;
- struct lttng_ust_session_private *session_priv;
-
- sessionsp = lttng_get_sessions();
- cds_list_for_each_entry(session_priv, sessionsp, node) {
- if (session_priv->owner != owner)
- continue;
- if (!session_priv->statedump_pending)
- continue;
- tp_cb(session_priv->pub, priv);
- }
-}
-
-static
-void trace_bin_info_cb(struct lttng_ust_session *session, void *priv)
-{
- struct bin_info_data *bin_data = (struct bin_info_data *) priv;
-
- tracepoint(lttng_ust_statedump, bin_info,
- session, bin_data->base_addr_ptr,
- bin_data->resolved_path, bin_data->memsz,
- bin_data->is_pic, bin_data->has_build_id,
- bin_data->has_debug_link);
-}
-
-static
-void trace_build_id_cb(struct lttng_ust_session *session, void *priv)
-{
- struct bin_info_data *bin_data = (struct bin_info_data *) priv;
-
- tracepoint(lttng_ust_statedump, build_id,
- session, bin_data->base_addr_ptr,
- bin_data->build_id, bin_data->build_id_len);
-}
-
-static
-void trace_debug_link_cb(struct lttng_ust_session *session, void *priv)
-{
- struct bin_info_data *bin_data = (struct bin_info_data *) priv;
-
- tracepoint(lttng_ust_statedump, debug_link,
- session, bin_data->base_addr_ptr,
- bin_data->dbg_file, bin_data->crc);
-}
-
-static
-void procname_cb(struct lttng_ust_session *session, void *priv)
-{
- char *procname = (char *) priv;
- tracepoint(lttng_ust_statedump, procname, session, procname);
-}
-
-static
-void trace_start_cb(struct lttng_ust_session *session, void *priv __attribute__((unused)))
-{
- tracepoint(lttng_ust_statedump, start, session);
-}
-
-static
-void trace_end_cb(struct lttng_ust_session *session, void *priv __attribute__((unused)))
-{
- tracepoint(lttng_ust_statedump, end, session);
-}
-
-static
-int get_elf_info(struct bin_info_data *bin_data)
-{
- struct lttng_ust_elf *elf;
- int ret = 0, found;
-
- elf = lttng_ust_elf_create(bin_data->resolved_path);
- if (!elf) {
- ret = -1;
- goto end;
- }
-
- ret = lttng_ust_elf_get_memsz(elf, &bin_data->memsz);
- if (ret) {
- goto end;
- }
-
- found = 0;
- ret = lttng_ust_elf_get_build_id(elf, &bin_data->build_id,
- &bin_data->build_id_len,
- &found);
- if (ret) {
- goto end;
- }
- bin_data->has_build_id = !!found;
- found = 0;
- ret = lttng_ust_elf_get_debug_link(elf, &bin_data->dbg_file,
- &bin_data->crc,
- &found);
- if (ret) {
- goto end;
- }
- bin_data->has_debug_link = !!found;
-
- bin_data->is_pic = lttng_ust_elf_is_pic(elf);
-
-end:
- lttng_ust_elf_destroy(elf);
- return ret;
-}
-
-static
-void trace_baddr(struct bin_info_data *bin_data, void *owner)
-{
- trace_statedump_event(trace_bin_info_cb, owner, bin_data);
-
- if (bin_data->has_build_id)
- trace_statedump_event(trace_build_id_cb, owner, bin_data);
-
- if (bin_data->has_debug_link)
- trace_statedump_event(trace_debug_link_cb, owner, bin_data);
-}
-
-static
-int extract_baddr(struct bin_info_data *bin_data)
-{
- int ret = 0;
- struct lttng_ust_dl_node *e;
-
- if (!bin_data->vdso) {
- ret = get_elf_info(bin_data);
- if (ret) {
- goto end;
- }
- } else {
- bin_data->memsz = 0;
- bin_data->has_build_id = 0;
- bin_data->has_debug_link = 0;
- }
-
- e = find_or_create_dl_node(bin_data);
- if (!e) {
- ret = -1;
- goto end;
- }
- e->marked = true;
-end:
- free(bin_data->build_id);
- bin_data->build_id = NULL;
- free(bin_data->dbg_file);
- bin_data->dbg_file = NULL;
- return ret;
-}
-
-static
-void trace_statedump_start(void *owner)
-{
- trace_statedump_event(trace_start_cb, owner, NULL);
-}
-
-static
-void trace_statedump_end(void *owner)
-{
- trace_statedump_event(trace_end_cb, owner, NULL);
-}
-
-static
-void iter_begin(struct dl_iterate_data *data)
-{
- unsigned int i;
-
- /*
- * UST lock nests within dynamic loader lock.
- *
- * Hold this lock across handling of the module listing to
- * protect memory allocation at early process start, due to
- * interactions with libc-wrapper lttng malloc instrumentation.
- */
- if (ust_lock()) {
- data->cancel = true;
- return;
- }
-
- /* Ensure all entries are unmarked. */
- for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
- struct cds_hlist_head *head;
- struct lttng_ust_dl_node *e;
-
- head = &dl_state_table[i];
- cds_hlist_for_each_entry_2(e, head, node)
- assert(!e->marked);
- }
-}
-
-static
-void trace_lib_load(const struct bin_info_data *bin_data, void *ip)
-{
- tracepoint(lttng_ust_lib, load,
- ip, bin_data->base_addr_ptr, bin_data->resolved_path,
- bin_data->memsz, bin_data->has_build_id,
- bin_data->has_debug_link);
-
- if (bin_data->has_build_id) {
- tracepoint(lttng_ust_lib, build_id,
- ip, bin_data->base_addr_ptr, bin_data->build_id,
- bin_data->build_id_len);
- }
-
- if (bin_data->has_debug_link) {
- tracepoint(lttng_ust_lib, debug_link,
- ip, bin_data->base_addr_ptr, bin_data->dbg_file,
- bin_data->crc);
- }
-}
-
-static
-void trace_lib_unload(const struct bin_info_data *bin_data, void *ip)
-{
- tracepoint(lttng_ust_lib, unload, ip, bin_data->base_addr_ptr);
-}
-
-static
-void iter_end(struct dl_iterate_data *data, void *ip)
-{
- unsigned int i;
-
- if (data->cancel)
- goto end;
- /*
- * Iterate on hash table.
- * For each marked, traced, do nothing.
- * For each marked, not traced, trace lib open event. traced = true.
- * For each unmarked, traced, trace lib close event. remove node.
- * For each unmarked, not traced, remove node.
- */
- for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
- struct cds_hlist_head *head;
- struct lttng_ust_dl_node *e;
-
- head = &dl_state_table[i];
- cds_hlist_for_each_entry_2(e, head, node) {
- if (e->marked) {
- if (!e->traced) {
- trace_lib_load(&e->bin_data, ip);
- e->traced = true;
- }
- e->marked = false;
- } else {
- if (e->traced)
- trace_lib_unload(&e->bin_data, ip);
- remove_dl_node(e);
- free_dl_node(e);
- }
- }
- }
-end:
- ust_unlock();
-}
-
-static
-int extract_bin_info_events(struct dl_phdr_info *info, size_t size __attribute__((unused)), void *_data)
-{
- int j, ret = 0;
- struct dl_iterate_data *data = _data;
-
- if (data->first) {
- iter_begin(data);
- data->first = false;
- }
-
- if (data->cancel)
- goto end;
-
- for (j = 0; j < info->dlpi_phnum; j++) {
- struct bin_info_data bin_data;
-
- if (info->dlpi_phdr[j].p_type != PT_LOAD)
- continue;
-
- memset(&bin_data, 0, sizeof(bin_data));
-
- /* Calculate virtual memory address of the loadable segment */
- bin_data.base_addr_ptr = (void *) info->dlpi_addr +
- info->dlpi_phdr[j].p_vaddr;
-
- if ((info->dlpi_name == NULL || info->dlpi_name[0] == 0)) {
- /*
- * Only the first phdr without a dlpi_name
- * encountered is considered as the program
- * executable. The rest are vdsos.
- */
- if (!data->exec_found) {
- ssize_t path_len;
- data->exec_found = 1;
-
- /*
- * Use /proc/self/exe to resolve the
- * executable's full path.
- */
- path_len = readlink("/proc/self/exe",
- bin_data.resolved_path,
- PATH_MAX - 1);
- if (path_len <= 0)
- break;
-
- bin_data.resolved_path[path_len] = '\0';
- bin_data.vdso = 0;
- } else {
- snprintf(bin_data.resolved_path,
- PATH_MAX - 1, "[vdso]");
- bin_data.vdso = 1;
- }
- } else {
- /*
- * For regular dl_phdr_info entries check if
- * the path to the binary really exists. If not,
- * treat as vdso and use dlpi_name as 'path'.
- */
- if (!realpath(info->dlpi_name,
- bin_data.resolved_path)) {
- snprintf(bin_data.resolved_path,
- PATH_MAX - 1, "[%s]",
- info->dlpi_name);
- bin_data.vdso = 1;
- } else {
- bin_data.vdso = 0;
- }
- }
-
- ret = extract_baddr(&bin_data);
- break;
- }
-end:
- return ret;
-}
-
-static
-void ust_dl_table_statedump(void *owner)
-{
- unsigned int i;
-
- if (ust_lock())
- goto end;
-
- /* Statedump each traced table entry into session for owner. */
- for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
- struct cds_hlist_head *head;
- struct lttng_ust_dl_node *e;
-
- head = &dl_state_table[i];
- cds_hlist_for_each_entry_2(e, head, node) {
- if (e->traced)
- trace_baddr(&e->bin_data, owner);
- }
- }
-
-end:
- ust_unlock();
-}
-
-void lttng_ust_dl_update(void *ip)
-{
- struct dl_iterate_data data;
-
- if (lttng_ust_getenv("LTTNG_UST_WITHOUT_BADDR_STATEDUMP"))
- return;
-
- /*
- * Fixup lttng-ust TLS when called from dlopen/dlclose
- * instrumentation.
- */
- lttng_ust_fixup_tls();
-
- data.exec_found = 0;
- data.first = true;
- data.cancel = false;
- /*
- * Iterate through the list of currently loaded shared objects and
-	 * generate table entries for loadable segments using
- * extract_bin_info_events.
- * Removed libraries are detected by mark-and-sweep: marking is
- * done in the iteration over libraries, and sweeping is
- * performed by iter_end().
- */
- dl_iterate_phdr(extract_bin_info_events, &data);
- if (data.first)
- iter_begin(&data);
- iter_end(&data, ip);
-}
-
-/*
- * Generate a statedump of base addresses of all shared objects loaded
- * by the traced application, as well as for the application's
- * executable itself.
- */
-static
-int do_baddr_statedump(void *owner)
-{
- if (lttng_ust_getenv("LTTNG_UST_WITHOUT_BADDR_STATEDUMP"))
- return 0;
- lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
- ust_dl_table_statedump(owner);
- return 0;
-}
-
-static
-int do_procname_statedump(void *owner)
-{
- if (lttng_ust_getenv("LTTNG_UST_WITHOUT_PROCNAME_STATEDUMP"))
- return 0;
-
- trace_statedump_event(procname_cb, owner, lttng_ust_sockinfo_get_procname(owner));
- return 0;
-}
-
-/*
- * Generate a statedump of a given traced application. A statedump is
- * delimited by start and end events. For a given (process, session)
- * pair, begin/end events are serialized and will match. However, in a
- * session, statedumps from different processes may be
- * interleaved. The vpid context should be used to identify which
- * events belong to which process.
- *
- * Grab the ust_lock outside of the RCU read-side lock because we
- * perform synchronize_rcu with the ust_lock held, which can trigger
- * deadlocks otherwise.
- */
-int do_lttng_ust_statedump(void *owner)
-{
- ust_lock_nocheck();
- trace_statedump_start(owner);
- ust_unlock();
-
- do_procname_statedump(owner);
- do_baddr_statedump(owner);
-
- ust_lock_nocheck();
- trace_statedump_end(owner);
- ust_unlock();
-
- return 0;
-}
-
-void lttng_ust_statedump_init(void)
-{
- __tracepoints__init();
- __tracepoints__ptrs_init();
- __lttng_events_init__lttng_ust_statedump();
- lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
-}
-
-static
-void ust_dl_state_destroy(void)
-{
- unsigned int i;
-
- for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
- struct cds_hlist_head *head;
- struct lttng_ust_dl_node *e, *tmp;
-
- head = &dl_state_table[i];
- cds_hlist_for_each_entry_safe_2(e, tmp, head, node)
- free_dl_node(e);
- CDS_INIT_HLIST_HEAD(head);
- }
-}
-
-void lttng_ust_statedump_destroy(void)
-{
- __lttng_events_exit__lttng_ust_statedump();
- __tracepoints__ptrs_destroy();
- __tracepoints__destroy();
- ust_dl_state_destroy();
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- */
-
-#ifndef LTTNG_UST_STATEDUMP_H
-#define LTTNG_UST_STATEDUMP_H
-
-#include <lttng/ust-events.h>
-
-void lttng_ust_statedump_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_statedump_destroy(void)
- __attribute__((visibility("hidden")));
-
-int do_lttng_ust_statedump(void *owner)
- __attribute__((visibility("hidden")));
-
-#endif /* LTTNG_UST_STATEDUMP_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2011-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_tracef
-
-#if !defined(_TRACEPOINT_LTTNG_UST_TRACEF_PROVIDER_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_LTTNG_UST_TRACEF_PROVIDER_H
-
-#include <lttng/tp/lttng-ust-tracef.h>
-
-#endif /* _TRACEPOINT_LTTNG_UST_TRACEF_PROVIDER_H */
-
-#define TP_IP_PARAM ip /* IP context received as parameter */
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./tp/lttng-ust-tracef.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2011-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_tracelog
-
-#if !defined(_TRACEPOINT_LTTNG_UST_TRACELOG_PROVIDER_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_LTTNG_UST_TRACELOG_PROVIDER_H
-
-#include <lttng/tp/lttng-ust-tracelog.h>
-
-#endif /* _TRACEPOINT_LTTNG_UST_TRACELOG_PROVIDER_H */
-
-#define TP_IP_PARAM ip /* IP context received as parameter */
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./tp/lttng-ust-tracelog.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * library wrappers to be used by non-LGPL compatible source code.
- */
-
-#include <urcu/uatomic.h>
-
-#include <lttng/urcu/static/pointer.h>
-/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
-#include <lttng/urcu/pointer.h>
-
-void *lttng_ust_rcu_dereference_sym(void *p)
-{
- return _lttng_ust_rcu_dereference(p);
-}
-
-void *lttng_ust_rcu_set_pointer_sym(void **p, void *v)
-{
- cmm_wmb();
- uatomic_set(p, v);
- return v;
-}
-
-void *lttng_ust_rcu_xchg_pointer_sym(void **p, void *v)
-{
- cmm_wmb();
- return uatomic_xchg(p, v);
-}
-
-void *lttng_ust_rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
-{
- cmm_wmb();
- return uatomic_cmpxchg(p, old, _new);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <pthread.h>
-#include <signal.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <poll.h>
-#include <unistd.h>
-#include <stdbool.h>
-#include <sys/mman.h>
-
-#include <urcu/arch.h>
-#include <urcu/wfcqueue.h>
-#include <lttng/urcu/static/urcu-ust.h>
-#include <lttng/urcu/pointer.h>
-#include <urcu/tls-compat.h>
-
-/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
-#undef _LGPL_SOURCE
-#include <lttng/urcu/urcu-ust.h>
-#define _LGPL_SOURCE
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-#ifdef __linux__
-static
-void *mremap_wrapper(void *old_address, size_t old_size,
- size_t new_size, int flags)
-{
- return mremap(old_address, old_size, new_size, flags);
-}
-#else
-
-#define MREMAP_MAYMOVE 1
-#define MREMAP_FIXED 2
-
-/*
- * mremap wrapper for non-Linux systems not allowing MAYMOVE.
- * This is not generic.
- */
-static
-void *mremap_wrapper(void *old_address, size_t old_size,
- size_t new_size, int flags)
-{
- assert(!(flags & MREMAP_MAYMOVE));
-
- return MAP_FAILED;
-}
-#endif
-
-/* Sleep delay in ms */
-#define RCU_SLEEP_DELAY_MS 10
-#define INIT_NR_THREADS 8
-#define ARENA_INIT_ALLOC \
- sizeof(struct registry_chunk) \
- + INIT_NR_THREADS * sizeof(struct lttng_ust_urcu_reader)
-
-/*
- * Active attempts to check for reader Q.S. before calling sleep().
- */
-#define RCU_QS_ACTIVE_ATTEMPTS 100
-
-static
-int lttng_ust_urcu_refcount;
-
-/* If the headers do not support the membarrier system call, fall back on smp_mb. */
-#ifdef __NR_membarrier
-# define membarrier(...) syscall(__NR_membarrier, __VA_ARGS__)
-#else
-# define membarrier(...) -ENOSYS
-#endif
-
-enum membarrier_cmd {
- MEMBARRIER_CMD_QUERY = 0,
- MEMBARRIER_CMD_SHARED = (1 << 0),
- /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
- /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
- MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
- MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
-};
-
-static
-void _lttng_ust_urcu_init(void)
- __attribute__((constructor));
-static
-void lttng_ust_urcu_exit(void)
- __attribute__((destructor));
-
-#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
-int lttng_ust_urcu_has_sys_membarrier;
-#endif
-
-/*
- * rcu_gp_lock ensures mutual exclusion between threads calling
- * synchronize_rcu().
- */
-static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
-/*
- * rcu_registry_lock ensures mutual exclusion between threads
- * registering and unregistering themselves to/from the registry, and
- * with threads reading that registry from synchronize_rcu(). However,
- * this lock is not held all the way through the wait for the grace
- * period to complete. It is sporadically released between iterations
- * on the registry.
- * rcu_registry_lock may nest inside rcu_gp_lock.
- */
-static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
-
-static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
-static int initialized;
-
-static pthread_key_t lttng_ust_urcu_key;
-
-struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };
-
-/*
- * Pointer to registry elements. Written to only by each individual reader. Read
- * by both the reader and the writers.
- */
-DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);
-
-static CDS_LIST_HEAD(registry);
-
-struct registry_chunk {
- size_t data_len; /* data length */
- size_t used; /* amount of data used */
- struct cds_list_head node; /* chunk_list node */
- char data[];
-};
-
-struct registry_arena {
- struct cds_list_head chunk_list;
-};
-
-static struct registry_arena registry_arena = {
- .chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
-};
-
-/* Saved fork signal mask, protected by rcu_gp_lock */
-static sigset_t saved_fork_signal_mask;
-
-static void mutex_lock(pthread_mutex_t *mutex)
-{
- int ret;
-
-#ifndef DISTRUST_SIGNALS_EXTREME
- ret = pthread_mutex_lock(mutex);
- if (ret)
- abort();
-#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
- while ((ret = pthread_mutex_trylock(mutex)) != 0) {
- if (ret != EBUSY && ret != EINTR)
- abort();
- poll(NULL,0,10);
- }
-#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
-}
-
-static void mutex_unlock(pthread_mutex_t *mutex)
-{
- int ret;
-
- ret = pthread_mutex_unlock(mutex);
- if (ret)
- abort();
-}
-
-static void smp_mb_master(void)
-{
- if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
- if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
- abort();
- } else {
- cmm_smp_mb();
- }
-}
-
-/*
- * Always called with rcu_registry lock held. Releases this lock between
- * iterations and grabs it again. Holds the lock when it returns.
- */
-static void wait_for_readers(struct cds_list_head *input_readers,
- struct cds_list_head *cur_snap_readers,
- struct cds_list_head *qsreaders)
-{
- unsigned int wait_loops = 0;
- struct lttng_ust_urcu_reader *index, *tmp;
-
- /*
- * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
- * indicate quiescence (not nested), or observe the current
- * rcu_gp.ctr value.
- */
- for (;;) {
- if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
- wait_loops++;
-
- cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
- switch (lttng_ust_urcu_reader_state(&index->ctr)) {
- case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
- if (cur_snap_readers) {
- cds_list_move(&index->node,
- cur_snap_readers);
- break;
- }
- /* Fall-through */
- case LTTNG_UST_URCU_READER_INACTIVE:
- cds_list_move(&index->node, qsreaders);
- break;
- case LTTNG_UST_URCU_READER_ACTIVE_OLD:
- /*
- * Old snapshot. Leaving node in
- * input_readers will make us busy-loop
- * until the snapshot becomes current or
- * the reader becomes inactive.
- */
- break;
- }
- }
-
- if (cds_list_empty(input_readers)) {
- break;
- } else {
- /* Temporarily unlock the registry lock. */
- mutex_unlock(&rcu_registry_lock);
- if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
- (void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
- else
- caa_cpu_relax();
- /* Re-lock the registry lock before the next loop. */
- mutex_lock(&rcu_registry_lock);
- }
- }
-}
-
-void lttng_ust_urcu_synchronize_rcu(void)
-{
- CDS_LIST_HEAD(cur_snap_readers);
- CDS_LIST_HEAD(qsreaders);
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- assert(!ret);
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- assert(!ret);
-
- mutex_lock(&rcu_gp_lock);
-
- mutex_lock(&rcu_registry_lock);
-
-	if (cds_list_empty(&registry))
- goto out;
-
-	/* All threads should read qparity before accessing the data structure
-	 * that the new ptr points to. */
- /* Write new ptr before changing the qparity */
- smp_mb_master();
-
- /*
- * Wait for readers to observe original parity or be quiescent.
-	 * wait_for_readers() can release and re-acquire rcu_registry_lock
-	 * internally.
- */
-	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
-
- /*
- * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
- */
- cmm_smp_mb();
-
- /* Switch parity: 0 -> 1, 1 -> 0 */
- CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr, lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);
-
- /*
- * Must commit qparity update to memory before waiting for other parity
- * quiescent state. Failure to do so could result in the writer waiting
- * forever while new readers are always accessing data (no progress).
- * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
- */
-
- /*
- * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
- */
- cmm_smp_mb();
-
- /*
- * Wait for readers to observe new parity or be quiescent.
-	 * wait_for_readers() can release and re-acquire rcu_registry_lock
-	 * internally.
- */
- wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
-
- /*
- * Put quiescent reader list back into registry.
- */
-	cds_list_splice(&qsreaders, &registry);
-
- /*
-	 * Finish waiting for reader threads before letting the old ptr be
-	 * freed.
- */
- smp_mb_master();
-out:
- mutex_unlock(&rcu_registry_lock);
- mutex_unlock(&rcu_gp_lock);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- assert(!ret);
-}
-
-/*
- * library wrappers to be used by non-LGPL compatible source code.
- */
-
-void lttng_ust_urcu_read_lock(void)
-{
- _lttng_ust_urcu_read_lock();
-}
-
-void lttng_ust_urcu_read_unlock(void)
-{
- _lttng_ust_urcu_read_unlock();
-}
-
-int lttng_ust_urcu_read_ongoing(void)
-{
- return _lttng_ust_urcu_read_ongoing();
-}
-
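For reference, a minimal usage sketch pairing the read-side wrappers above with the write-side grace period (only the lttng_ust_urcu_* calls come from this file; the RCU-protected pointer cfg_ptr, its consumer and the surrounding structure are assumptions):

struct config *cfg_ptr;	/* hypothetical RCU-protected pointer */

void reader(void)
{
	struct config *cfg;

	lttng_ust_urcu_register_thread();	/* no-op if already registered */
	lttng_ust_urcu_read_lock();
	cfg = CMM_LOAD_SHARED(cfg_ptr);
	if (cfg)
		consume(cfg);			/* hypothetical consumer */
	lttng_ust_urcu_read_unlock();
}

void writer(struct config *new_cfg)
{
	struct config *old_cfg = cfg_ptr;

	CMM_STORE_SHARED(cfg_ptr, new_cfg);
	lttng_ust_urcu_synchronize_rcu();	/* wait out pre-existing readers */
	free(old_cfg);
}

References obtained inside the read-side section must not be used past lttng_ust_urcu_read_unlock(); the writer may only reclaim the old pointer once the synchronize call returns.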
-/*
- * Only grow for now. If empty, allocate a ARENA_INIT_ALLOC sized chunk.
- * Else, try expanding the last chunk. If this fails, allocate a new
- * chunk twice as big as the last chunk.
- * Memory used by chunks _never_ moves. A chunk could theoretically be
- * freed when all "used" slots are released, but we don't do it at this
- * point.
- */
-static
-void expand_arena(struct registry_arena *arena)
-{
- struct registry_chunk *new_chunk, *last_chunk;
- size_t old_chunk_len, new_chunk_len;
-
- /* No chunk. */
- if (cds_list_empty(&arena->chunk_list)) {
- assert(ARENA_INIT_ALLOC >=
- sizeof(struct registry_chunk)
- + sizeof(struct lttng_ust_urcu_reader));
- new_chunk_len = ARENA_INIT_ALLOC;
- new_chunk = (struct registry_chunk *) mmap(NULL,
- new_chunk_len,
- PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE,
- -1, 0);
- if (new_chunk == MAP_FAILED)
- abort();
- memset(new_chunk, 0, new_chunk_len);
- new_chunk->data_len =
- new_chunk_len - sizeof(struct registry_chunk);
- cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
- return; /* We're done. */
- }
-
- /* Try expanding last chunk. */
- last_chunk = cds_list_entry(arena->chunk_list.prev,
- struct registry_chunk, node);
- old_chunk_len =
- last_chunk->data_len + sizeof(struct registry_chunk);
- new_chunk_len = old_chunk_len << 1;
-
- /* Don't allow memory mapping to move, just expand. */
- new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
- new_chunk_len, 0);
- if (new_chunk != MAP_FAILED) {
- /* Should not have moved. */
- assert(new_chunk == last_chunk);
- memset((char *) last_chunk + old_chunk_len, 0,
- new_chunk_len - old_chunk_len);
- last_chunk->data_len =
- new_chunk_len - sizeof(struct registry_chunk);
- return; /* We're done. */
- }
-
- /* Remap did not succeed, we need to add a new chunk. */
- new_chunk = (struct registry_chunk *) mmap(NULL,
- new_chunk_len,
- PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE,
- -1, 0);
- if (new_chunk == MAP_FAILED)
- abort();
- memset(new_chunk, 0, new_chunk_len);
- new_chunk->data_len =
- new_chunk_len - sizeof(struct registry_chunk);
- cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
-}
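As a worked example with hypothetical sizes: if struct registry_chunk is 48 bytes and struct lttng_ust_urcu_reader is 64 bytes, the first chunk is mapped with ARENA_INIT_ALLOC = 48 + 8 * 64 = 560 bytes; once it fills up, the code first tries to mremap it in place to 1120 bytes, and only if that fails does it mmap a fresh 1120-byte chunk and append it to the chunk list.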
-
-static
-struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
-{
- struct registry_chunk *chunk;
- struct lttng_ust_urcu_reader *rcu_reader_reg;
- int expand_done = 0; /* Only allow to expand once per alloc */
- size_t len = sizeof(struct lttng_ust_urcu_reader);
-
-retry:
- cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
- if (chunk->data_len - chunk->used < len)
- continue;
- /* Find spot */
- for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
- rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
- rcu_reader_reg++) {
- if (!rcu_reader_reg->alloc) {
- rcu_reader_reg->alloc = 1;
- chunk->used += len;
- return rcu_reader_reg;
- }
- }
- }
-
- if (!expand_done) {
- expand_arena(arena);
- expand_done = 1;
- goto retry;
- }
-
- return NULL;
-}
-
-/* Called with signals off and mutex locked */
-static
-void add_thread(void)
-{
- struct lttng_ust_urcu_reader *rcu_reader_reg;
- int ret;
-
-	rcu_reader_reg = arena_alloc(&registry_arena);
- if (!rcu_reader_reg)
- abort();
- ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
- if (ret)
- abort();
-
- /* Add to registry */
- rcu_reader_reg->tid = pthread_self();
- assert(rcu_reader_reg->ctr == 0);
-	cds_list_add(&rcu_reader_reg->node, &registry);
- /*
- * Reader threads are pointing to the reader registry. This is
- * why its memory should never be relocated.
- */
- URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
-}
-
-/* Called with mutex locked */
-static
-void cleanup_thread(struct registry_chunk *chunk,
- struct lttng_ust_urcu_reader *rcu_reader_reg)
-{
- rcu_reader_reg->ctr = 0;
- cds_list_del(&rcu_reader_reg->node);
- rcu_reader_reg->tid = 0;
- rcu_reader_reg->alloc = 0;
- chunk->used -= sizeof(struct lttng_ust_urcu_reader);
-}
-
-static
-struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
-{
- struct registry_chunk *chunk;
-
-	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
- if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[0])
- continue;
- if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len])
- continue;
- return chunk;
- }
- return NULL;
-}
-
-/* Called with signals off and mutex locked */
-static
-void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
-{
- cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
- URCU_TLS(lttng_ust_urcu_reader) = NULL;
-}
-
-/* Disable signals, take mutex, add to registry */
-void lttng_ust_urcu_register(void)
-{
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- if (ret)
- abort();
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- if (ret)
- abort();
-
- /*
- * Check if a signal concurrently registered our thread since
- * the check in rcu_read_lock().
- */
- if (URCU_TLS(lttng_ust_urcu_reader))
- goto end;
-
- /*
- * Take care of early registration before lttng_ust_urcu constructor.
- */
- _lttng_ust_urcu_init();
-
- mutex_lock(&rcu_registry_lock);
- add_thread();
- mutex_unlock(&rcu_registry_lock);
-end:
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- if (ret)
- abort();
-}
-
-void lttng_ust_urcu_register_thread(void)
-{
- if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
- lttng_ust_urcu_register(); /* If not yet registered. */
-}
-
-/* Disable signals, take mutex, remove from registry */
-static
-void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
-{
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- if (ret)
- abort();
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- if (ret)
- abort();
-
- mutex_lock(&rcu_registry_lock);
- remove_thread(rcu_reader_reg);
- mutex_unlock(&rcu_registry_lock);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- if (ret)
- abort();
- lttng_ust_urcu_exit();
-}
-
-/*
- * Remove thread from the registry when it exits, and flag it as
- * destroyed so garbage collection can take care of it.
- */
-static
-void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
-{
- lttng_ust_urcu_unregister(rcu_key);
-}
-
-#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
-static
-void lttng_ust_urcu_sys_membarrier_status(bool available)
-{
- if (!available)
- abort();
-}
-#else
-static
-void lttng_ust_urcu_sys_membarrier_status(bool available)
-{
- if (!available)
- return;
- lttng_ust_urcu_has_sys_membarrier = 1;
-}
-#endif
-
-static
-void lttng_ust_urcu_sys_membarrier_init(void)
-{
- bool available = false;
- int mask;
-
- mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
- if (mask >= 0) {
- if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
- if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
- abort();
- available = true;
- }
- }
- lttng_ust_urcu_sys_membarrier_status(available);
-}
-
-static
-void _lttng_ust_urcu_init(void)
-{
- mutex_lock(&init_lock);
- if (!lttng_ust_urcu_refcount++) {
- int ret;
-
-		ret = pthread_key_create(&lttng_ust_urcu_key,
- lttng_ust_urcu_thread_exit_notifier);
- if (ret)
- abort();
- lttng_ust_urcu_sys_membarrier_init();
- initialized = 1;
- }
- mutex_unlock(&init_lock);
-}
-
-static
-void lttng_ust_urcu_exit(void)
-{
- mutex_lock(&init_lock);
- if (!--lttng_ust_urcu_refcount) {
- struct registry_chunk *chunk, *tmp;
- int ret;
-
- cds_list_for_each_entry_safe(chunk, tmp,
-				&registry_arena.chunk_list, node) {
- munmap((void *) chunk, chunk->data_len
- + sizeof(struct registry_chunk));
- }
-		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
- ret = pthread_key_delete(lttng_ust_urcu_key);
- if (ret)
- abort();
- }
- mutex_unlock(&init_lock);
-}
-
-/*
- * Holding the rcu_gp_lock and rcu_registry_lock across fork makes
- * sure that fork() doesn't race with a concurrent thread executing with
- * any of those locks held. This ensures that the registry and data
- * protected by rcu_gp_lock are in a coherent state in the child.
- */
-void lttng_ust_urcu_before_fork(void)
-{
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- assert(!ret);
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- assert(!ret);
- mutex_lock(&rcu_gp_lock);
- mutex_lock(&rcu_registry_lock);
- saved_fork_signal_mask = oldmask;
-}
-
-void lttng_ust_urcu_after_fork_parent(void)
-{
- sigset_t oldmask;
- int ret;
-
- oldmask = saved_fork_signal_mask;
- mutex_unlock(&rcu_registry_lock);
- mutex_unlock(&rcu_gp_lock);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- assert(!ret);
-}
-
-/*
- * Prune all entries from registry except our own thread. Fits the Linux
- * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
- */
-static
-void lttng_ust_urcu_prune_registry(void)
-{
- struct registry_chunk *chunk;
- struct lttng_ust_urcu_reader *rcu_reader_reg;
-
-	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
- for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
- rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
- rcu_reader_reg++) {
- if (!rcu_reader_reg->alloc)
- continue;
- if (rcu_reader_reg->tid == pthread_self())
- continue;
- cleanup_thread(chunk, rcu_reader_reg);
- }
- }
-}
-
-void lttng_ust_urcu_after_fork_child(void)
-{
- sigset_t oldmask;
- int ret;
-
- lttng_ust_urcu_prune_registry();
- oldmask = saved_fork_signal_mask;
- mutex_unlock(&rcu_registry_lock);
- mutex_unlock(&rcu_gp_lock);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- assert(!ret);
-}
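A minimal sketch of how these three fork hooks would typically be registered (the pthread_atfork() call site is an assumption; the handler functions are the ones defined above):

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical one-time setup, e.g. from a library constructor. */
static void setup_fork_handling(void)
{
	if (pthread_atfork(lttng_ust_urcu_before_fork,
			lttng_ust_urcu_after_fork_parent,
			lttng_ust_urcu_after_fork_child))
		abort();
}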
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_UST_UUID_H
-#define _LTTNG_UST_UUID_H
-
-#include <lttng/ust-events.h> /* For LTTNG_UST_UUID_LEN */
-#include <lttng/ust-clock.h>
-
-#endif /* _LTTNG_UST_UUID_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#ifndef _LTTNG_NS_H
-#define _LTTNG_NS_H
-
-/*
- * The lowest valid inode number that can be allocated in the proc filesystem
- * is 0xF0000000. Any number below can be used internally as an error code.
- *
- * Zero is used in the kernel as an error code; it's the value we return
- * when we fail to read the proper inode number.
- *
- * One is used internally to identify an uninitialized cache entry; it should
- * never be returned.
- */
-
-enum ns_ino_state {
- NS_INO_UNAVAILABLE = 0x0,
- NS_INO_UNINITIALIZED = 0x1,
- NS_INO_MIN = 0xF0000000,
-};
-
-#endif /* _LTTNG_NS_H */
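Given those ranges, a hypothetical helper built on this enum could distinguish a real proc-filesystem namespace inode from the internal codes:

#include <stdbool.h>

/* Hypothetical helper: only values >= NS_INO_MIN are inode numbers the proc
 * filesystem can actually allocate; 0 and 1 are internal codes. */
static inline bool ns_ino_is_valid(unsigned int ns_ino)
{
	return ns_ino >= NS_INO_MIN;
}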
+++ /dev/null
-/*
- * SPDX-License-Identifier: GPL-2.0-only
- *
- * Performance events:
- *
- * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
- *
- * Data type definitions, declarations, prototypes.
- *
- * Started by: Thomas Gleixner and Ingo Molnar
- *
- * Header copied from Linux kernel v4.7 installed headers.
- */
-
-#ifndef _UAPI_LINUX_PERF_EVENT_H
-#define _UAPI_LINUX_PERF_EVENT_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-#include <asm/byteorder.h>
-
-/*
- * User-space ABI bits:
- */
-
-/*
- * attr.type
- */
-enum perf_type_id {
- PERF_TYPE_HARDWARE = 0,
- PERF_TYPE_SOFTWARE = 1,
- PERF_TYPE_TRACEPOINT = 2,
- PERF_TYPE_HW_CACHE = 3,
- PERF_TYPE_RAW = 4,
- PERF_TYPE_BREAKPOINT = 5,
-
- PERF_TYPE_MAX, /* non-ABI */
-};
-
-/*
- * Generalized performance event event_id types, used by the
- * attr.event_id parameter of the sys_perf_event_open()
- * syscall:
- */
-enum perf_hw_id {
- /*
- * Common hardware events, generalized by the kernel:
- */
- PERF_COUNT_HW_CPU_CYCLES = 0,
- PERF_COUNT_HW_INSTRUCTIONS = 1,
- PERF_COUNT_HW_CACHE_REFERENCES = 2,
- PERF_COUNT_HW_CACHE_MISSES = 3,
- PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
- PERF_COUNT_HW_BRANCH_MISSES = 5,
- PERF_COUNT_HW_BUS_CYCLES = 6,
- PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
- PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
- PERF_COUNT_HW_REF_CPU_CYCLES = 9,
-
- PERF_COUNT_HW_MAX, /* non-ABI */
-};
-
-/*
- * Generalized hardware cache events:
- *
- * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
- * { read, write, prefetch } x
- * { accesses, misses }
- */
-enum perf_hw_cache_id {
- PERF_COUNT_HW_CACHE_L1D = 0,
- PERF_COUNT_HW_CACHE_L1I = 1,
- PERF_COUNT_HW_CACHE_LL = 2,
- PERF_COUNT_HW_CACHE_DTLB = 3,
- PERF_COUNT_HW_CACHE_ITLB = 4,
- PERF_COUNT_HW_CACHE_BPU = 5,
- PERF_COUNT_HW_CACHE_NODE = 6,
-
- PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
-};
-
-enum perf_hw_cache_op_id {
- PERF_COUNT_HW_CACHE_OP_READ = 0,
- PERF_COUNT_HW_CACHE_OP_WRITE = 1,
- PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
-
- PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
-};
-
-enum perf_hw_cache_op_result_id {
- PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
- PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
-
- PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
-};
-
-/*
- * Special "software" events provided by the kernel, even if the hardware
- * does not support performance events. These events measure various
- * physical and sw events of the kernel (and allow the profiling of them as
- * well):
- */
-enum perf_sw_ids {
- PERF_COUNT_SW_CPU_CLOCK = 0,
- PERF_COUNT_SW_TASK_CLOCK = 1,
- PERF_COUNT_SW_PAGE_FAULTS = 2,
- PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
- PERF_COUNT_SW_CPU_MIGRATIONS = 4,
- PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
- PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
- PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
- PERF_COUNT_SW_EMULATION_FAULTS = 8,
- PERF_COUNT_SW_DUMMY = 9,
- PERF_COUNT_SW_BPF_OUTPUT = 10,
-
- PERF_COUNT_SW_MAX, /* non-ABI */
-};
-
-/*
- * Bits that can be set in attr.sample_type to request information
- * in the overflow packets.
- */
-enum perf_event_sample_format {
- PERF_SAMPLE_IP = 1U << 0,
- PERF_SAMPLE_TID = 1U << 1,
- PERF_SAMPLE_TIME = 1U << 2,
- PERF_SAMPLE_ADDR = 1U << 3,
- PERF_SAMPLE_READ = 1U << 4,
- PERF_SAMPLE_CALLCHAIN = 1U << 5,
- PERF_SAMPLE_ID = 1U << 6,
- PERF_SAMPLE_CPU = 1U << 7,
- PERF_SAMPLE_PERIOD = 1U << 8,
- PERF_SAMPLE_STREAM_ID = 1U << 9,
- PERF_SAMPLE_RAW = 1U << 10,
- PERF_SAMPLE_BRANCH_STACK = 1U << 11,
- PERF_SAMPLE_REGS_USER = 1U << 12,
- PERF_SAMPLE_STACK_USER = 1U << 13,
- PERF_SAMPLE_WEIGHT = 1U << 14,
- PERF_SAMPLE_DATA_SRC = 1U << 15,
- PERF_SAMPLE_IDENTIFIER = 1U << 16,
- PERF_SAMPLE_TRANSACTION = 1U << 17,
- PERF_SAMPLE_REGS_INTR = 1U << 18,
-
- PERF_SAMPLE_MAX = 1U << 19, /* non-ABI */
-};
-
-/*
- * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
- *
- * If the user does not pass priv level information via branch_sample_type,
- * the kernel uses the event's priv level. Branch and event priv levels do
- * not have to match. Branch priv level is checked for permissions.
- *
- * The branch types can be combined, however BRANCH_ANY covers all types
- * of branches and therefore it supersedes all the other types.
- */
-enum perf_branch_sample_type_shift {
- PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */
- PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */
- PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */
-
- PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */
- PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */
- PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */
- PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */
- PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */
- PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */
- PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */
- PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */
-
- PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
- PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */
- PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */
-
- PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */
- PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */
-
- PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
-};
-
-enum perf_branch_sample_type {
- PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
- PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
- PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
-
- PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
- PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
- PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
- PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
- PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
- PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
- PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
- PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
-
- PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
- PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
- PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
-
- PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
- PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
-
- PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
-};
-
-#define PERF_SAMPLE_BRANCH_PLM_ALL \
- (PERF_SAMPLE_BRANCH_USER|\
- PERF_SAMPLE_BRANCH_KERNEL|\
- PERF_SAMPLE_BRANCH_HV)
-
-/*
- * Values to determine ABI of the registers dump.
- */
-enum perf_sample_regs_abi {
- PERF_SAMPLE_REGS_ABI_NONE = 0,
- PERF_SAMPLE_REGS_ABI_32 = 1,
- PERF_SAMPLE_REGS_ABI_64 = 2,
-};
-
-/*
- * Values for the memory transaction event qualifier, mostly for
- * abort events. Multiple bits can be set.
- */
-enum {
- PERF_TXN_ELISION = (1 << 0), /* From elision */
- PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */
- PERF_TXN_SYNC = (1 << 2), /* Instruction is related */
- PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */
- PERF_TXN_RETRY = (1 << 4), /* Retry possible */
- PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */
- PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
- PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */
-
- PERF_TXN_MAX = (1 << 8), /* non-ABI */
-
- /* bits 32..63 are reserved for the abort code */
-
- PERF_TXN_ABORT_MASK = (0xffffffffULL << 32),
- PERF_TXN_ABORT_SHIFT = 32,
-};
-
-/*
- * The format of the data returned by read() on a perf event fd,
- * as specified by attr.read_format:
- *
- * struct read_format {
- * { u64 value;
- * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
- * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
- * { u64 id; } && PERF_FORMAT_ID
- * } && !PERF_FORMAT_GROUP
- *
- * { u64 nr;
- * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
- * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
- * { u64 value;
- * { u64 id; } && PERF_FORMAT_ID
- * } cntr[nr];
- * } && PERF_FORMAT_GROUP
- * };
- */
-enum perf_event_read_format {
- PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
- PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
- PERF_FORMAT_ID = 1U << 2,
- PERF_FORMAT_GROUP = 1U << 3,
-
- PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
-};
-
-#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
-#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */
-#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */
-#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */
- /* add: sample_stack_user */
-#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
-#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
-
-/*
- * Hardware event_id to monitor via a performance monitoring event:
- */
-struct perf_event_attr {
-
- /*
- * Major type: hardware/software/tracepoint/etc.
- */
- __u32 type;
-
- /*
- * Size of the attr structure, for fwd/bwd compat.
- */
- __u32 size;
-
- /*
- * Type specific configuration information.
- */
- __u64 config;
-
- union {
- __u64 sample_period;
- __u64 sample_freq;
- };
-
- __u64 sample_type;
- __u64 read_format;
-
- __u64 disabled : 1, /* off by default */
- inherit : 1, /* children inherit it */
- pinned : 1, /* must always be on PMU */
- exclusive : 1, /* only group on PMU */
- exclude_user : 1, /* don't count user */
- exclude_kernel : 1, /* ditto kernel */
- exclude_hv : 1, /* ditto hypervisor */
- exclude_idle : 1, /* don't count when idle */
- mmap : 1, /* include mmap data */
- comm : 1, /* include comm data */
- freq : 1, /* use freq, not period */
- inherit_stat : 1, /* per task counts */
- enable_on_exec : 1, /* next exec enables */
- task : 1, /* trace fork/exit */
- watermark : 1, /* wakeup_watermark */
- /*
- * precise_ip:
- *
- * 0 - SAMPLE_IP can have arbitrary skid
- * 1 - SAMPLE_IP must have constant skid
- * 2 - SAMPLE_IP requested to have 0 skid
- * 3 - SAMPLE_IP must have 0 skid
- *
- * See also PERF_RECORD_MISC_EXACT_IP
- */
- precise_ip : 2, /* skid constraint */
- mmap_data : 1, /* non-exec mmap data */
- sample_id_all : 1, /* sample_type all events */
-
- exclude_host : 1, /* don't count in host */
- exclude_guest : 1, /* don't count in guest */
-
- exclude_callchain_kernel : 1, /* exclude kernel callchains */
- exclude_callchain_user : 1, /* exclude user callchains */
- mmap2 : 1, /* include mmap with inode data */
- comm_exec : 1, /* flag comm events that are due to an exec */
- use_clockid : 1, /* use @clockid for time fields */
- context_switch : 1, /* context switch data */
- write_backward : 1, /* Write ring buffer from end to beginning */
- __reserved_1 : 36;
-
- union {
- __u32 wakeup_events; /* wakeup every n events */
- __u32 wakeup_watermark; /* bytes before wakeup */
- };
-
- __u32 bp_type;
- union {
- __u64 bp_addr;
- __u64 config1; /* extension of config */
- };
- union {
- __u64 bp_len;
- __u64 config2; /* extension of config1 */
- };
- __u64 branch_sample_type; /* enum perf_branch_sample_type */
-
- /*
- * Defines set of user regs to dump on samples.
- * See asm/perf_regs.h for details.
- */
- __u64 sample_regs_user;
-
- /*
- * Defines size of the user stack to dump on samples.
- */
- __u32 sample_stack_user;
-
- __s32 clockid;
- /*
- * Defines set of regs to dump for each sample
- * state captured on:
- * - precise = 0: PMU interrupt
- * - precise > 0: sampled instruction
- *
- * See asm/perf_regs.h for details.
- */
- __u64 sample_regs_intr;
-
- /*
- * Wakeup watermark for AUX area
- */
- __u32 aux_watermark;
- __u32 __reserved_2; /* align to __u64 */
-};
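A minimal sketch of filling this structure for a software counter and reading it back in the read_format layout documented earlier (the syscall(2)-based perf_event_open call is standard Linux usage rather than part of this header, and the chosen event is an arbitrary example):

#include <unistd.h>
#include <string.h>
#include <sys/syscall.h>

struct counter_read {
	__u64 value;
	__u64 time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	__u64 time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
};

static int open_task_clock_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	/* Count the calling thread on any CPU. */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

/* A subsequent read(fd, &r, sizeof(struct counter_read)) then fills
 * value, time_enabled and time_running in that order. */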
-
-#define perf_flags(attr) (*(&(attr)->read_format + 1))
-
-/*
- * Ioctls that can be done on a perf event fd:
- */
-#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
-#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
-#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
-#define PERF_EVENT_IOC_RESET _IO ('$', 3)
-#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
-#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
-#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
-#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
-#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
-#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
-
-enum perf_event_ioc_flags {
- PERF_IOC_FLAG_GROUP = 1U << 0,
-};
-
-/*
- * Structure of the page that can be mapped via mmap
- */
-struct perf_event_mmap_page {
- __u32 version; /* version number of this structure */
- __u32 compat_version; /* lowest version this is compat with */
-
- /*
- * Bits needed to read the hw events in user-space.
- *
- * u32 seq, time_mult, time_shift, index, width;
- * u64 count, enabled, running;
- * u64 cyc, time_offset;
- * s64 pmc = 0;
- *
- * do {
- * seq = pc->lock;
- * barrier()
- *
- * enabled = pc->time_enabled;
- * running = pc->time_running;
- *
- * if (pc->cap_usr_time && enabled != running) {
- * cyc = rdtsc();
- * time_offset = pc->time_offset;
- * time_mult = pc->time_mult;
- * time_shift = pc->time_shift;
- * }
- *
- * index = pc->index;
- * count = pc->offset;
- * if (pc->cap_user_rdpmc && index) {
- * width = pc->pmc_width;
- * pmc = rdpmc(index - 1);
- * }
- *
- * barrier();
- * } while (pc->lock != seq);
- *
- * NOTE: for obvious reason this only works on self-monitoring
- * processes.
- */
- __u32 lock; /* seqlock for synchronization */
- __u32 index; /* hardware event identifier */
- __s64 offset; /* add to hardware event value */
- __u64 time_enabled; /* time event active */
- __u64 time_running; /* time event on cpu */
- union {
- __u64 capabilities;
- struct {
- __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
- cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
-
- cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
- cap_user_time : 1, /* The time_* fields are used */
- cap_user_time_zero : 1, /* The time_zero field is used */
- cap_____res : 59;
- };
- };
-
- /*
- * If cap_user_rdpmc this field provides the bit-width of the value
- * read using the rdpmc() or equivalent instruction. This can be used
- * to sign extend the result like:
- *
- * pmc <<= 64 - width;
- * pmc >>= 64 - width; // signed shift right
- * count += pmc;
- */
- __u16 pmc_width;
-
- /*
- * If cap_usr_time the below fields can be used to compute the time
- * delta since time_enabled (in ns) using rdtsc or similar.
- *
- * u64 quot, rem;
- * u64 delta;
- *
- * quot = (cyc >> time_shift);
- * rem = cyc & (((u64)1 << time_shift) - 1);
- * delta = time_offset + quot * time_mult +
- * ((rem * time_mult) >> time_shift);
- *
- * Where time_offset,time_mult,time_shift and cyc are read in the
- * seqcount loop described above. This delta can then be added to
- * enabled and possible running (if index), improving the scaling:
- *
- * enabled += delta;
- * if (index)
- * running += delta;
- *
- * quot = count / running;
- * rem = count % running;
- * count = quot * enabled + (rem * enabled) / running;
- */
- __u16 time_shift;
- __u32 time_mult;
- __u64 time_offset;
- /*
- * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
- * from sample timestamps.
- *
- * time = timestamp - time_zero;
- * quot = time / time_mult;
- * rem = time % time_mult;
- * cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
- *
- * And vice versa:
- *
- * quot = cyc >> time_shift;
- * rem = cyc & (((u64)1 << time_shift) - 1);
- * timestamp = time_zero + quot * time_mult +
- * ((rem * time_mult) >> time_shift);
- */
- __u64 time_zero;
- __u32 size; /* Header size up to __reserved[] fields. */
-
- /*
- * Hole for extension of the self monitor capabilities
- */
-
- __u8 __reserved[118*8+4]; /* align to 1k. */
-
- /*
- * Control data for the mmap() data buffer.
- *
- * User-space reading the @data_head value should issue an smp_rmb(),
- * after reading this value.
- *
- * When the mapping is PROT_WRITE the @data_tail value should be
-	 * written by userspace to reflect the last read data, after issuing
- * an smp_mb() to separate the data read from the ->data_tail store.
- * In this case the kernel will not over-write unread data.
- *
- * See perf_output_put_handle() for the data ordering.
- *
- * data_{offset,size} indicate the location and size of the perf record
- * buffer within the mmapped area.
- */
- __u64 data_head; /* head in the data section */
- __u64 data_tail; /* user-space written tail */
- __u64 data_offset; /* where the buffer starts */
- __u64 data_size; /* data buffer size */
-
- /*
- * AUX area is defined by aux_{offset,size} fields that should be set
- * by the userspace, so that
- *
- * aux_offset >= data_offset + data_size
- *
- * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
- *
- * Ring buffer pointers aux_{head,tail} have the same semantics as
- * data_{head,tail} and same ordering rules apply.
- */
- __u64 aux_head;
- __u64 aux_tail;
- __u64 aux_offset;
- __u64 aux_size;
-};
-
-#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
-#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
-#define PERF_RECORD_MISC_KERNEL (1 << 0)
-#define PERF_RECORD_MISC_USER (2 << 0)
-#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
-#define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0)
-#define PERF_RECORD_MISC_GUEST_USER (5 << 0)
-
-/*
- * Indicates that /proc/PID/maps parsing was truncated by a timeout.
- */
-#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
-/*
- * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
- * different events so can reuse the same bit position.
- * Ditto PERF_RECORD_MISC_SWITCH_OUT.
- */
-#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
-#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
-#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
-/*
- * Indicates that the content of PERF_SAMPLE_IP points to
- * the actual instruction that triggered the event. See also
- * perf_event_attr::precise_ip.
- */
-#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
-/*
- * Reserve the last bit to indicate some extended misc field
- */
-#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)
-
-struct perf_event_header {
- __u32 type;
- __u16 misc;
- __u16 size;
-};
-
-enum perf_event_type {
-
- /*
- * If perf_event_attr.sample_id_all is set then all event types will
- * have the sample_type selected fields related to where/when
- * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
- * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed
- * just after the perf_event_header and the fields already present for
- * the existing fields, i.e. at the end of the payload. That way a newer
- * perf.data file will be supported by older perf tools, with these new
- * optional fields being ignored.
- *
- * struct sample_id {
- * { u32 pid, tid; } && PERF_SAMPLE_TID
- * { u64 time; } && PERF_SAMPLE_TIME
- * { u64 id; } && PERF_SAMPLE_ID
- * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
- * { u32 cpu, res; } && PERF_SAMPLE_CPU
- * { u64 id; } && PERF_SAMPLE_IDENTIFIER
- * } && perf_event_attr::sample_id_all
- *
- * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
- * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
- * relative to header.size.
- */
-
- /*
- * The MMAP events record the PROT_EXEC mappings so that we can
- * correlate userspace IPs to code. They have the following structure:
- *
- * struct {
- * struct perf_event_header header;
- *
- * u32 pid, tid;
- * u64 addr;
- * u64 len;
- * u64 pgoff;
- * char filename[];
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_MMAP = 1,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u64 id;
- * u64 lost;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_LOST = 2,
-
- /*
- * struct {
- * struct perf_event_header header;
- *
- * u32 pid, tid;
- * char comm[];
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_COMM = 3,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, ppid;
- * u32 tid, ptid;
- * u64 time;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_EXIT = 4,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u64 time;
- * u64 id;
- * u64 stream_id;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_THROTTLE = 5,
- PERF_RECORD_UNTHROTTLE = 6,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, ppid;
- * u32 tid, ptid;
- * u64 time;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_FORK = 7,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, tid;
- *
- * struct read_format values;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_READ = 8,
-
- /*
- * struct {
- * struct perf_event_header header;
- *
- * #
- * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
- * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
- * # is fixed relative to header.
- * #
- *
- * { u64 id; } && PERF_SAMPLE_IDENTIFIER
- * { u64 ip; } && PERF_SAMPLE_IP
- * { u32 pid, tid; } && PERF_SAMPLE_TID
- * { u64 time; } && PERF_SAMPLE_TIME
- * { u64 addr; } && PERF_SAMPLE_ADDR
- * { u64 id; } && PERF_SAMPLE_ID
- * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
- * { u32 cpu, res; } && PERF_SAMPLE_CPU
- * { u64 period; } && PERF_SAMPLE_PERIOD
- *
- * { struct read_format values; } && PERF_SAMPLE_READ
- *
- * { u64 nr,
- * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
- *
- * #
- * # The RAW record below is opaque data wrt the ABI
- * #
-	 * # That is, the ABI doesn't make any promises wrt
- * # the stability of its content, it may vary depending
- * # on event, hardware, kernel version and phase of
- * # the moon.
- * #
- * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
- * #
- *
- * { u32 size;
- * char data[size];}&& PERF_SAMPLE_RAW
- *
- * { u64 nr;
- * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
- *
- * { u64 abi; # enum perf_sample_regs_abi
- * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
- *
- * { u64 size;
- * char data[size];
- * u64 dyn_size; } && PERF_SAMPLE_STACK_USER
- *
- * { u64 weight; } && PERF_SAMPLE_WEIGHT
- * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
- * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
- * { u64 abi; # enum perf_sample_regs_abi
- * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
- * };
- */
- PERF_RECORD_SAMPLE = 9,
-
- /*
- * The MMAP2 records are an augmented version of MMAP, they add
- * maj, min, ino numbers to be used to uniquely identify each mapping
- *
- * struct {
- * struct perf_event_header header;
- *
- * u32 pid, tid;
- * u64 addr;
- * u64 len;
- * u64 pgoff;
- * u32 maj;
- * u32 min;
- * u64 ino;
- * u64 ino_generation;
- * u32 prot, flags;
- * char filename[];
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_MMAP2 = 10,
-
- /*
- * Records that new data landed in the AUX buffer part.
- *
- * struct {
- * struct perf_event_header header;
- *
- * u64 aux_offset;
- * u64 aux_size;
- * u64 flags;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_AUX = 11,
-
- /*
- * Indicates that instruction trace has started
- *
- * struct {
- * struct perf_event_header header;
- * u32 pid;
- * u32 tid;
- * };
- */
- PERF_RECORD_ITRACE_START = 12,
-
- /*
- * Records the dropped/lost sample number.
- *
- * struct {
- * struct perf_event_header header;
- *
- * u64 lost;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_LOST_SAMPLES = 13,
-
- /*
- * Records a context switch in or out (flagged by
- * PERF_RECORD_MISC_SWITCH_OUT). See also
- * PERF_RECORD_SWITCH_CPU_WIDE.
- *
- * struct {
- * struct perf_event_header header;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_SWITCH = 14,
-
- /*
- * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
- * next_prev_tid that are the next (switching out) or previous
- * (switching in) pid/tid.
- *
- * struct {
- * struct perf_event_header header;
- * u32 next_prev_pid;
- * u32 next_prev_tid;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_SWITCH_CPU_WIDE = 15,
-
- PERF_RECORD_MAX, /* non-ABI */
-};
-
-#define PERF_MAX_STACK_DEPTH 127
-#define PERF_MAX_CONTEXTS_PER_STACK 8
-
-enum perf_callchain_context {
- PERF_CONTEXT_HV = (__u64)-32,
- PERF_CONTEXT_KERNEL = (__u64)-128,
- PERF_CONTEXT_USER = (__u64)-512,
-
- PERF_CONTEXT_GUEST = (__u64)-2048,
- PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
- PERF_CONTEXT_GUEST_USER = (__u64)-2560,
-
- PERF_CONTEXT_MAX = (__u64)-4095,
-};
-
-/**
- * PERF_RECORD_AUX::flags bits
- */
-#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
-#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
-
-#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
-#define PERF_FLAG_FD_OUTPUT (1UL << 1)
-#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
-#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
-
-union perf_mem_data_src {
- __u64 val;
- struct {
- __u64 mem_op:5, /* type of opcode */
- mem_lvl:14, /* memory hierarchy level */
- mem_snoop:5, /* snoop mode */
- mem_lock:2, /* lock instr */
- mem_dtlb:7, /* tlb access */
- mem_rsvd:31;
- };
-};
-
-/* type of opcode (load/store/prefetch,code) */
-#define PERF_MEM_OP_NA 0x01 /* not available */
-#define PERF_MEM_OP_LOAD 0x02 /* load instruction */
-#define PERF_MEM_OP_STORE 0x04 /* store instruction */
-#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */
-#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */
-#define PERF_MEM_OP_SHIFT 0
-
-/* memory hierarchy (memory level, hit or miss) */
-#define PERF_MEM_LVL_NA 0x01 /* not available */
-#define PERF_MEM_LVL_HIT 0x02 /* hit level */
-#define PERF_MEM_LVL_MISS 0x04 /* miss level */
-#define PERF_MEM_LVL_L1 0x08 /* L1 */
-#define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */
-#define PERF_MEM_LVL_L2 0x20 /* L2 */
-#define PERF_MEM_LVL_L3 0x40 /* L3 */
-#define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */
-#define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */
-#define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */
-#define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */
-#define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */
-#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */
-#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */
-#define PERF_MEM_LVL_SHIFT 5
-
-/* snoop mode */
-#define PERF_MEM_SNOOP_NA 0x01 /* not available */
-#define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */
-#define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */
-#define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */
-#define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */
-#define PERF_MEM_SNOOP_SHIFT 19
-
-/* locked instruction */
-#define PERF_MEM_LOCK_NA 0x01 /* not available */
-#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */
-#define PERF_MEM_LOCK_SHIFT 24
-
-/* TLB access */
-#define PERF_MEM_TLB_NA 0x01 /* not available */
-#define PERF_MEM_TLB_HIT 0x02 /* hit level */
-#define PERF_MEM_TLB_MISS 0x04 /* miss level */
-#define PERF_MEM_TLB_L1 0x08 /* L1 */
-#define PERF_MEM_TLB_L2 0x10 /* L2 */
-#define PERF_MEM_TLB_WK 0x20 /* Hardware Walker*/
-#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */
-#define PERF_MEM_TLB_SHIFT 26
-
-#define PERF_MEM_S(a, s) \
- (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
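For instance, a hypothetical data-source word describing a load that hit in L1 without requiring a snoop could be composed as:

union perf_mem_data_src dsrc = {
	.val = PERF_MEM_S(OP, LOAD)	/* mem_op: load instruction */
	     | PERF_MEM_S(LVL, HIT)
	     | PERF_MEM_S(LVL, L1)	/* mem_lvl: hit in L1 */
	     | PERF_MEM_S(SNOOP, NONE)	/* mem_snoop: no snoop needed */
	     | PERF_MEM_S(LOCK, NA)
	     | PERF_MEM_S(TLB, NA),
};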
-
-/*
- * single taken branch record layout:
- *
- * from: source instruction (may not always be a branch insn)
- * to: branch target
- * mispred: branch target was mispredicted
- * predicted: branch target was predicted
- *
- * support for mispred, predicted is optional. In case it
- * is not supported mispred = predicted = 0.
- *
- * in_tx: running in a hardware transaction
- * abort: aborting a hardware transaction
- * cycles: cycles from last branch (or 0 if not supported)
- */
-struct perf_branch_entry {
- __u64 from;
- __u64 to;
- __u64 mispred:1, /* target mispredicted */
- predicted:1,/* target predicted */
- in_tx:1, /* in transaction */
- abort:1, /* transaction abort */
- cycles:16, /* cycle count to last branch */
- reserved:44;
-};
-
-#endif /* _UAPI_LINUX_PERF_EVENT_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * Internal header for Lock-Free RCU Hash Table
- */
-
-#ifndef _LTTNG_UST_RCULFHASH_INTERNAL_H
-#define _LTTNG_UST_RCULFHASH_INTERNAL_H
-
-#include "rculfhash.h"
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
-
-#ifdef DEBUG
-#define dbg_printf(fmt, args...) printf("[debug lttng-ust rculfhash] " fmt, ## args)
-#else
-#define dbg_printf(fmt, args...) \
-do { \
- /* do nothing but check printf format */ \
- if (0) \
- printf("[debug lttng-ust rculfhash] " fmt, ## args); \
-} while (0)
-#endif
-
-#if (CAA_BITS_PER_LONG == 32)
-#define MAX_TABLE_ORDER 32
-#else
-#define MAX_TABLE_ORDER 64
-#endif
-
-#define MAX_CHUNK_TABLE (1UL << 10)
-
-#ifndef min
-#define min(a, b) ((a) < (b) ? (a) : (b))
-#endif
-
-#ifndef max
-#define max(a, b) ((a) > (b) ? (a) : (b))
-#endif
-
-/*
- * lttng_ust_lfht: Top-level data structure representing a lock-free hash
- * table. Defined in the implementation file to make it an opaque
- * cookie to users.
- *
- * The fields used in fast-paths are placed near the end of the
- * structure, because we need to have a variable-sized union to contain
- * the mm plugin fields, which are used in the fast path.
- */
-struct lttng_ust_lfht {
- /* Initial configuration items */
- unsigned long max_nr_buckets;
- const struct lttng_ust_lfht_mm_type *mm; /* memory management plugin */
- const struct rcu_flavor_struct *flavor; /* RCU flavor */
-
- /*
- * We need to put the work threads offline (QSBR) when taking this
- * mutex, because we use synchronize_rcu within this mutex critical
- * section, which waits on read-side critical sections, and could
- * therefore cause grace-period deadlock if we hold off RCU G.P.
- * completion.
- */
- pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
- unsigned int in_progress_destroy;
- unsigned long resize_target;
- int resize_initiated;
-
- /*
- * Variables needed for add and remove fast-paths.
- */
- int flags;
- unsigned long min_alloc_buckets_order;
- unsigned long min_nr_alloc_buckets;
-
- /*
- * Variables needed for the lookup, add and remove fast-paths.
- */
- unsigned long size; /* always a power of 2, shared (RCU) */
- /*
- * bucket_at pointer is kept here to skip the extra level of
- * dereference needed to get to "mm" (this is a fast-path).
- */
- struct lttng_ust_lfht_node *(*bucket_at)(struct lttng_ust_lfht *ht,
- unsigned long index);
- /*
- * Dynamic length "tbl_chunk" needs to be at the end of
- * lttng_ust_lfht.
- */
- union {
- /*
- * Contains the per order-index-level bucket node table.
- * The size of each bucket node table is half the number
- * of hashes contained in this order (except for order 0).
- * The minimum allocation buckets size parameter allows
- * combining the bucket node arrays of the lowermost
- * levels to improve cache locality for small index orders.
- */
- struct lttng_ust_lfht_node *tbl_order[MAX_TABLE_ORDER];
-
- /*
- * Contains the bucket node chunks. The size of each
-		 * bucket node chunk is ->min_alloc_size (we avoid
-		 * allocating chunks of different sizes). Chunks improve
- * cache locality for small index orders, and are more
- * friendly with environments where allocation of large
- * contiguous memory areas is challenging due to memory
- * fragmentation concerns or inability to use virtual
- * memory addressing.
- */
- struct lttng_ust_lfht_node *tbl_chunk[0];
-
- /*
- * Memory mapping with room for all possible buckets.
- * Their memory is allocated when needed.
- */
- struct lttng_ust_lfht_node *tbl_mmap;
- };
- /*
- * End of variables needed for the lookup, add and remove
- * fast-paths.
- */
-};
-
-extern unsigned int lttng_ust_lfht_fls_ulong(unsigned long x)
- __attribute__((visibility("hidden")));
-
-extern int lttng_ust_lfht_get_count_order_u32(uint32_t x)
- __attribute__((visibility("hidden")));
-
-extern int lttng_ust_lfht_get_count_order_ulong(unsigned long x)
- __attribute__((visibility("hidden")));
-
-#ifdef POISON_FREE
-#define poison_free(ptr) \
- do { \
- if (ptr) { \
- memset(ptr, 0x42, sizeof(*(ptr))); \
- free(ptr); \
- } \
- } while (0)
-#else
-#define poison_free(ptr) free(ptr)
-#endif
-
-static inline
-struct lttng_ust_lfht *__default_alloc_lttng_ust_lfht(
- const struct lttng_ust_lfht_mm_type *mm,
- unsigned long lttng_ust_lfht_size,
- unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets)
-{
- struct lttng_ust_lfht *ht;
-
- ht = calloc(1, lttng_ust_lfht_size);
- assert(ht);
-
- ht->mm = mm;
- ht->bucket_at = mm->bucket_at;
- ht->min_nr_alloc_buckets = min_nr_alloc_buckets;
- ht->min_alloc_buckets_order =
- lttng_ust_lfht_get_count_order_ulong(min_nr_alloc_buckets);
- ht->max_nr_buckets = max_nr_buckets;
-
- return ht;
-}
-
-#endif /* _LTTNG_UST_RCULFHASH_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * Chunk based memory management for Lock-Free RCU Hash Table
- */
-
-#include <stddef.h>
-#include "rculfhash-internal.h"
-
-static
-void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- if (order == 0) {
- ht->tbl_chunk[0] = calloc(ht->min_nr_alloc_buckets,
- sizeof(struct lttng_ust_lfht_node));
- assert(ht->tbl_chunk[0]);
- } else if (order > ht->min_alloc_buckets_order) {
- unsigned long i, len = 1UL << (order - 1 - ht->min_alloc_buckets_order);
-
- for (i = len; i < 2 * len; i++) {
- ht->tbl_chunk[i] = calloc(ht->min_nr_alloc_buckets,
- sizeof(struct lttng_ust_lfht_node));
- assert(ht->tbl_chunk[i]);
- }
- }
- /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
-}
-
-/*
- * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
- * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
- * lfht is destroyed.
- */
-static
-void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- if (order == 0)
- poison_free(ht->tbl_chunk[0]);
- else if (order > ht->min_alloc_buckets_order) {
- unsigned long i, len = 1UL << (order - 1 - ht->min_alloc_buckets_order);
-
- for (i = len; i < 2 * len; i++)
- poison_free(ht->tbl_chunk[i]);
- }
- /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
-}
-
-static
-struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
-{
- unsigned long chunk, offset;
-
- chunk = index >> ht->min_alloc_buckets_order;
- offset = index & (ht->min_nr_alloc_buckets - 1);
- return &ht->tbl_chunk[chunk][offset];
-}
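As a worked example with hypothetical parameters: for min_alloc_buckets_order = 8 (256 buckets per chunk), bucket index 1000 resolves to chunk 1000 >> 8 = 3 and offset 1000 & 255 = 232, i.e. &ht->tbl_chunk[3][232].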
-
-static
-struct lttng_ust_lfht *alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets)
-{
- unsigned long nr_chunks, lttng_ust_lfht_size;
-
- min_nr_alloc_buckets = max(min_nr_alloc_buckets,
- max_nr_buckets / MAX_CHUNK_TABLE);
- nr_chunks = max_nr_buckets / min_nr_alloc_buckets;
- lttng_ust_lfht_size = offsetof(struct lttng_ust_lfht, tbl_chunk) +
- sizeof(struct lttng_ust_lfht_node *) * nr_chunks;
- lttng_ust_lfht_size = max(lttng_ust_lfht_size, sizeof(struct lttng_ust_lfht));
-
- return __default_alloc_lttng_ust_lfht(
-			&lttng_ust_lfht_mm_chunk, lttng_ust_lfht_size,
- min_nr_alloc_buckets, max_nr_buckets);
-}
-
-const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_chunk = {
- .alloc_lttng_ust_lfht = alloc_lttng_ust_lfht,
- .alloc_bucket_table = lttng_ust_lfht_alloc_bucket_table,
- .free_bucket_table = lttng_ust_lfht_free_bucket_table,
- .bucket_at = bucket_at,
-};
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * mmap/reservation based memory management for Lock-Free RCU Hash Table
- */
-
-#include <unistd.h>
-#include <stdio.h>
-#include <errno.h>
-#include <stdlib.h>
-#include <sys/mman.h>
-#include "rculfhash-internal.h"
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-/*
- * The allocation scheme used by the mmap based RCU hash table is to make a
- * large inaccessible mapping to reserve memory without allocating it.
- * Then smaller chunks are allocated by overlapping read/write mappings which
- * do allocate memory. Deallocation is done by an overlapping inaccessible
- * mapping.
- *
- * This scheme was tested on Linux, macOS and Solaris. However, on Cygwin the
- * mmap wrapper is based on the Windows NtMapViewOfSection API which doesn't
- * support overlapping mappings.
- *
- * An alternative to the overlapping mappings is to use mprotect to change the
- * protection on chunks of the large mapping, read/write to allocate and none
- * to deallocate. This works perfectly on Cygwin and Solaris, but on Linux a
- * call to madvise is also required to deallocate and it just doesn't work on
- * macOS.
- *
- * For this reason, we keep the original scheme on all platforms except Cygwin.
- */
-
-
-/* Reserve inaccessible memory space without allocating it */
-static
-void *memory_map(size_t length)
-{
- void *ret;
-
- ret = mmap(NULL, length, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (ret == MAP_FAILED) {
- perror("mmap");
- abort();
- }
- return ret;
-}
-
-static
-void memory_unmap(void *ptr, size_t length)
-{
- if (munmap(ptr, length)) {
- perror("munmap");
- abort();
- }
-}
-
-#ifdef __CYGWIN__
-/* Set protection to read/write to allocate a memory chunk */
-static
-void memory_populate(void *ptr, size_t length)
-{
- if (mprotect(ptr, length, PROT_READ | PROT_WRITE)) {
- perror("mprotect");
- abort();
- }
-}
-
-/* Set protection to none to deallocate a memory chunk */
-static
-void memory_discard(void *ptr, size_t length)
-{
- if (mprotect(ptr, length, PROT_NONE)) {
- perror("mprotect");
- abort();
- }
-}
-
-#else /* __CYGWIN__ */
-
-static
-void memory_populate(void *ptr, size_t length)
-{
- if (mmap(ptr, length, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
- -1, 0) != ptr) {
- perror("mmap");
- abort();
- }
-}
-
-/*
- * Discard garbage memory so the system does not save it when trying to swap
- * it out. Keep it reserved and inaccessible.
- */
-static
-void memory_discard(void *ptr, size_t length)
-{
- if (mmap(ptr, length, PROT_NONE,
- MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
- -1, 0) != ptr) {
- perror("mmap");
- abort();
- }
-}
-#endif /* __CYGWIN__ */
-
-static
-void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- if (order == 0) {
- if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
- /* small table */
- ht->tbl_mmap = calloc(ht->max_nr_buckets,
- sizeof(*ht->tbl_mmap));
- assert(ht->tbl_mmap);
- return;
- }
- /* large table */
- ht->tbl_mmap = memory_map(ht->max_nr_buckets
- * sizeof(*ht->tbl_mmap));
- memory_populate(ht->tbl_mmap,
- ht->min_nr_alloc_buckets * sizeof(*ht->tbl_mmap));
- } else if (order > ht->min_alloc_buckets_order) {
- /* large table */
- unsigned long len = 1UL << (order - 1);
-
- assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
- memory_populate(ht->tbl_mmap + len,
- len * sizeof(*ht->tbl_mmap));
- }
- /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
-}
-
-/*
- * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
- * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
- * lfht is destroyed.
- */
-static
-void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- if (order == 0) {
- if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
- /* small table */
- poison_free(ht->tbl_mmap);
- return;
- }
- /* large table */
- memory_unmap(ht->tbl_mmap,
- ht->max_nr_buckets * sizeof(*ht->tbl_mmap));
- } else if (order > ht->min_alloc_buckets_order) {
- /* large table */
- unsigned long len = 1UL << (order - 1);
-
- assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
- memory_discard(ht->tbl_mmap + len, len * sizeof(*ht->tbl_mmap));
- }
- /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
-}
-
-static
-struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
-{
- return &ht->tbl_mmap[index];
-}
-
-static
-struct lttng_ust_lfht *alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets)
-{
- unsigned long page_bucket_size;
-
- page_bucket_size = getpagesize() / sizeof(struct lttng_ust_lfht_node);
- if (max_nr_buckets <= page_bucket_size) {
- /* small table */
- min_nr_alloc_buckets = max_nr_buckets;
- } else {
- /* large table */
- min_nr_alloc_buckets = max(min_nr_alloc_buckets,
- page_bucket_size);
- }
-
- return __default_alloc_lttng_ust_lfht(
-			&lttng_ust_lfht_mm_mmap, sizeof(struct lttng_ust_lfht),
- min_nr_alloc_buckets, max_nr_buckets);
-}
-
-const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_mmap = {
- .alloc_lttng_ust_lfht = alloc_lttng_ust_lfht,
- .alloc_bucket_table = lttng_ust_lfht_alloc_bucket_table,
- .free_bucket_table = lttng_ust_lfht_free_bucket_table,
- .bucket_at = bucket_at,
-};
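A minimal sketch of how the table code dispatches through one of these mm plugins (the call site is an assumption; the plugin symbol and the struct fields are the ones defined in these files, and the sizes are arbitrary example values):

/* Build a descriptor via the mmap-based plugin, allocate the order-0
 * (initial) bucket array, and fetch a bucket through the cached
 * fast-path accessor. */
static struct lttng_ust_lfht_node *first_bucket_sketch(void)
{
	struct lttng_ust_lfht *ht;

	ht = lttng_ust_lfht_mm_mmap.alloc_lttng_ust_lfht(1024, 1UL << 20);
	ht->mm->alloc_bucket_table(ht, 0);
	return ht->bucket_at(ht, 0);
}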
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * Order based memory management for Lock-Free RCU Hash Table
- */
-
-#include <rculfhash-internal.h>
-
-static
-void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- if (order == 0) {
- ht->tbl_order[0] = calloc(ht->min_nr_alloc_buckets,
- sizeof(struct lttng_ust_lfht_node));
- assert(ht->tbl_order[0]);
- } else if (order > ht->min_alloc_buckets_order) {
- ht->tbl_order[order] = calloc(1UL << (order -1),
- sizeof(struct lttng_ust_lfht_node));
- assert(ht->tbl_order[order]);
- }
- /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
-}
-
-/*
- * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
- * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
- * lfht is destroyed.
- */
-static
-void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- if (order == 0)
- poison_free(ht->tbl_order[0]);
- else if (order > ht->min_alloc_buckets_order)
- poison_free(ht->tbl_order[order]);
- /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
-}
-
-static
-struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
-{
- unsigned long order;
-
- if (index < ht->min_nr_alloc_buckets) {
- dbg_printf("bucket index %lu order 0 aridx 0\n", index);
- return &ht->tbl_order[0][index];
- }
- /*
- * equivalent to lttng_ust_lfht_get_count_order_ulong(index + 1), but
- * optimizes away the non-existing 0 special-case for
- * lttng_ust_lfht_get_count_order_ulong.
- */
- order = lttng_ust_lfht_fls_ulong(index);
- dbg_printf("bucket index %lu order %lu aridx %lu\n",
- index, order, index & ((1UL << (order - 1)) - 1));
- return &ht->tbl_order[order][index & ((1UL << (order - 1)) - 1)];
-}
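-
-/*
- * Worked example (sketch, assuming min_nr_alloc_buckets <= 4): bucket
- * index 5 gives order = lttng_ust_lfht_fls_ulong(5) = 3 and offset
- * 5 & ((1 << 2) - 1) = 1, so the bucket lives at tbl_order[3][1],
- * i.e. the second slot of the order-3 table, which holds buckets 4..7.
- */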
-
-static
-struct lttng_ust_lfht *alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets)
-{
- return __default_alloc_lttng_ust_lfht(
- &lttng_ust_lfht_mm_order, sizeof(struct lttng_ust_lfht),
- min_nr_alloc_buckets, max_nr_buckets);
-}
-
-const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_order = {
- .alloc_lttng_ust_lfht = alloc_lttng_ust_lfht,
- .alloc_bucket_table = lttng_ust_lfht_alloc_bucket_table,
- .free_bucket_table = lttng_ust_lfht_free_bucket_table,
- .bucket_at = bucket_at,
-};
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * Userspace RCU library - Lock-Free Resizable RCU Hash Table
- */
-
-/*
- * Based on the following articles:
- * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
- * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
- * - Michael, M. M. High performance dynamic lock-free hash tables
- * and list-based sets. In Proceedings of the fourteenth annual ACM
- * symposium on Parallel algorithms and architectures, ACM Press,
- * (2002), 73-82.
- *
- * Some specificities of this Lock-Free Resizable RCU Hash Table
- * implementation:
- *
- * - RCU read-side critical section allows readers to perform hash
- * table lookups, as well as traversals, and use the returned objects
- * safely by allowing memory reclaim to take place only after a grace
- * period.
- * - Add and remove operations are lock-free, and do not need to
- * allocate memory. They need to be executed within RCU read-side
- * critical section to ensure the objects they read are valid and to
- * deal with the cmpxchg ABA problem.
- * - add and add_unique operations are supported. add_unique checks if
- * the node key already exists in the hash table. It ensures not to
- * populate a duplicate key if the node key already exists in the hash
- * table.
- * - The resize operation executes concurrently with
- * add/add_unique/add_replace/remove/lookup/traversal.
- * - Hash table nodes are contained within a split-ordered list. This
- * list is ordered by incrementing reversed-bits-hash value.
- * - An index of bucket nodes is kept. These bucket nodes are the hash
- * table "buckets". These buckets are internal nodes that allow
- * performing a fast hash lookup, similarly to a skip list. These
- * buckets are chained together in the split-ordered list, which
- * allows recursive expansion by inserting new buckets between the
- * existing buckets. The split-ordered list allows adding new buckets
- * between existing buckets as the table needs to grow.
- * - The resize operation for small tables only allows expanding the
- * hash table. It is triggered automatically by detecting long chains
- * in the add operation.
- * - The resize operation for larger tables (and available through an
- * API) allows both expanding and shrinking the hash table.
- * - Split-counters are used to keep track of the number of
- * nodes within the hash table for automatic resize triggering.
- * - Resize operation initiated by long chain detection is executed by a
- * worker thread, which keeps lock-freedom of add and remove.
- * - Resize operations are protected by a mutex.
- * - The removal operation is split in two parts: first, a "removed"
- * flag is set in the next pointer within the node to remove. Then,
- * a "garbage collection" is performed in the bucket containing the
- * removed node (from the start of the bucket up to the removed node).
- * All encountered nodes with "removed" flag set in their next
- * pointers are removed from the linked-list. If the cmpxchg used for
- * removal fails (due to concurrent garbage-collection or concurrent
- * add), we retry from the beginning of the bucket. This ensures that
- * the node with "removed" flag set is removed from the hash table
- * (not visible to lookups anymore) before the RCU read-side critical
- * section held across removal ends. Furthermore, this ensures that
- * the node with "removed" flag set is removed from the linked-list
- * before its memory is reclaimed. After setting the "removal" flag,
- * only the thread whose removal is the first to set the "removal
- * owner" flag (with an xchg) into a node's next pointer is considered
- * to have succeeded its removal (and thus owns the node to reclaim).
- * Because we garbage-collect starting from an invariant node (the
- * start-of-bucket bucket node) up to the "removed" node (or find a
- * reverse-hash that is higher), we are sure that a successful
- * traversal of the chain leads to a chain that is present in the
- * linked-list (the start node is never removed) and that it does not
- * contain the "removed" node anymore, even if concurrent delete/add
- * operations are changing the structure of the list concurrently.
- * - The add operations perform garbage collection of buckets if they
- * encounter nodes with removed flag set in the bucket where they want
- * to add their new node. This ensures lock-freedom of add operation by
- * helping the remover unlink nodes from the list rather than waiting
- * for it to do so.
- * - There are three memory backends for the hash table buckets: the
- * "order table", the "chunks", and the "mmap".
- * - These bucket containers contain a compact version of the hash table
- * nodes.
- * - The RCU "order table":
- * - has a first level table indexed by log2(hash index) which is
- * copied and expanded by the resize operation. This order table
- * allows finding the "bucket node" tables.
- * - There is one bucket node table per hash index order. The size of
- * each bucket node table is half the number of hashes contained in
- * this order (except for order 0).
- * - The RCU "chunks" is best suited for close interaction with a page
- * allocator. It uses a linear array as index to "chunks", each
- * containing the same number of buckets.
- * - The RCU "mmap" memory backend uses a single memory map to hold
- * all buckets.
- * - synchronize_rcu is used to garbage-collect the old bucket node table.
- *
- * Ordering Guarantees:
- *
- * To discuss these guarantees, we first define "read" operation as any
- * of the basic lttng_ust_lfht_lookup, lttng_ust_lfht_next_duplicate,
- * lttng_ust_lfht_first, lttng_ust_lfht_next operation, as well as
- * lttng_ust_lfht_add_unique (failure).
- *
- * We define "read traversal" operation as any of the following
- * group of operations
- * - lttng_ust_lfht_lookup followed by iteration with lttng_ust_lfht_next_duplicate
- * (and/or lttng_ust_lfht_next, although less common).
- * - lttng_ust_lfht_add_unique (failure) followed by iteration with
- * lttng_ust_lfht_next_duplicate (and/or lttng_ust_lfht_next, although less
- * common).
- * - lttng_ust_lfht_first followed by iteration with lttng_ust_lfht_next (and/or
- * lttng_ust_lfht_next_duplicate, although less common).
- *
- * We define "write" operations as any of lttng_ust_lfht_add, lttng_ust_lfht_replace,
- * lttng_ust_lfht_add_unique (success), lttng_ust_lfht_add_replace, lttng_ust_lfht_del.
- *
- * When lttng_ust_lfht_add_unique succeeds (returns the node passed as
- * parameter), it acts as a "write" operation. When lttng_ust_lfht_add_unique
- * fails (returns a node different from the one passed as parameter), it
- * acts as a "read" operation. A lttng_ust_lfht_add_unique failure is a
- * lttng_ust_lfht_lookup "read" operation, therefore, any ordering guarantee
- * referring to "lookup" implies any of "lookup" or lttng_ust_lfht_add_unique
- * (failure).
- *
- * We define "prior" and "later" node as nodes observable by reads and
- * read traversals respectively before and after a write or sequence of
- * write operations.
- *
- * Hash-table operations are often cascaded, for example, the pointer
- * returned by a lttng_ust_lfht_lookup() might be passed to a lttng_ust_lfht_next(),
- * whose return value might in turn be passed to another hash-table
- * operation. This entire cascaded series of operations must be enclosed
- * by a pair of matching rcu_read_lock() and rcu_read_unlock()
- * operations.
- *
- * The following ordering guarantees are offered by this hash table:
- *
- * A.1) "read" after "write": if there is ordering between a write and a
- * later read, then the read is guaranteed to see the write or some
- * later write.
- * A.2) "read traversal" after "write": given that there is dependency
- * ordering between reads in a "read traversal", if there is
- * ordering between a write and the first read of the traversal,
- * then the "read traversal" is guaranteed to see the write or
- * some later write.
- * B.1) "write" after "read": if there is ordering between a read and a
- * later write, then the read will never see the write.
- * B.2) "write" after "read traversal": given that there is dependency
- * ordering between reads in a "read traversal", if there is
- * ordering between the last read of the traversal and a later
- * write, then the "read traversal" will never see the write.
- * C) "write" while "read traversal": if a write occurs during a "read
- * traversal", the traversal may, or may not, see the write.
- * D.1) "write" after "write": if there is ordering between a write and
- * a later write, then the later write is guaranteed to see the
- * effects of the first write.
- * D.2) Concurrent "write" pairs: The system will assign an arbitrary
- * order to any pair of concurrent conflicting writes.
- * Non-conflicting writes (for example, to different keys) are
- * unordered.
- * E) If a grace period separates a "del" or "replace" operation
- * and a subsequent operation, then that subsequent operation is
- * guaranteed not to see the removed item.
- * F) Uniqueness guarantee: given a hash table that does not contain
- * duplicate items for a given key, there will only be one item in
- * the hash table after an arbitrary sequence of add_unique and/or
- * add_replace operations. Note, however, that a pair of
- * concurrent read operations might well access two different items
- * with that key.
- * G.1) If a pair of lookups for a given key are ordered (e.g. by a
- * memory barrier), then the second lookup will return the same
- * node as the previous lookup, or some later node.
- * G.2) A "read traversal" that starts after the end of a prior "read
- * traversal" (ordered by memory barriers) is guaranteed to see the
- * same nodes as the previous traversal, or some later nodes.
- * G.3) Concurrent "read" pairs: concurrent reads are unordered. For
- * example, if a pair of reads to the same key run concurrently
- * with an insertion of that same key, the reads remain unordered
- * regardless of their return values. In other words, you cannot
- * rely on the values returned by the reads to deduce ordering.
- *
- * Progress guarantees:
- *
- * * Reads are wait-free. These operations always move forward in the
- * hash table linked list, and this list has no loop.
- * * Writes are lock-free. Any retry loop performed by a write operation
- * is triggered by progress made within another update operation.
- *
- * Bucket node tables:
- *
- * hash table    hash table    the last        all bucket node tables
- * order         size          bucket node     0    1    2    3    4    5    6(index)
- *                             table size
- * 0             1             1               1
- * 1             2             1               1    1
- * 2             4             2               1    1    2
- * 3             8             4               1    1    2    4
- * 4             16            8               1    1    2    4    8
- * 5             32            16              1    1    2    4    8    16
- * 6             64            32              1    1    2    4    8    16   32
- *
- * When growing/shrinking, we only focus on the last bucket node table
- * whose size is (!order ? 1 : (1 << (order - 1))).
- *
- * Example for growing/shrinking:
- * grow hash table from order 5 to 6: init the index=6 bucket node table
- * shrink hash table from order 6 to 5: fini the index=6 bucket node table
- *
- * A bit of ascii art explanation:
- *
- * The order index is off by one compared to the actual power of 2
- * because we use index 0 to deal with the 0 special-case.
- *
- * This shows the nodes for a small table ordered by reversed bits:
- *
- *      bits    reverse
- * 0    000     000
- * 4    100     001
- * 2    010     010
- * 6    110     011
- * 1    001     100
- * 5    101     101
- * 3    011     110
- * 7    111     111
- *
- * This shows the nodes in order of non-reversed bits, linked by
- * reversed-bit order.
- *
- * order              bits       reverse
- * 0               0  000        000
- * 1               |  1  001        100             <-
- * 2               |  |  2  010        010    <-     |
- *                 |  |  |  3  011        110  | <-  |
- * 3               -> |  |  |  4  100        001  |  |
- *                    -> |  |  5  101        101  |
- *                       -> |  6  110        011
- *                          -> 7  111        111
- */
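-
-/*
- * Minimal usage sketch (illustrative only; the entry type, match
- * callback, hash value and read-lock calls below are assumptions, not
- * part of this file):
- *
- *	struct my_entry {
- *		int key;
- *		struct lttng_ust_lfht_node node;
- *	};
- *
- *	lttng_ust_urcu_read_lock();
- *	lttng_ust_lfht_lookup(ht, hash, my_match, &key, &iter);
- *	node = lttng_ust_lfht_iter_get_node(&iter);
- *	if (node)
- *		entry = caa_container_of(node, struct my_entry, node);
- *	lttng_ust_urcu_read_unlock();
- */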
-
-/*
- * Note on port to lttng-ust: auto-resize and accounting features are
- * removed.
- */
-
-#define _LGPL_SOURCE
-#include <stdlib.h>
-#include <errno.h>
-#include <assert.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <string.h>
-#include <sched.h>
-#include <unistd.h>
-
-#include <lttng/ust-arch.h>
-#include <lttng/urcu/pointer.h>
-#include <urcu/arch.h>
-#include <urcu/uatomic.h>
-#include <urcu/compiler.h>
-#include "rculfhash.h"
-#include "rculfhash-internal.h"
-#include <stdio.h>
-#include <pthread.h>
-#include <signal.h>
-
-/*
- * Split-counters lazily update the global counter every 1024
- * additions/removals. They automatically keep track of required resizes.
- * We use the bucket length as an indicator of the need to expand for small
- * tables and machines lacking per-cpu data support.
- */
-#define COUNT_COMMIT_ORDER 10
-
-/*
- * Define the minimum table size.
- */
-#define MIN_TABLE_ORDER 0
-#define MIN_TABLE_SIZE (1UL << MIN_TABLE_ORDER)
-
-/*
- * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
- */
-#define MIN_PARTITION_PER_THREAD_ORDER 12
-#define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER)
-
-/*
- * The removed flag needs to be updated atomically with the pointer.
- * It indicates that no node must attach to the node scheduled for
- * removal, and that node garbage collection must be performed.
- * The bucket flag does not need to be updated atomically with the
- * pointer, but it is added as a pointer low bit flag to save space.
- * The "removal owner" flag is used to detect which of the "del"
- * operations that set the "removed" flag gets to return the removed
- * node to its caller. Note that the replace operation does not need to
- * interact with the "removal owner" flag, because it validates that
- * the "removed" flag is not set before performing its cmpxchg.
- */
-#define REMOVED_FLAG (1UL << 0)
-#define BUCKET_FLAG (1UL << 1)
-#define REMOVAL_OWNER_FLAG (1UL << 2)
-#define FLAGS_MASK ((1UL << 3) - 1)
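-
-/*
- * Example (sketch): with the definitions above, a next pointer tagged
- * as (ptr | REMOVED_FLAG | BUCKET_FLAG) carries the low bits 0x3;
- * clear_flag() below masks them out with ~FLAGS_MASK (~0x7) to recover
- * the real pointer. This is why struct lttng_ust_lfht_node must be
- * aligned on at least 8 bytes.
- */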
-
-/* Value of the end pointer. Should not interact with flags. */
-#define END_VALUE NULL
-
-/*
- * ht_items_count: Split-counters counting the number of node addition
- * and removal in the table. Only used if the LTTNG_UST_LFHT_ACCOUNTING flag
- * is set at hash table creation.
- *
- * These are free-running counters, never reset to zero. They count the
- * number of add/remove, and trigger every (1 << COUNT_COMMIT_ORDER)
- * operations to update the global counter. We choose a power-of-2 value
- * for the trigger to deal with 32 or 64-bit overflow of the counter.
- */
-struct ht_items_count {
- unsigned long add, del;
-} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
-
-#ifdef CONFIG_LTTNG_UST_LFHT_ITER_DEBUG
-
-static
-void lttng_ust_lfht_iter_debug_set_ht(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
-{
- iter->lfht = ht;
-}
-
-#define lttng_ust_lfht_iter_debug_assert(...) assert(__VA_ARGS__)
-
-#else
-
-static
-void lttng_ust_lfht_iter_debug_set_ht(struct lttng_ust_lfht *ht __attribute__((unused)),
- struct lttng_ust_lfht_iter *iter __attribute__((unused)))
-{
-}
-
-#define lttng_ust_lfht_iter_debug_assert(...)
-
-#endif
-
-/*
- * Algorithm to reverse bits in a word by lookup table, extended to
- * 64-bit words.
- * Source:
- * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
- * Originally from Public Domain.
- */
-
-static const uint8_t BitReverseTable256[256] =
-{
-#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
-#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
-#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
- R6(0), R6(2), R6(1), R6(3)
-};
-#undef R2
-#undef R4
-#undef R6
-
-static
-uint8_t bit_reverse_u8(uint8_t v)
-{
- return BitReverseTable256[v];
-}
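-
-/*
- * Example (sketch): bit_reverse_u8(0x01) == 0x80 and
- * bit_reverse_u8(0xb0) == 0x0d, i.e. 1011 0000 becomes 0000 1101.
- */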
-
-#if (CAA_BITS_PER_LONG == 32)
-static
-uint32_t bit_reverse_u32(uint32_t v)
-{
- return ((uint32_t) bit_reverse_u8(v) << 24) |
- ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
- ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
- ((uint32_t) bit_reverse_u8(v >> 24));
-}
-#else
-static
-uint64_t bit_reverse_u64(uint64_t v)
-{
- return ((uint64_t) bit_reverse_u8(v) << 56) |
- ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
- ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
- ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
- ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
- ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
- ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
- ((uint64_t) bit_reverse_u8(v >> 56));
-}
-#endif
-
-static
-unsigned long bit_reverse_ulong(unsigned long v)
-{
-#if (CAA_BITS_PER_LONG == 32)
- return bit_reverse_u32(v);
-#else
- return bit_reverse_u64(v);
-#endif
-}
-
-/*
- * fls: returns the position of the most significant bit.
- * Returns 0 if no bit is set, else returns the position of the most
- * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
- */
-#if defined(LTTNG_UST_ARCH_X86)
-static inline
-unsigned int fls_u32(uint32_t x)
-{
- int r;
-
- __asm__ ("bsrl %1,%0\n\t"
- "jnz 1f\n\t"
- "movl $-1,%0\n\t"
- "1:\n\t"
- : "=r" (r) : "rm" (x));
- return r + 1;
-}
-#define HAS_FLS_U32
-#endif
-
-#if defined(LTTNG_UST_ARCH_AMD64)
-static inline
-unsigned int fls_u64(uint64_t x)
-{
- long r;
-
- __asm__ ("bsrq %1,%0\n\t"
- "jnz 1f\n\t"
- "movq $-1,%0\n\t"
- "1:\n\t"
- : "=r" (r) : "rm" (x));
- return r + 1;
-}
-#define HAS_FLS_U64
-#endif
-
-#ifndef HAS_FLS_U64
-static
-unsigned int fls_u64(uint64_t x)
- __attribute__((unused));
-static
-unsigned int fls_u64(uint64_t x)
-{
- unsigned int r = 64;
-
- if (!x)
- return 0;
-
- if (!(x & 0xFFFFFFFF00000000ULL)) {
- x <<= 32;
- r -= 32;
- }
- if (!(x & 0xFFFF000000000000ULL)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xFF00000000000000ULL)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xF000000000000000ULL)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xC000000000000000ULL)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x8000000000000000ULL)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-#endif
-
-#ifndef HAS_FLS_U32
-static
-unsigned int fls_u32(uint32_t x)
- __attribute__((unused));
-static
-unsigned int fls_u32(uint32_t x)
-{
- unsigned int r = 32;
-
- if (!x)
- return 0;
- if (!(x & 0xFFFF0000U)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xFF000000U)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xF0000000U)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xC0000000U)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000U)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-#endif
-
-unsigned int lttng_ust_lfht_fls_ulong(unsigned long x)
-{
-#if (CAA_BITS_PER_LONG == 32)
- return fls_u32(x);
-#else
- return fls_u64(x);
-#endif
-}
-
-/*
- * Return the minimum order for which x <= (1UL << order).
- * Return -1 if x is 0.
- */
-int lttng_ust_lfht_get_count_order_u32(uint32_t x)
-{
- if (!x)
- return -1;
-
- return fls_u32(x - 1);
-}
-
-/*
- * Return the minimum order for which x <= (1UL << order).
- * Return -1 if x is 0.
- */
-int lttng_ust_lfht_get_count_order_ulong(unsigned long x)
-{
- if (!x)
- return -1;
-
- return lttng_ust_lfht_fls_ulong(x - 1);
-}
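-
-/*
- * Examples (sketch): fls_u32(0) == 0, fls_u32(1) == 1,
- * fls_u32(0x80000000) == 32; lttng_ust_lfht_get_count_order_u32(4) == 2
- * and lttng_ust_lfht_get_count_order_u32(5) == 3, the smallest order
- * such that x <= (1UL << order).
- */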
-
-static
-struct lttng_ust_lfht_node *clear_flag(struct lttng_ust_lfht_node *node)
-{
- return (struct lttng_ust_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
-}
-
-static
-int is_removed(const struct lttng_ust_lfht_node *node)
-{
- return ((unsigned long) node) & REMOVED_FLAG;
-}
-
-static
-int is_bucket(struct lttng_ust_lfht_node *node)
-{
- return ((unsigned long) node) & BUCKET_FLAG;
-}
-
-static
-struct lttng_ust_lfht_node *flag_bucket(struct lttng_ust_lfht_node *node)
-{
- return (struct lttng_ust_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
-}
-
-static
-int is_removal_owner(struct lttng_ust_lfht_node *node)
-{
- return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
-}
-
-static
-struct lttng_ust_lfht_node *flag_removal_owner(struct lttng_ust_lfht_node *node)
-{
- return (struct lttng_ust_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
-}
-
-static
-struct lttng_ust_lfht_node *flag_removed_or_removal_owner(struct lttng_ust_lfht_node *node)
-{
- return (struct lttng_ust_lfht_node *) (((unsigned long) node) | REMOVED_FLAG | REMOVAL_OWNER_FLAG);
-}
-
-static
-struct lttng_ust_lfht_node *get_end(void)
-{
- return (struct lttng_ust_lfht_node *) END_VALUE;
-}
-
-static
-int is_end(struct lttng_ust_lfht_node *node)
-{
- return clear_flag(node) == (struct lttng_ust_lfht_node *) END_VALUE;
-}
-
-static
-void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- return ht->mm->alloc_bucket_table(ht, order);
-}
-
-/*
- * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
- * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
- * lfht is destroyed.
- */
-static
-void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- return ht->mm->free_bucket_table(ht, order);
-}
-
-static inline
-struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
-{
- return ht->bucket_at(ht, index);
-}
-
-static inline
-struct lttng_ust_lfht_node *lookup_bucket(struct lttng_ust_lfht *ht, unsigned long size,
- unsigned long hash)
-{
- assert(size > 0);
- return bucket_at(ht, hash & (size - 1));
-}
-
-/*
- * Remove all logically deleted nodes from a bucket up to a certain node key.
- */
-static
-void _lttng_ust_lfht_gc_bucket(struct lttng_ust_lfht_node *bucket, struct lttng_ust_lfht_node *node)
-{
- struct lttng_ust_lfht_node *iter_prev, *iter, *next, *new_next;
-
- assert(!is_bucket(bucket));
- assert(!is_removed(bucket));
- assert(!is_removal_owner(bucket));
- assert(!is_bucket(node));
- assert(!is_removed(node));
- assert(!is_removal_owner(node));
- for (;;) {
- iter_prev = bucket;
- /* We can always skip the bucket node initially */
- iter = lttng_ust_rcu_dereference(iter_prev->next);
- assert(!is_removed(iter));
- assert(!is_removal_owner(iter));
- assert(iter_prev->reverse_hash <= node->reverse_hash);
- /*
- * We should never be called with bucket (start of chain)
- * and logically removed node (end of path compression
- * marker) being the actual same node. This would be a
- * bug in the algorithm implementation.
- */
- assert(bucket != node);
- for (;;) {
- if (caa_unlikely(is_end(iter)))
- return;
- if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
- return;
- next = lttng_ust_rcu_dereference(clear_flag(iter)->next);
- if (caa_likely(is_removed(next)))
- break;
- iter_prev = clear_flag(iter);
- iter = next;
- }
- assert(!is_removed(iter));
- assert(!is_removal_owner(iter));
- if (is_bucket(iter))
- new_next = flag_bucket(clear_flag(next));
- else
- new_next = clear_flag(next);
- (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
- }
-}
-
-static
-int _lttng_ust_lfht_replace(struct lttng_ust_lfht *ht, unsigned long size,
- struct lttng_ust_lfht_node *old_node,
- struct lttng_ust_lfht_node *old_next,
- struct lttng_ust_lfht_node *new_node)
-{
- struct lttng_ust_lfht_node *bucket, *ret_next;
-
- if (!old_node) /* Return -ENOENT if asked to replace NULL node */
- return -ENOENT;
-
- assert(!is_removed(old_node));
- assert(!is_removal_owner(old_node));
- assert(!is_bucket(old_node));
- assert(!is_removed(new_node));
- assert(!is_removal_owner(new_node));
- assert(!is_bucket(new_node));
- assert(new_node != old_node);
- for (;;) {
- /* Insert after node to be replaced */
- if (is_removed(old_next)) {
- /*
- * Too late, the old node has been removed under us
- * between lookup and replace. Fail.
- */
- return -ENOENT;
- }
- assert(old_next == clear_flag(old_next));
- assert(new_node != old_next);
- /*
- * REMOVAL_OWNER flag is _NEVER_ set before the REMOVED
- * flag. It is either set atomically at the same time
- * (replace) or after (del).
- */
- assert(!is_removal_owner(old_next));
- new_node->next = old_next;
- /*
- * Here is the whole trick for lock-free replace: we add
- * the replacement node _after_ the node we want to
- * replace by atomically setting its next pointer at the
- * same time we set its removal flag. Given that
- * the lookups/get next use an iterator aware of the
- * next pointer, they will either skip the old node due
- * to the removal flag and see the new node, or use
- * the old node, but will not see the new one.
- * This is a replacement of a node with another node
- * that has the same value: we are therefore not
- * removing a value from the hash table. We set both the
- * REMOVED and REMOVAL_OWNER flags atomically so we own
- * the node after successful cmpxchg.
- */
- ret_next = uatomic_cmpxchg(&old_node->next,
- old_next, flag_removed_or_removal_owner(new_node));
- if (ret_next == old_next)
- break; /* We performed the replacement. */
- old_next = ret_next;
- }
-
- /*
- * Ensure that the old node is not visible to readers anymore:
- * lookup for the node, and remove it (along with any other
- * logically removed node) if found.
- */
- bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
- _lttng_ust_lfht_gc_bucket(bucket, new_node);
-
- assert(is_removed(CMM_LOAD_SHARED(old_node->next)));
- return 0;
-}
-
-/*
- * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
- * mode. A NULL unique_ret allows creation of duplicate keys.
- */
-static
-void _lttng_ust_lfht_add(struct lttng_ust_lfht *ht,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- unsigned long size,
- struct lttng_ust_lfht_node *node,
- struct lttng_ust_lfht_iter *unique_ret,
- int bucket_flag)
-{
- struct lttng_ust_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
- *return_node;
- struct lttng_ust_lfht_node *bucket;
-
- assert(!is_bucket(node));
- assert(!is_removed(node));
- assert(!is_removal_owner(node));
- bucket = lookup_bucket(ht, size, hash);
- for (;;) {
- /*
- * iter_prev points to the non-removed node prior to the
- * insert location.
- */
- iter_prev = bucket;
- /* We can always skip the bucket node initially */
- iter = lttng_ust_rcu_dereference(iter_prev->next);
- assert(iter_prev->reverse_hash <= node->reverse_hash);
- for (;;) {
- if (caa_unlikely(is_end(iter)))
- goto insert;
- if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
- goto insert;
-
- /* bucket node is the first node of the identical-hash-value chain */
- if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
- goto insert;
-
- next = lttng_ust_rcu_dereference(clear_flag(iter)->next);
- if (caa_unlikely(is_removed(next)))
- goto gc_node;
-
- /* uniquely add */
- if (unique_ret
- && !is_bucket(next)
- && clear_flag(iter)->reverse_hash == node->reverse_hash) {
- struct lttng_ust_lfht_iter d_iter = {
- .node = node,
- .next = iter,
-#ifdef CONFIG_LTTNG_UST_LFHT_ITER_DEBUG
- .lfht = ht,
-#endif
- };
-
- /*
- * uniquely adding inserts the node as the first
- * node of the identical-hash-value node chain.
- *
- * This semantic ensures no duplicated keys
- * should ever be observable in the table
- * (including traversing the table node by
- * node using forward iterations)
- */
- lttng_ust_lfht_next_duplicate(ht, match, key, &d_iter);
- if (!d_iter.node)
- goto insert;
-
- *unique_ret = d_iter;
- return;
- }
-
- iter_prev = clear_flag(iter);
- iter = next;
- }
-
- insert:
- assert(node != clear_flag(iter));
- assert(!is_removed(iter_prev));
- assert(!is_removal_owner(iter_prev));
- assert(!is_removed(iter));
- assert(!is_removal_owner(iter));
- assert(iter_prev != node);
- if (!bucket_flag)
- node->next = clear_flag(iter);
- else
- node->next = flag_bucket(clear_flag(iter));
- if (is_bucket(iter))
- new_node = flag_bucket(node);
- else
- new_node = node;
- if (uatomic_cmpxchg(&iter_prev->next, iter,
- new_node) != iter) {
- continue; /* retry */
- } else {
- return_node = node;
- goto end;
- }
-
- gc_node:
- assert(!is_removed(iter));
- assert(!is_removal_owner(iter));
- if (is_bucket(iter))
- new_next = flag_bucket(clear_flag(next));
- else
- new_next = clear_flag(next);
- (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
- /* retry */
- }
-end:
- if (unique_ret) {
- unique_ret->node = return_node;
- /* unique_ret->next left unset, never used. */
- }
-}
-
-static
-int _lttng_ust_lfht_del(struct lttng_ust_lfht *ht, unsigned long size,
- struct lttng_ust_lfht_node *node)
-{
- struct lttng_ust_lfht_node *bucket, *next;
-
- if (!node) /* Return -ENOENT if asked to delete NULL node */
- return -ENOENT;
-
- /* logically delete the node */
- assert(!is_bucket(node));
- assert(!is_removed(node));
- assert(!is_removal_owner(node));
-
- /*
- * We are first checking if the node had previously been
- * logically removed (this check is not atomic with setting the
- * logical removal flag). Return -ENOENT if the node had
- * previously been removed.
- */
- next = CMM_LOAD_SHARED(node->next); /* next is not dereferenced */
- if (caa_unlikely(is_removed(next)))
- return -ENOENT;
- assert(!is_bucket(next));
- /*
- * The del operation semantic guarantees a full memory barrier
- * before the uatomic_or atomic commit of the deletion flag.
- */
- cmm_smp_mb__before_uatomic_or();
- /*
- * We set the REMOVED_FLAG unconditionally. Note that there may
- * be more than one concurrent thread setting this flag.
- * Knowing which wins the race will be known after the garbage
- * collection phase, stay tuned!
- */
- uatomic_or(&node->next, REMOVED_FLAG);
- /* We performed the (logical) deletion. */
-
- /*
- * Ensure that the node is not visible to readers anymore: lookup for
- * the node, and remove it (along with any other logically removed node)
- * if found.
- */
- bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
- _lttng_ust_lfht_gc_bucket(bucket, node);
-
- assert(is_removed(CMM_LOAD_SHARED(node->next)));
- /*
- * Last phase: atomically exchange node->next with a version
- * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
- * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
- * the node and win the removal race.
- * It is interesting to note that all "add" paths are forbidden
- * to change the next pointer starting from the point where the
- * REMOVED_FLAG is set, so here using a read followed by an
- * xchg() suffices to guarantee that the xchg() will only ever
- * set the "REMOVAL_OWNER_FLAG" (or change nothing if the flag
- * was already set).
- */
- if (!is_removal_owner(uatomic_xchg(&node->next,
- flag_removal_owner(node->next))))
- return 0;
- else
- return -ENOENT;
-}
-
-/*
- * Never called with size < 1.
- */
-static
-void lttng_ust_lfht_create_bucket(struct lttng_ust_lfht *ht, unsigned long size)
-{
- struct lttng_ust_lfht_node *prev, *node;
- unsigned long order, len, i;
- int bucket_order;
-
- lttng_ust_lfht_alloc_bucket_table(ht, 0);
-
- dbg_printf("create bucket: order 0 index 0 hash 0\n");
- node = bucket_at(ht, 0);
- node->next = flag_bucket(get_end());
- node->reverse_hash = 0;
-
- bucket_order = lttng_ust_lfht_get_count_order_ulong(size);
- assert(bucket_order >= 0);
-
- for (order = 1; order < (unsigned long) bucket_order + 1; order++) {
- len = 1UL << (order - 1);
- lttng_ust_lfht_alloc_bucket_table(ht, order);
-
- for (i = 0; i < len; i++) {
- /*
- * Now, we are trying to init the node with the
- * hash=(len+i) (which is also a bucket with the
- * index=(len+i)) and insert it into the hash table,
- * so this node has to be inserted after the bucket
- * with the index=(len+i)&(len-1)=i. And because there
- * is no other non-bucket node nor bucket node with
- * larger index/hash inserted, the bucket node
- * being inserted ends up linked directly
- * after the bucket node with index=i.
- */
- prev = bucket_at(ht, i);
- node = bucket_at(ht, len + i);
-
- dbg_printf("create bucket: order %lu index %lu hash %lu\n",
- order, len + i, len + i);
- node->reverse_hash = bit_reverse_ulong(len + i);
-
- /* insert after prev */
- assert(is_bucket(prev->next));
- node->next = prev->next;
- prev->next = flag_bucket(node);
- }
- }
-}
-
-#if (CAA_BITS_PER_LONG > 32)
-/*
- * For 64-bit architectures, with max number of buckets small enough not to
- * use the entire 64-bit memory mapping space (and allowing a fair number of
- * hash table instances), use the mmap allocator, which is faster. Otherwise,
- * fall back to the order allocator.
- */
-static
-const struct lttng_ust_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
-{
- if (max_nr_buckets && max_nr_buckets <= (1ULL << 32))
- return &lttng_ust_lfht_mm_mmap;
- else
- return &lttng_ust_lfht_mm_order;
-}
-#else
-/*
- * For 32-bit architectures, use the order allocator.
- */
-static
-const struct lttng_ust_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
-{
- return &lttng_ust_lfht_mm_order;
-}
-#endif
-
-struct lttng_ust_lfht *lttng_ust_lfht_new(unsigned long init_size,
- unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets,
- int flags,
- const struct lttng_ust_lfht_mm_type *mm)
-{
- struct lttng_ust_lfht *ht;
- unsigned long order;
-
- /* min_nr_alloc_buckets must be power of two */
- if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
- return NULL;
-
- /* init_size must be power of two */
- if (!init_size || (init_size & (init_size - 1)))
- return NULL;
-
- /*
- * Memory management plugin default.
- */
- if (!mm)
- mm = get_mm_type(max_nr_buckets);
-
- /* max_nr_buckets == 0 for order based mm means infinite */
- if (mm == &lttng_ust_lfht_mm_order && !max_nr_buckets)
- max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);
-
- /* max_nr_buckets must be power of two */
- if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
- return NULL;
-
- if (flags & LTTNG_UST_LFHT_AUTO_RESIZE)
- return NULL;
-
- min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
- init_size = max(init_size, MIN_TABLE_SIZE);
- max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
- init_size = min(init_size, max_nr_buckets);
-
- ht = mm->alloc_lttng_ust_lfht(min_nr_alloc_buckets, max_nr_buckets);
- assert(ht);
- assert(ht->mm == mm);
- assert(ht->bucket_at == mm->bucket_at);
-
- ht->flags = flags;
- /* this mutex should not nest in read-side C.S. */
- pthread_mutex_init(&ht->resize_mutex, NULL);
- order = lttng_ust_lfht_get_count_order_ulong(init_size);
- ht->resize_target = 1UL << order;
- lttng_ust_lfht_create_bucket(ht, 1UL << order);
- ht->size = 1UL << order;
- return ht;
-}
-
-void lttng_ust_lfht_lookup(struct lttng_ust_lfht *ht, unsigned long hash,
- lttng_ust_lfht_match_fct match, const void *key,
- struct lttng_ust_lfht_iter *iter)
-{
- struct lttng_ust_lfht_node *node, *next, *bucket;
- unsigned long reverse_hash, size;
-
- lttng_ust_lfht_iter_debug_set_ht(ht, iter);
-
- reverse_hash = bit_reverse_ulong(hash);
-
- size = lttng_ust_rcu_dereference(ht->size);
- bucket = lookup_bucket(ht, size, hash);
- /* We can always skip the bucket node initially */
- node = lttng_ust_rcu_dereference(bucket->next);
- node = clear_flag(node);
- for (;;) {
- if (caa_unlikely(is_end(node))) {
- node = next = NULL;
- break;
- }
- if (caa_unlikely(node->reverse_hash > reverse_hash)) {
- node = next = NULL;
- break;
- }
- next = lttng_ust_rcu_dereference(node->next);
- assert(node == clear_flag(node));
- if (caa_likely(!is_removed(next))
- && !is_bucket(next)
- && node->reverse_hash == reverse_hash
- && caa_likely(match(node, key))) {
- break;
- }
- node = clear_flag(next);
- }
- assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
- iter->node = node;
- iter->next = next;
-}
-
-void lttng_ust_lfht_next_duplicate(struct lttng_ust_lfht *ht __attribute__((unused)),
- lttng_ust_lfht_match_fct match,
- const void *key, struct lttng_ust_lfht_iter *iter)
-{
- struct lttng_ust_lfht_node *node, *next;
- unsigned long reverse_hash;
-
- lttng_ust_lfht_iter_debug_assert(ht == iter->lfht);
- node = iter->node;
- reverse_hash = node->reverse_hash;
- next = iter->next;
- node = clear_flag(next);
-
- for (;;) {
- if (caa_unlikely(is_end(node))) {
- node = next = NULL;
- break;
- }
- if (caa_unlikely(node->reverse_hash > reverse_hash)) {
- node = next = NULL;
- break;
- }
- next = lttng_ust_rcu_dereference(node->next);
- if (caa_likely(!is_removed(next))
- && !is_bucket(next)
- && caa_likely(match(node, key))) {
- break;
- }
- node = clear_flag(next);
- }
- assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
- iter->node = node;
- iter->next = next;
-}
-
-void lttng_ust_lfht_next(struct lttng_ust_lfht *ht __attribute__((unused)),
- struct lttng_ust_lfht_iter *iter)
-{
- struct lttng_ust_lfht_node *node, *next;
-
- lttng_ust_lfht_iter_debug_assert(ht == iter->lfht);
- node = clear_flag(iter->next);
- for (;;) {
- if (caa_unlikely(is_end(node))) {
- node = next = NULL;
- break;
- }
- next = lttng_ust_rcu_dereference(node->next);
- if (caa_likely(!is_removed(next))
- && !is_bucket(next)) {
- break;
- }
- node = clear_flag(next);
- }
- assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
- iter->node = node;
- iter->next = next;
-}
-
-void lttng_ust_lfht_first(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
-{
- lttng_ust_lfht_iter_debug_set_ht(ht, iter);
- /*
- * Get next after first bucket node. The first bucket node is the
- * first node of the linked list.
- */
- iter->next = bucket_at(ht, 0)->next;
- lttng_ust_lfht_next(ht, iter);
-}
-
-void lttng_ust_lfht_add(struct lttng_ust_lfht *ht, unsigned long hash,
- struct lttng_ust_lfht_node *node)
-{
- unsigned long size;
-
- node->reverse_hash = bit_reverse_ulong(hash);
- size = lttng_ust_rcu_dereference(ht->size);
- _lttng_ust_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
-}
-
-struct lttng_ust_lfht_node *lttng_ust_lfht_add_unique(struct lttng_ust_lfht *ht,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- struct lttng_ust_lfht_node *node)
-{
- unsigned long size;
- struct lttng_ust_lfht_iter iter;
-
- node->reverse_hash = bit_reverse_ulong(hash);
- size = lttng_ust_rcu_dereference(ht->size);
- _lttng_ust_lfht_add(ht, hash, match, key, size, node, &iter, 0);
- return iter.node;
-}
-
-struct lttng_ust_lfht_node *lttng_ust_lfht_add_replace(struct lttng_ust_lfht *ht,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- struct lttng_ust_lfht_node *node)
-{
- unsigned long size;
- struct lttng_ust_lfht_iter iter;
-
- node->reverse_hash = bit_reverse_ulong(hash);
- size = lttng_ust_rcu_dereference(ht->size);
- for (;;) {
- _lttng_ust_lfht_add(ht, hash, match, key, size, node, &iter, 0);
- if (iter.node == node) {
- return NULL;
- }
-
- if (!_lttng_ust_lfht_replace(ht, size, iter.node, iter.next, node))
- return iter.node;
- }
-}
-
-int lttng_ust_lfht_replace(struct lttng_ust_lfht *ht,
- struct lttng_ust_lfht_iter *old_iter,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- struct lttng_ust_lfht_node *new_node)
-{
- unsigned long size;
-
- new_node->reverse_hash = bit_reverse_ulong(hash);
- if (!old_iter->node)
- return -ENOENT;
- if (caa_unlikely(old_iter->node->reverse_hash != new_node->reverse_hash))
- return -EINVAL;
- if (caa_unlikely(!match(old_iter->node, key)))
- return -EINVAL;
- size = lttng_ust_rcu_dereference(ht->size);
- return _lttng_ust_lfht_replace(ht, size, old_iter->node, old_iter->next,
- new_node);
-}
-
-int lttng_ust_lfht_del(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_node *node)
-{
- unsigned long size;
-
- size = lttng_ust_rcu_dereference(ht->size);
- return _lttng_ust_lfht_del(ht, size, node);
-}
-
-int lttng_ust_lfht_is_node_deleted(const struct lttng_ust_lfht_node *node)
-{
- return is_removed(CMM_LOAD_SHARED(node->next));
-}
-
-static
-int lttng_ust_lfht_delete_bucket(struct lttng_ust_lfht *ht)
-{
- struct lttng_ust_lfht_node *node;
- unsigned long order, i, size;
-
- /* Check that the table is empty */
- node = bucket_at(ht, 0);
- do {
- node = clear_flag(node)->next;
- if (!is_bucket(node))
- return -EPERM;
- assert(!is_removed(node));
- assert(!is_removal_owner(node));
- } while (!is_end(node));
- /*
- * size accessed without lttng_ust_rcu_dereference because hash table is
- * being destroyed.
- */
- size = ht->size;
- /* Internal sanity check: all nodes left should be buckets */
- for (i = 0; i < size; i++) {
- node = bucket_at(ht, i);
- dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
- i, i, bit_reverse_ulong(node->reverse_hash));
- assert(is_bucket(node->next));
- }
-
- for (order = lttng_ust_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
- lttng_ust_lfht_free_bucket_table(ht, order);
-
- return 0;
-}
-
-/*
- * Should only be called when no more concurrent readers nor writers can
- * possibly access the table.
- */
-int lttng_ust_lfht_destroy(struct lttng_ust_lfht *ht)
-{
- int ret;
-
- ret = lttng_ust_lfht_delete_bucket(ht);
- if (ret)
- return ret;
- ret = pthread_mutex_destroy(&ht->resize_mutex);
- if (ret)
- ret = -EBUSY;
- poison_free(ht);
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * Userspace RCU library - Lock-Free RCU Hash Table
- */
-
-#ifndef _LTTNG_UST_RCULFHASH_H
-#define _LTTNG_UST_RCULFHASH_H
-
-#include <stdint.h>
-#include <pthread.h>
-#include <urcu/compiler.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct lttng_ust_lfht;
-
-/*
- * lttng_ust_lfht_node: Contains the next pointers and reverse-hash
- * value required for lookup and traversal of the hash table.
- *
- * struct lttng_ust_lfht_node should be aligned on 8-bytes boundaries because
- * the three lower bits are used as flags. It is worth noting that the
- * information contained within these three bits could be represented in
- * two bits by re-using the same bit for REMOVAL_OWNER_FLAG and
- * BUCKET_FLAG. This can be done if we ensure that no iterator nor
- * updater check the BUCKET_FLAG after it detects that the REMOVED_FLAG
- * is set. Given the minimum size of struct lttng_ust_lfht_node is 8 bytes on
- * 32-bit architectures, we choose to go for simplicity and reserve
- * three bits.
- *
- * struct lttng_ust_lfht_node can be embedded into a structure (as a field).
- * caa_container_of() can be used to get the structure from the struct
- * lttng_ust_lfht_node after a lookup.
- *
- * The structure which embeds it typically holds the key (or key-value
- * pair) of the object. The caller code is responsible for calculation
- * of the hash value for lttng_ust_lfht APIs.
- */
-struct lttng_ust_lfht_node {
- struct lttng_ust_lfht_node *next; /* ptr | REMOVAL_OWNER_FLAG | BUCKET_FLAG | REMOVED_FLAG */
- unsigned long reverse_hash;
-} __attribute__((aligned(8)));
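-
-/*
- * Embedding sketch (illustrative; "struct my_entry", its allocation and
- * "hash_fct" are assumptions, not part of this header):
- *
- *	struct my_entry {
- *		unsigned long key;
- *		struct lttng_ust_lfht_node node;
- *	};
- *
- *	struct my_entry *entry = calloc(1, sizeof(*entry));
- *	entry->key = key;
- *	lttng_ust_lfht_node_init(&entry->node);
- *	lttng_ust_lfht_add(ht, hash_fct(key), &entry->node);
- */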
-
-/* lttng_ust_lfht_iter: Used to track state while traversing a hash chain. */
-struct lttng_ust_lfht_iter {
- struct lttng_ust_lfht_node *node, *next;
-};
-
-static inline
-struct lttng_ust_lfht_node *lttng_ust_lfht_iter_get_node(struct lttng_ust_lfht_iter *iter)
-{
- return iter->node;
-}
-
-struct rcu_flavor_struct;
-
-/*
- * Caution!
- * Ensure reader and writer threads are registered as urcu readers.
- */
-
-typedef int (*lttng_ust_lfht_match_fct)(struct lttng_ust_lfht_node *node, const void *key);
-
-/*
- * lttng_ust_lfht_node_init - initialize a hash table node
- * @node: the node to initialize.
- *
- * This function is kept to be eventually used for debugging purposes
- * (detection of memory corruption).
- */
-static inline
-void lttng_ust_lfht_node_init(struct lttng_ust_lfht_node *node __attribute__((unused)))
-{
-}
-
-/*
- * Hash table creation flags.
- */
-enum {
- LTTNG_UST_LFHT_AUTO_RESIZE = (1U << 0),
- LTTNG_UST_LFHT_ACCOUNTING = (1U << 1),
-};
-
-struct lttng_ust_lfht_mm_type {
- struct lttng_ust_lfht *(*alloc_lttng_ust_lfht)(unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets);
- void (*alloc_bucket_table)(struct lttng_ust_lfht *ht, unsigned long order);
- void (*free_bucket_table)(struct lttng_ust_lfht *ht, unsigned long order);
- struct lttng_ust_lfht_node *(*bucket_at)(struct lttng_ust_lfht *ht,
- unsigned long index);
-};
-
-extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_order
- __attribute__((visibility("hidden")));
-
-extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_chunk
- __attribute__((visibility("hidden")));
-
-extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_mmap
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_new - allocate a hash table.
- * @init_size: number of buckets to allocate initially. Must be power of two.
- * @min_nr_alloc_buckets: the minimum number of allocated buckets.
- * (must be power of two)
- * @max_nr_buckets: the maximum number of hash table buckets allowed.
- * (must be power of two, 0 is accepted, means
- * "infinite")
- * @flags: hash table creation flags (can be combined with bitwise or: '|').
- * 0: no flags.
- * LTTNG_UST_LFHT_AUTO_RESIZE: automatically resize hash table.
- * LTTNG_UST_LFHT_ACCOUNTING: count the number of node addition
- * and removal in the table
- *
- * Return NULL on error.
- * Note: the RCU flavor must be already included before the hash table header.
- */
-extern struct lttng_ust_lfht *lttng_ust_lfht_new(unsigned long init_size,
- unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets,
- int flags,
- const struct lttng_ust_lfht_mm_type *mm)
- __attribute__((visibility("hidden")));
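-
-/*
- * Example (sketch): create a table with 16 initial buckets, a 16-bucket
- * minimum allocation, unbounded growth and no flags, letting the
- * default memory backend be selected:
- *
- *	ht = lttng_ust_lfht_new(16, 16, 0, 0, NULL);
- */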
-
-/*
- * lttng_ust_lfht_destroy - destroy a hash table.
- * @ht: the hash table to destroy.
- *
- * Return 0 on success, negative error value on error.
-
- * Prior to liburcu 0.10:
- * - Threads calling this API need to be registered RCU read-side
- * threads.
- * - lttng_ust_lfht_destroy should *not* be called from a RCU read-side
- * critical section. It should *not* be called from a call_rcu thread
- * context either.
- *
- * Starting from liburcu 0.10, rculfhash implements its own worker
- * thread to handle resize operations, which removes RCU requirements on
- * lttng_ust_lfht_destroy.
- */
-extern int lttng_ust_lfht_destroy(struct lttng_ust_lfht *ht)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_count_nodes - count the number of nodes in the hash table.
- * @ht: the hash table.
- * @split_count_before: sample the node count split-counter before traversal.
- * @count: traverse the hash table, count the number of nodes observed.
- * @split_count_after: sample the node count split-counter after traversal.
- *
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- */
-extern void lttng_ust_lfht_count_nodes(struct lttng_ust_lfht *ht,
- long *split_count_before,
- unsigned long *count,
- long *split_count_after)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_lookup - lookup a node by key.
- * @ht: the hash table.
- * @hash: the key hash.
- * @match: the key match function.
- * @key: the current node key.
- * @iter: node, if found (output). *iter->node set to NULL if not found.
- *
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * This function acts as a rcu_dereference() to read the node pointer.
- */
-extern void lttng_ust_lfht_lookup(struct lttng_ust_lfht *ht, unsigned long hash,
- lttng_ust_lfht_match_fct match, const void *key,
- struct lttng_ust_lfht_iter *iter)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_next_duplicate - get the next item with same key, after iterator.
- * @ht: the hash table.
- * @match: the key match function.
- * @key: the current node key.
- * @iter: input: current iterator.
- * output: node, if found. *iter->node set to NULL if not found.
- *
- * Uses an iterator initialized by a lookup or traversal. Important: the
- * iterator _needs_ to be initialized before calling
- * lttng_ust_lfht_next_duplicate.
- * Sets *iter->node to the following node with same key.
- * Sets *iter->node to NULL if no following node exists with same key.
- * RCU read-side lock must be held across lttng_ust_lfht_lookup and
- * lttng_ust_lfht_next calls, and also between lttng_ust_lfht_next calls using the
- * node returned by a previous lttng_ust_lfht_next.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * This function acts as a rcu_dereference() to read the node pointer.
- */
-extern void lttng_ust_lfht_next_duplicate(struct lttng_ust_lfht *ht,
- lttng_ust_lfht_match_fct match, const void *key,
- struct lttng_ust_lfht_iter *iter)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_first - get the first node in the table.
- * @ht: the hash table.
- * @iter: First node, if exists (output). *iter->node set to NULL if not found.
- *
- * Output in "*iter". *iter->node set to NULL if table is empty.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * This function acts as a rcu_dereference() to read the node pointer.
- */
-extern void lttng_ust_lfht_first(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_next - get the next node in the table.
- * @ht: the hash table.
- * @iter: input: current iterator.
- * output: next node, if exists. *iter->node set to NULL if not found.
- *
- * Input/Output in "*iter". *iter->node set to NULL if *iter was
- * pointing to the last table node.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * This function acts as a rcu_dereference() to read the node pointer.
- */
-extern void lttng_ust_lfht_next(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_add - add a node to the hash table.
- * @ht: the hash table.
- * @hash: the key hash.
- * @node: the node to add.
- *
- * This function supports adding redundant keys into the table.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * This function issues a full memory barrier before and after its
- * atomic commit.
- */
-extern void lttng_ust_lfht_add(struct lttng_ust_lfht *ht, unsigned long hash,
- struct lttng_ust_lfht_node *node)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_add_unique - add a node to hash table, if key is not present.
- * @ht: the hash table.
- * @hash: the node's hash.
- * @match: the key match function.
- * @key: the node's key.
- * @node: the node to try adding.
- *
- * Return the node added upon success.
- * Return the unique node already present upon failure. If
- * lttng_ust_lfht_add_unique fails, the node passed as parameter should be
- * freed by the caller. In this case, the caller does NOT need to wait
- * for a grace period before freeing or re-using the node.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- *
- * The semantic of this function is that if only this function is used
- * to add keys into the table, no duplicated keys should ever be
- * observable in the table. The same guarantee applies to a combination of
- * add_unique and add_replace (see below).
- *
- * Upon success, this function issues a full memory barrier before and
- * after its atomic commit. Upon failure, this function acts like a
- * simple lookup operation: it acts as a rcu_dereference() to read the
- * node pointer. The failure case does not guarantee any other memory
- * barrier.
- */
-extern struct lttng_ust_lfht_node *lttng_ust_lfht_add_unique(struct lttng_ust_lfht *ht,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- struct lttng_ust_lfht_node *node)
- __attribute__((visibility("hidden")));
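-
-/*
- * Example (sketch; "my_match", "key" and "entry" are assumptions):
- *
- *	ret = lttng_ust_lfht_add_unique(ht, hash, my_match, key, &entry->node);
- *	if (ret != &entry->node) {
- *		... key already present, entry->node was not inserted ...
- *	}
- */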
-
-/*
- * lttng_ust_lfht_add_replace - replace or add a node within hash table.
- * @ht: the hash table.
- * @hash: the node's hash.
- * @match: the key match function.
- * @key: the node's key.
- * @node: the node to add.
- *
- * Return the node replaced upon success. If no node matching the key
- * was present, return NULL, which also means the operation succeeded.
- * This replacement operation should never fail.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * After successful replacement, a grace period must be waited for before
- * freeing or re-using the memory reserved for the returned node.
- *
- * The semantic of replacement vs lookups and traversals is the
- * following: if lookups and traversals are performed between a key
- * unique insertion and its removal, we guarantee that the lookups and
- * traversals will always find exactly one instance of the key if it is
- * replaced concurrently with the lookups.
- *
- * Providing this semantic allows us to ensure that replacement-only
- * schemes will never generate duplicated keys. It also allows us to
- * guarantee that a combination of add_replace and add_unique updates
- * will never generate duplicated keys.
- *
- * This function issues a full memory barrier before and after its
- * atomic commit.
- */
-extern struct lttng_ust_lfht_node *lttng_ust_lfht_add_replace(struct lttng_ust_lfht *ht,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- struct lttng_ust_lfht_node *node)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_replace - replace a node pointed to by iter within hash table.
- * @ht: the hash table.
- * @old_iter: the iterator position of the node to replace.
- * @hash: the node's hash.
- * @match: the key match function.
- * @key: the node's key.
- * @new_node: the new node to use as replacement.
- *
- * Return 0 if replacement is successful, negative value otherwise.
- * Replacing a NULL old node or an already removed node will fail with
- * -ENOENT.
- * If the hash or value of the node to replace and the new node differ,
- * this function returns -EINVAL without proceeding to the replacement.
- * Old node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next.
- * RCU read-side lock must be held between lookup and replacement.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * After successful replacement, a grace period must be waited for before
- * freeing or re-using the memory reserved for the old node (which can
- * be accessed with lttng_ust_lfht_iter_get_node).
- *
- * The semantic of replacement vs lookups is the same as
- * lttng_ust_lfht_add_replace().
- *
- * Upon success, this function issues a full memory barrier before and
- * after its atomic commit. Upon failure, this function does not issue
- * any memory barrier.
- */
-extern int lttng_ust_lfht_replace(struct lttng_ust_lfht *ht,
- struct lttng_ust_lfht_iter *old_iter,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- struct lttng_ust_lfht_node *new_node)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_del - remove node pointed to by iterator from hash table.
- * @ht: the hash table.
- * @node: the node to delete.
- *
- * Return 0 if the node is successfully removed, negative value
- * otherwise.
- * Deleting a NULL node or an already removed node will fail with a
- * negative value.
- * Node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next,
- * followed by use of lttng_ust_lfht_iter_get_node.
- * RCU read-side lock must be held between lookup and removal.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * After successful removal, a grace period must be waited for before
- * freeing or re-using the memory reserved for old node (which can be
- * accessed with lttng_ust_lfht_iter_get_node).
- * Upon success, this function issues a full memory barrier before and
- * after its atomic commit. Upon failure, this function does not issue
- * any memory barrier.
- */
-extern int lttng_ust_lfht_del(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_node *node)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_is_node_deleted - query whether a node is removed from hash table.
- *
- * Return non-zero if the node is deleted from the hash table, 0
- * otherwise.
- * Node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next,
- * followed by use of lttng_ust_lfht_iter_get_node.
- * RCU read-side lock must be held between lookup and call to this
- * function.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * This function does not issue any memory barrier.
- */
-extern int lttng_ust_lfht_is_node_deleted(const struct lttng_ust_lfht_node *node)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_resize - Force a hash table resize
- * @ht: the hash table.
- * @new_size: update to this hash table size.
- *
- * Threads calling this API need to be registered RCU read-side threads.
- * This function does not (necessarily) issue memory barriers.
- * lttng_ust_lfht_resize should *not* be called from a RCU read-side critical
- * section.
- */
-extern void lttng_ust_lfht_resize(struct lttng_ust_lfht *ht, unsigned long new_size)
- __attribute__((visibility("hidden")));
-
-/*
- * Note: it is safe to perform element removal (del), replacement, or
- * any hash table update operation during any of the following hash
- * table traversals.
- * These functions act as rcu_dereference() to read the node pointers.
- */
-#define lttng_ust_lfht_for_each(ht, iter, node) \
- for (lttng_ust_lfht_first(ht, iter), \
- node = lttng_ust_lfht_iter_get_node(iter); \
- node != NULL; \
- lttng_ust_lfht_next(ht, iter), \
- node = lttng_ust_lfht_iter_get_node(iter))
-
-#define lttng_ust_lfht_for_each_duplicate(ht, hash, match, key, iter, node) \
- for (lttng_ust_lfht_lookup(ht, hash, match, key, iter), \
- node = lttng_ust_lfht_iter_get_node(iter); \
- node != NULL; \
- lttng_ust_lfht_next_duplicate(ht, match, key, iter), \
- node = lttng_ust_lfht_iter_get_node(iter))
-
-#define lttng_ust_lfht_for_each_entry(ht, iter, pos, member) \
- for (lttng_ust_lfht_first(ht, iter), \
- pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
- __typeof__(*(pos)), member); \
- lttng_ust_lfht_iter_get_node(iter) != NULL; \
- lttng_ust_lfht_next(ht, iter), \
- pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
- __typeof__(*(pos)), member))
-
-#define lttng_ust_lfht_for_each_entry_duplicate(ht, hash, match, key, \
- iter, pos, member) \
- for (lttng_ust_lfht_lookup(ht, hash, match, key, iter), \
- pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
- __typeof__(*(pos)), member); \
- lttng_ust_lfht_iter_get_node(iter) != NULL; \
- lttng_ust_lfht_next_duplicate(ht, match, key, iter), \
- pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
- __typeof__(*(pos)), member))
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _LTTNG_UST_RCULFHASH_H */
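For context, the header removed above is the internal lock-free hash table API. The following is a minimal illustrative sketch (not taken from the tree) of how its lookup/removal primitives combine with the RCU rules spelled out in the comments; the match-callback shape is an assumption borrowed from the userspace-RCU lfht convention, and the lttng_ust_urcu read-side calls are the ones that appear later in this patch.

    #include <stdlib.h>
    #include <string.h>
    #include <stdbool.h>

    struct name_entry {
    	struct lttng_ust_lfht_node node;	/* embedded hash table node */
    	char name[64];
    };

    /* Assumed match-callback shape (non-zero on match), as in userspace-RCU lfht. */
    static int match_name(struct lttng_ust_lfht_node *node, const void *key)
    {
    	struct name_entry *e = caa_container_of(node, struct name_entry, node);

    	return strcmp(e->name, key) == 0;
    }

    static void delete_entry(struct lttng_ust_lfht *ht, const char *name, unsigned long hash)
    {
    	struct lttng_ust_lfht_iter iter;
    	struct lttng_ust_lfht_node *node;
    	bool removed = false;

    	lttng_ust_urcu_read_lock();
    	lttng_ust_lfht_lookup(ht, hash, match_name, name, &iter);
    	node = lttng_ust_lfht_iter_get_node(&iter);
    	if (node)
    		removed = !lttng_ust_lfht_del(ht, node);
    	lttng_ust_urcu_read_unlock();

    	if (removed) {
    		/* Per the comments above: wait for a grace period before freeing. */
    		lttng_ust_urcu_synchronize_rcu();
    		free(caa_container_of(node, struct name_entry, node));
    	}
    }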
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2017 Philippe Proulx <pproulx@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <stdlib.h>
-#include <string.h>
-#include <stdbool.h>
-#include <assert.h>
-
-#include "string-utils.h"
-
-enum star_glob_pattern_type_flags {
- STAR_GLOB_PATTERN_TYPE_FLAG_NONE = 0,
- STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN = 1,
- STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY = 2,
-};
-
-static
-enum star_glob_pattern_type_flags strutils_test_glob_pattern(const char *pattern)
-{
- enum star_glob_pattern_type_flags ret =
- STAR_GLOB_PATTERN_TYPE_FLAG_NONE;
- const char *p;
-
- assert(pattern);
-
- for (p = pattern; *p != '\0'; p++) {
- switch (*p) {
- case '*':
- ret = STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN;
-
- if (p[1] == '\0') {
- ret |= STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY;
- }
-
- goto end;
- case '\\':
- p++;
-
- if (*p == '\0') {
- goto end;
- }
- break;
- default:
- break;
- }
- }
-
-end:
- return ret;
-}
-
-/*
- * Returns true if `pattern` is a star-only globbing pattern, that is,
- * it contains at least one non-escaped `*`.
- */
-bool strutils_is_star_glob_pattern(const char *pattern)
-{
- return strutils_test_glob_pattern(pattern) &
- STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN;
-}
-
-/*
- * Returns true if `pattern` is a globbing pattern with a globbing,
- * non-escaped star only at its very end.
- */
-bool strutils_is_star_at_the_end_only_glob_pattern(const char *pattern)
-{
- return strutils_test_glob_pattern(pattern) &
- STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY;
-}
-
-static inline
-bool at_end_of_pattern(const char *p, const char *pattern, size_t pattern_len)
-{
- return (p - pattern) == pattern_len || *p == '\0';
-}
-
-/*
- * Globbing matching function with the star feature only (`?` and
- * character sets are not supported). This matches `candidate` (plain
- * string) against `pattern`. A literal star can be escaped with `\` in
- * `pattern`.
- *
- * `pattern_len` or `candidate_len` can be greater than the actual
- * string length of `pattern` or `candidate` if the string is
- * null-terminated.
- */
-bool strutils_star_glob_match(const char *pattern, size_t pattern_len,
-		const char *candidate, size_t candidate_len)
-{
- const char *retry_c = candidate, *retry_p = pattern, *c, *p;
- bool got_a_star = false;
-
-retry:
- c = retry_c;
- p = retry_p;
-
- /*
- * The concept here is to retry a match in the specific case
- * where we already got a star. The retry position for the
- * pattern is just after the most recent star, and the retry
- * position for the candidate is the character following the
- * last try's first character.
- *
- * Example:
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^^
- * pattern: hi*every*one
- * ^^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^^
- * pattern: hi*every*one
- * ^^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^^
- * pattern: hi*every*one
- * ^^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^^
- * pattern: hi*every*one
- * ^^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^ SUCCESS
- */
- while ((c - candidate) < candidate_len && *c != '\0') {
- assert(*c);
-
- if (at_end_of_pattern(p, pattern, pattern_len)) {
- goto end_of_pattern;
- }
-
- switch (*p) {
- case '*':
- got_a_star = true;
-
- /*
- * Our first try starts at the current candidate
- * character and after the star in the pattern.
- */
- retry_c = c;
- retry_p = p + 1;
-
- if (at_end_of_pattern(retry_p, pattern, pattern_len)) {
- /*
- * Star at the end of the pattern at
- * this point: automatic match.
- */
- return true;
- }
-
- goto retry;
- case '\\':
- /* Go to escaped character. */
- p++; /* Fallthrough */
-
- /*
-		 * Fall through to the default case, which will
- * compare the escaped character now.
- */
- default:
- if (at_end_of_pattern(p, pattern, pattern_len) ||
- *c != *p) {
-end_of_pattern:
- /* Character mismatch OR end of pattern. */
- if (!got_a_star) {
- /*
- * We didn't get any star yet,
- * so this first mismatch
- * automatically makes the whole
- * test fail.
- */
- return false;
- }
-
- /*
- * Next try: next candidate character,
- * original pattern character (following
- * the most recent star).
- */
- retry_c++;
- goto retry;
- }
- break;
- }
-
- /* Next pattern and candidate characters. */
- c++;
- p++;
- }
-
- /*
- * We checked every candidate character and we're still in a
- * success state: the only pattern character allowed to remain
- * is a star.
- */
- if (at_end_of_pattern(p, pattern, pattern_len)) {
- return true;
- }
-
- p++;
- return p[-1] == '*' && at_end_of_pattern(p, pattern, pattern_len);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2017 Philippe Proulx <pproulx@efficios.com>
- */
-
-#ifndef _STRING_UTILS_H
-#define _STRING_UTILS_H
-
-#include <stdbool.h>
-#include <stddef.h>
-
-bool strutils_is_star_glob_pattern(const char *pattern)
- __attribute__((visibility("hidden")));
-
-bool strutils_is_star_at_the_end_only_glob_pattern(const char *pattern)
- __attribute__((visibility("hidden")));
-
-bool strutils_star_glob_match(const char *pattern, size_t pattern_len,
- const char *candidate, size_t candidate_len)
- __attribute__((visibility("hidden")));
-
-#endif /* _STRING_UTILS_H */
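To make the matcher's semantics concrete, here is a small illustrative check (not from the tree) against the removed helpers; the "hi*every*one" pattern reuses the walkthrough from the comment in string-utils.c above.

    #include <assert.h>
    #include <string.h>

    static void star_glob_examples(void)
    {
    	const char *pattern = "hi*every*one";
    	const char *candidate = "hi ev every onyx one";

    	/* Matches: each '*' absorbs the text between the literal segments. */
    	assert(strutils_star_glob_match(pattern, strlen(pattern),
    			candidate, strlen(candidate)));

    	/* No match: the candidate does not end with "one". */
    	assert(!strutils_star_glob_match(pattern, strlen(pattern),
    			"hi every two", strlen("hi every two")));

    	/* A backslash-escaped '*' in the pattern is matched literally. */
    	assert(strutils_star_glob_match("literal\\*star", strlen("literal\\*star"),
    			"literal*star", strlen("literal*star")));

    	/* Only a non-escaped '*' makes a pattern a star-glob pattern. */
    	assert(strutils_is_star_glob_pattern(pattern));
    	assert(!strutils_is_star_glob_pattern("literal\\*star"));
    }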
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2013-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include "common/macros.h"
-
-#define TRACEPOINT_CREATE_PROBES
-#define TRACEPOINT_DEFINE
-#include "lttng-ust-tracef-provider.h"
-
-static inline
-void __lttng_ust_vtracef(const char *fmt, va_list ap)
- __attribute__((always_inline, format(printf, 1, 0)));
-static inline
-void __lttng_ust_vtracef(const char *fmt, va_list ap)
-{
- char *msg;
- const int len = vasprintf(&msg, fmt, ap);
-
- /* len does not include the final \0 */
- if (len < 0)
- goto end;
- __tracepoint_cb_lttng_ust_tracef___event(msg, len,
- LTTNG_UST_CALLER_IP());
- free(msg);
-end:
- return;
-}
-
-/*
- * FIXME: We should include <lttng/tracef.h> for the declarations here, but it
- * fails with tracepoint magic above my paygrade.
- */
-
-void _lttng_ust_vtracef(const char *fmt, va_list ap)
- __attribute__((format(printf, 1, 0)));
-void _lttng_ust_vtracef(const char *fmt, va_list ap)
-{
- __lttng_ust_vtracef(fmt, ap);
-}
-
-void _lttng_ust_tracef(const char *fmt, ...)
- __attribute__((format(printf, 1, 2)));
-void _lttng_ust_tracef(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- __lttng_ust_vtracef(fmt, ap);
- va_end(ap);
-}
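The removed tracef.c only provides the internal _lttng_ust_tracef()/_lttng_ust_vtracef() symbols; applications reach them through the tracef() convenience macro. A minimal illustrative sketch, assuming the public <lttng/tracef.h> header and linking against liblttng-ust:

    #include <lttng/tracef.h>

    void report_progress(int done, int total)
    {
    	/* printf(3)-style formatting, recorded as the lttng_ust_tracef:event event. */
    	tracef("progress: %d/%d items processed", done, total);
    }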
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2013-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include "common/macros.h"
-
-#define TRACEPOINT_CREATE_PROBES
-#define TRACEPOINT_DEFINE
-#include "lttng-ust-tracelog-provider.h"
-
-#define TRACELOG_CB(level) \
- static inline \
- void __lttng_ust_vtracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, va_list ap) \
- __attribute__((always_inline, format(printf, 4, 0))); \
- \
- static inline \
- void __lttng_ust_vtracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, va_list ap) \
- { \
- char *msg; \
- const int len = vasprintf(&msg, fmt, ap); \
- \
- /* len does not include the final \0 */ \
- if (len < 0) \
- goto end; \
- __tracepoint_cb_lttng_ust_tracelog___##level(file, \
- line, func, msg, len, \
- LTTNG_UST_CALLER_IP()); \
- free(msg); \
- end: \
- return; \
- } \
- \
- void _lttng_ust_vtracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, va_list ap) \
- __attribute__ ((format(printf, 4, 0))); \
- \
- void _lttng_ust_vtracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, va_list ap); \
- void _lttng_ust_vtracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, va_list ap) \
- { \
- __lttng_ust_vtracelog_##level(file, line, func, fmt, ap); \
- } \
- \
- void _lttng_ust_tracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, ...) \
- __attribute__ ((format(printf, 4, 5))); \
- \
- void _lttng_ust_tracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, ...); \
- void _lttng_ust_tracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, ...) \
- { \
- va_list ap; \
- \
- va_start(ap, fmt); \
- __lttng_ust_vtracelog_##level(file, line, func, fmt, ap); \
- va_end(ap); \
- }
-
-TRACELOG_CB(TRACE_EMERG)
-TRACELOG_CB(TRACE_ALERT)
-TRACELOG_CB(TRACE_CRIT)
-TRACELOG_CB(TRACE_ERR)
-TRACELOG_CB(TRACE_WARNING)
-TRACELOG_CB(TRACE_NOTICE)
-TRACELOG_CB(TRACE_INFO)
-TRACELOG_CB(TRACE_DEBUG_SYSTEM)
-TRACELOG_CB(TRACE_DEBUG_PROGRAM)
-TRACELOG_CB(TRACE_DEBUG_PROCESS)
-TRACELOG_CB(TRACE_DEBUG_MODULE)
-TRACELOG_CB(TRACE_DEBUG_UNIT)
-TRACELOG_CB(TRACE_DEBUG_FUNCTION)
-TRACELOG_CB(TRACE_DEBUG_LINE)
-TRACELOG_CB(TRACE_DEBUG)
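Likewise, the TRACELOG_CB() expansions above only define the per-level internal symbols; applications use the tracelog() macro, which supplies the source location (file, line, function) automatically. An illustrative sketch, assuming the public <lttng/tracelog.h> header:

    #include <lttng/tracelog.h>

    void handle_request(const char *path, int err)
    {
    	if (err)
    		tracelog(TRACE_ERR, "request for %s failed with error %d", path, err);
    	else
    		tracelog(TRACE_DEBUG, "request for %s handled", path);
    }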
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_TRACEPOINT_INTERNAL_H
-#define _LTTNG_TRACEPOINT_INTERNAL_H
-
-#include <urcu/list.h>
-#include <lttng/tracepoint-types.h>
-#include <lttng/ust-events.h>
-
-#define TRACE_DEFAULT TRACE_DEBUG_LINE
-
-struct tracepoint_lib {
- struct cds_list_head list; /* list of registered libs */
- struct lttng_ust_tracepoint * const *tracepoints_start;
- int tracepoints_count;
- struct cds_list_head callsites;
-};
-
-int tracepoint_probe_register_noupdate(const char *name,
- void (*callback)(void), void *priv,
- const char *signature)
- __attribute__((visibility("hidden")));
-
-int tracepoint_probe_unregister_noupdate(const char *name,
- void (*callback)(void), void *priv)
- __attribute__((visibility("hidden")));
-
-void tracepoint_probe_update_all(void)
- __attribute__((visibility("hidden")));
-
-
-void *lttng_ust_tp_check_weak_hidden1(void)
- __attribute__((visibility("hidden")));
-
-void *lttng_ust_tp_check_weak_hidden2(void)
- __attribute__((visibility("hidden")));
-
-void *lttng_ust_tp_check_weak_hidden3(void)
- __attribute__((visibility("hidden")));
-
-/*
- * These symbols are ABI between liblttng-ust-tracepoint and liblttng-ust,
- * which is why they are not hidden and not part of the public API.
- */
-int lttng_ust_tp_probe_register_queue_release(const char *name,
- void (*func)(void), void *data, const char *signature);
-int lttng_ust_tp_probe_unregister_queue_release(const char *name,
- void (*func)(void), void *data);
-void lttng_ust_tp_probe_prune_release_queue(void);
-
-void lttng_ust_tp_init(void);
-void lttng_ust_tp_exit(void);
-
-
-#endif /* _LTTNG_TRACEPOINT_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include "tracepoint-internal.h"
-
-/* Test compiler support for weak symbols with hidden visibility. */
-int __tracepoint_test_symbol1 __attribute__((weak, visibility("hidden")));
-void *__tracepoint_test_symbol2 __attribute__((weak, visibility("hidden")));
-struct {
- char a[24];
-} __tracepoint_test_symbol3 __attribute__((weak, visibility("hidden")));
-
-void *lttng_ust_tp_check_weak_hidden1(void)
-{
- return &__tracepoint_test_symbol1;
-}
-
-void *lttng_ust_tp_check_weak_hidden2(void)
-{
- return &__tracepoint_test_symbol2;
-}
-
-void *lttng_ust_tp_check_weak_hidden3(void)
-{
- return &__tracepoint_test_symbol3;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2008-2011 Mathieu Desnoyers
- * Copyright (C) 2009 Pierre-Marc Fournier
- *
- * Ported to userspace by Pierre-Marc Fournier.
- */
-
-#define _LGPL_SOURCE
-#include <errno.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <stdio.h>
-
-#include <urcu/arch.h>
-#include <lttng/urcu/urcu-ust.h>
-#include <urcu/hlist.h>
-#include <urcu/uatomic.h>
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#include <lttng/tracepoint.h>
-#include <lttng/ust-abi.h> /* for LTTNG_UST_ABI_SYM_NAME_LEN */
-
-#include "common/logging.h"
-#include "common/macros.h"
-
-#include "tracepoint-internal.h"
-#include "lttng-tracer-core.h"
-#include "jhash.h"
-#include "error.h"
-
-/* Test compiler support for weak symbols with hidden visibility. */
-int __tracepoint_test_symbol1 __attribute__((weak, visibility("hidden")));
-void *__tracepoint_test_symbol2 __attribute__((weak, visibility("hidden")));
-struct {
- char a[24];
-} __tracepoint_test_symbol3 __attribute__((weak, visibility("hidden")));
-
-/* Set to 1 to enable tracepoint debug output */
-static const int tracepoint_debug;
-static int initialized;
-
-/*
- * If tracepoint_destructors_state = 1, tracepoint destructors are
- * enabled. They are disabled otherwise.
- */
-static int tracepoint_destructors_state = 1;
-
-static void (*new_tracepoint_cb)(struct lttng_ust_tracepoint *);
-
-/*
- * tracepoint_mutex nests inside UST mutex.
- *
- * Note about interaction with fork/clone: UST does not hold the
- * tracepoint mutex across fork/clone because it is either:
- * - nested within UST mutex, in which case holding the UST mutex across
- * fork/clone suffice,
- * - taken by a library constructor, which should never race with a
- * fork/clone if the application is expected to continue running with
- * the same memory layout (no following exec()).
- */
-static pthread_mutex_t tracepoint_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * libraries that contain tracepoints (struct tracepoint_lib).
- * Protected by tracepoint mutex.
- */
-static CDS_LIST_HEAD(libs);
-
-/*
- * The tracepoint mutex protects the library tracepoints, the hash table, and
- * the library list.
- * All calls to the tracepoint API must be protected by the tracepoint mutex,
- * except calls to tracepoint_register_lib and
- * tracepoint_unregister_lib, which take the tracepoint mutex themselves.
- */
-
-/*
- * Tracepoint hash table, containing the active tracepoints.
- * Protected by tracepoint mutex.
- */
-#define TRACEPOINT_HASH_BITS 12
-#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
-static struct cds_hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
-
-static CDS_LIST_HEAD(old_probes);
-static int need_update;
-
-static CDS_LIST_HEAD(release_queue);
-static int release_queue_need_update;
-
-/*
- * Note about RCU:
- * It is used to delay the freeing of old probes arrays until a quiescent
- * state is reached.
- * Tracepoint entries modifications are protected by the tracepoint mutex.
- */
-struct tracepoint_entry {
- struct cds_hlist_node hlist;
- struct lttng_ust_tracepoint_probe *probes;
- int refcount; /* Number of times armed. 0 if disarmed. */
- int callsite_refcount; /* how many libs use this tracepoint */
- char *signature;
- char *name;
-};
-
-struct tp_probes {
- union {
- struct cds_list_head list;
- /* Field below only used for call_rcu scheme */
- /* struct rcu_head head; */
- } u;
- struct lttng_ust_tracepoint_probe probes[0];
-};
-
-/*
- * Callsite hash table, containing the tracepoint call sites.
- * Protected by tracepoint mutex.
- */
-#define CALLSITE_HASH_BITS 12
-#define CALLSITE_TABLE_SIZE (1 << CALLSITE_HASH_BITS)
-static struct cds_hlist_head callsite_table[CALLSITE_TABLE_SIZE];
-
-struct callsite_entry {
- struct cds_hlist_node hlist; /* hash table node */
- struct cds_list_head node; /* lib list of callsites node */
- struct lttng_ust_tracepoint *tp;
-	bool tp_entry_callsite_ref; /* Whether a tp_entry took a ref on this callsite */
-};
-
-/* coverity[+alloc] */
-static void *allocate_probes(int count)
-{
- struct tp_probes *p =
- zmalloc(count * sizeof(struct lttng_ust_tracepoint_probe)
- + sizeof(struct tp_probes));
- return p == NULL ? NULL : p->probes;
-}
-
-/* coverity[+free : arg-0] */
-static void release_probes(void *old)
-{
- if (old) {
- struct tp_probes *tp_probes = caa_container_of(old,
- struct tp_probes, probes[0]);
- lttng_ust_urcu_synchronize_rcu();
- free(tp_probes);
- }
-}
-
-static void debug_print_probes(struct tracepoint_entry *entry)
-{
- int i;
-
- if (!tracepoint_debug || !entry->probes)
- return;
-
- for (i = 0; entry->probes[i].func; i++)
- DBG("Probe %d : %p", i, entry->probes[i].func);
-}
-
-static void *
-tracepoint_entry_add_probe(struct tracepoint_entry *entry,
- void (*probe)(void), void *data)
-{
- int nr_probes = 0;
- struct lttng_ust_tracepoint_probe *old, *new;
-
- if (!probe) {
- WARN_ON(1);
- return ERR_PTR(-EINVAL);
- }
- debug_print_probes(entry);
- old = entry->probes;
- if (old) {
- /* (N -> N+1), (N != 0, 1) probes */
- for (nr_probes = 0; old[nr_probes].func; nr_probes++)
- if (old[nr_probes].func == probe &&
- old[nr_probes].data == data)
- return ERR_PTR(-EEXIST);
- }
- /* + 2 : one for new probe, one for NULL func */
- new = allocate_probes(nr_probes + 2);
- if (new == NULL)
- return ERR_PTR(-ENOMEM);
- if (old)
- memcpy(new, old,
- nr_probes * sizeof(struct lttng_ust_tracepoint_probe));
- new[nr_probes].func = probe;
- new[nr_probes].data = data;
- new[nr_probes + 1].func = NULL;
- entry->refcount = nr_probes + 1;
- entry->probes = new;
- debug_print_probes(entry);
- return old;
-}
-
-static void *
-tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
- void (*probe)(void), void *data)
-{
- int nr_probes = 0, nr_del = 0, i;
- struct lttng_ust_tracepoint_probe *old, *new;
-
- old = entry->probes;
-
- if (!old)
- return ERR_PTR(-ENOENT);
-
- debug_print_probes(entry);
- /* (N -> M), (N > 1, M >= 0) probes */
- if (probe) {
- for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
- if (old[nr_probes].func == probe &&
- old[nr_probes].data == data)
- nr_del++;
- }
- }
-
- if (nr_probes - nr_del == 0) {
- /* N -> 0, (N > 1) */
- entry->probes = NULL;
- entry->refcount = 0;
- debug_print_probes(entry);
- return old;
- } else {
- int j = 0;
- /* N -> M, (N > 1, M > 0) */
- /* + 1 for NULL */
- new = allocate_probes(nr_probes - nr_del + 1);
- if (new == NULL)
- return ERR_PTR(-ENOMEM);
- for (i = 0; old[i].func; i++)
- if (old[i].func != probe || old[i].data != data)
- new[j++] = old[i];
- new[nr_probes - nr_del].func = NULL;
- entry->refcount = nr_probes - nr_del;
- entry->probes = new;
- }
- debug_print_probes(entry);
- return old;
-}
-
-/*
- * Get tracepoint if the tracepoint is present in the tracepoint hash table.
- * Must be called with tracepoint mutex held.
- * Returns NULL if not present.
- */
-static struct tracepoint_entry *get_tracepoint(const char *name)
-{
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- struct tracepoint_entry *e;
- size_t name_len = strlen(name);
- uint32_t hash;
-
- if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
- WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
- name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
- }
- hash = jhash(name, name_len, 0);
- head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
- cds_hlist_for_each_entry(e, node, head, hlist) {
- if (!strncmp(name, e->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1))
- return e;
- }
- return NULL;
-}
-
-/*
- * Add the tracepoint to the tracepoint hash table. Must be called with
- * tracepoint mutex held.
- */
-static struct tracepoint_entry *add_tracepoint(const char *name,
- const char *signature)
-{
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- struct tracepoint_entry *e;
- size_t name_len = strlen(name);
- size_t sig_len = strlen(signature);
- size_t sig_off, name_off;
- uint32_t hash;
-
- if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
- WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
- name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
- }
- hash = jhash(name, name_len, 0);
- head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
- cds_hlist_for_each_entry(e, node, head, hlist) {
- if (!strncmp(name, e->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1)) {
- DBG("tracepoint %s busy", name);
- return ERR_PTR(-EEXIST); /* Already there */
- }
- }
-
- /*
-	 * Using zmalloc here to allocate variable-length elements: name and
- * signature. Could cause some memory fragmentation if overused.
- */
- name_off = sizeof(struct tracepoint_entry);
- sig_off = name_off + name_len + 1;
-
- e = zmalloc(sizeof(struct tracepoint_entry) + name_len + 1 + sig_len + 1);
- if (!e)
- return ERR_PTR(-ENOMEM);
- e->name = (char *) e + name_off;
- memcpy(e->name, name, name_len + 1);
- e->name[name_len] = '\0';
-
- e->signature = (char *) e + sig_off;
- memcpy(e->signature, signature, sig_len + 1);
- e->signature[sig_len] = '\0';
-
- e->probes = NULL;
- e->refcount = 0;
- e->callsite_refcount = 0;
-
- cds_hlist_add_head(&e->hlist, head);
- return e;
-}
-
-/*
- * Remove the tracepoint from the tracepoint hash table. Must be called with
- * tracepoint mutex held.
- */
-static void remove_tracepoint(struct tracepoint_entry *e)
-{
- cds_hlist_del(&e->hlist);
- free(e);
-}
-
-/*
- * Sets the probe callback corresponding to one tracepoint.
- */
-static void set_tracepoint(struct tracepoint_entry **entry,
- struct lttng_ust_tracepoint *elem, int active)
-{
- WARN_ON(strncmp((*entry)->name, elem->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1) != 0);
- /*
- * Check that signatures match before connecting a probe to a
- * tracepoint. Warn the user if they don't.
- */
- if (strcmp(elem->signature, (*entry)->signature) != 0) {
- static int warned = 0;
-
- /* Only print once, don't flood console. */
- if (!warned) {
- WARN("Tracepoint signature mismatch, not enabling one or more tracepoints. Ensure that the tracepoint probes prototypes match the application.");
- WARN("Tracepoint \"%s\" signatures: call: \"%s\" vs probe: \"%s\".",
- elem->name, elem->signature, (*entry)->signature);
- warned = 1;
- }
- /* Don't accept connecting non-matching signatures. */
- return;
- }
-
- /*
- * rcu_assign_pointer has a cmm_smp_wmb() which makes sure that the new
- * probe callbacks array is consistent before setting a pointer to it.
- * This array is referenced by __DO_TRACE from
- * include/linux/tracepoints.h. A matching cmm_smp_read_barrier_depends()
- * is used.
- */
- lttng_ust_rcu_assign_pointer(elem->probes, (*entry)->probes);
- CMM_STORE_SHARED(elem->state, active);
-}
-
-/*
- * Disable a tracepoint and its probe callback.
- * Note: only waiting for an RCU grace period after setting elem->call to the
- * empty function ensures that the original callback is not used anymore. This
- * is ensured by preempt_disable around the call site.
- */
-static void disable_tracepoint(struct lttng_ust_tracepoint *elem)
-{
- CMM_STORE_SHARED(elem->state, 0);
- lttng_ust_rcu_assign_pointer(elem->probes, NULL);
-}
-
-/*
- * Add the callsite to the callsite hash table. Must be called with
- * tracepoint mutex held.
- */
-static void add_callsite(struct tracepoint_lib * lib, struct lttng_ust_tracepoint *tp)
-{
- struct cds_hlist_head *head;
- struct callsite_entry *e;
- const char *name = tp->name;
- size_t name_len = strlen(name);
- uint32_t hash;
- struct tracepoint_entry *tp_entry;
-
- if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
- WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
- name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
- }
- hash = jhash(name, name_len, 0);
- head = &callsite_table[hash & (CALLSITE_TABLE_SIZE - 1)];
- e = zmalloc(sizeof(struct callsite_entry));
- if (!e) {
- PERROR("Unable to add callsite for tracepoint \"%s\"", name);
- return;
- }
- cds_hlist_add_head(&e->hlist, head);
- e->tp = tp;
- cds_list_add(&e->node, &lib->callsites);
-
- tp_entry = get_tracepoint(name);
- if (!tp_entry)
- return;
- tp_entry->callsite_refcount++;
- e->tp_entry_callsite_ref = true;
-}
-
-/*
- * Remove the callsite from the callsite hash table and from lib
- * callsite list. Must be called with tracepoint mutex held.
- */
-static void remove_callsite(struct callsite_entry *e)
-{
- struct tracepoint_entry *tp_entry;
-
- tp_entry = get_tracepoint(e->tp->name);
- if (tp_entry) {
- if (e->tp_entry_callsite_ref)
- tp_entry->callsite_refcount--;
- if (tp_entry->callsite_refcount == 0)
- disable_tracepoint(e->tp);
- }
- cds_hlist_del(&e->hlist);
- cds_list_del(&e->node);
- free(e);
-}
-
-/*
- * Enable/disable all callsites based on the state of a specific
- * tracepoint entry.
- * Must be called with tracepoint mutex held.
- */
-static void tracepoint_sync_callsites(const char *name)
-{
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- struct callsite_entry *e;
- size_t name_len = strlen(name);
- uint32_t hash;
- struct tracepoint_entry *tp_entry;
-
- tp_entry = get_tracepoint(name);
- if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
- WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
- name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
- }
- hash = jhash(name, name_len, 0);
- head = &callsite_table[hash & (CALLSITE_TABLE_SIZE - 1)];
- cds_hlist_for_each_entry(e, node, head, hlist) {
- struct lttng_ust_tracepoint *tp = e->tp;
-
- if (strncmp(name, tp->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1))
- continue;
- if (tp_entry) {
- if (!e->tp_entry_callsite_ref) {
- tp_entry->callsite_refcount++;
- e->tp_entry_callsite_ref = true;
- }
- set_tracepoint(&tp_entry, tp,
- !!tp_entry->refcount);
- } else {
- disable_tracepoint(tp);
- e->tp_entry_callsite_ref = false;
- }
- }
-}
-
-/**
- * tracepoint_update_probe_range - Update a probe range
- * @begin: beginning of the range
- * @end: end of the range
- *
- * Updates the probe callback corresponding to a range of tracepoints.
- */
-static
-void tracepoint_update_probe_range(struct lttng_ust_tracepoint * const *begin,
- struct lttng_ust_tracepoint * const *end)
-{
- struct lttng_ust_tracepoint * const *iter;
- struct tracepoint_entry *mark_entry;
-
- for (iter = begin; iter < end; iter++) {
- if (!*iter)
- continue; /* skip dummy */
- if (!(*iter)->name) {
- disable_tracepoint(*iter);
- continue;
- }
- mark_entry = get_tracepoint((*iter)->name);
- if (mark_entry) {
- set_tracepoint(&mark_entry, *iter,
- !!mark_entry->refcount);
- } else {
- disable_tracepoint(*iter);
- }
- }
-}
-
-static void lib_update_tracepoints(struct tracepoint_lib *lib)
-{
- tracepoint_update_probe_range(lib->tracepoints_start,
- lib->tracepoints_start + lib->tracepoints_count);
-}
-
-static void lib_register_callsites(struct tracepoint_lib *lib)
-{
- struct lttng_ust_tracepoint * const *begin;
- struct lttng_ust_tracepoint * const *end;
- struct lttng_ust_tracepoint * const *iter;
-
- begin = lib->tracepoints_start;
- end = lib->tracepoints_start + lib->tracepoints_count;
-
- for (iter = begin; iter < end; iter++) {
- if (!*iter)
- continue; /* skip dummy */
- if (!(*iter)->name) {
- continue;
- }
- add_callsite(lib, *iter);
- }
-}
-
-static void lib_unregister_callsites(struct tracepoint_lib *lib)
-{
- struct callsite_entry *callsite, *tmp;
-
- cds_list_for_each_entry_safe(callsite, tmp, &lib->callsites, node)
- remove_callsite(callsite);
-}
-
-/*
- * Update probes, removing the faulty probes.
- */
-static void tracepoint_update_probes(void)
-{
- struct tracepoint_lib *lib;
-
- /* tracepoints registered from libraries and executable. */
- cds_list_for_each_entry(lib, &libs, list)
- lib_update_tracepoints(lib);
-}
-
-static struct lttng_ust_tracepoint_probe *
-tracepoint_add_probe(const char *name, void (*probe)(void), void *data,
- const char *signature)
-{
- struct tracepoint_entry *entry;
- struct lttng_ust_tracepoint_probe *old;
-
- entry = get_tracepoint(name);
- if (entry) {
- if (strcmp(entry->signature, signature) != 0) {
- ERR("Tracepoint and probe signature do not match.");
- return ERR_PTR(-EINVAL);
- }
- } else {
- entry = add_tracepoint(name, signature);
- if (IS_ERR(entry))
- return (struct lttng_ust_tracepoint_probe *)entry;
- }
- old = tracepoint_entry_add_probe(entry, probe, data);
- if (IS_ERR(old) && !entry->refcount)
- remove_tracepoint(entry);
- return old;
-}
-
-static void tracepoint_release_queue_add_old_probes(void *old)
-{
- release_queue_need_update = 1;
- if (old) {
- struct tp_probes *tp_probes = caa_container_of(old,
- struct tp_probes, probes[0]);
- cds_list_add(&tp_probes->u.list, &release_queue);
- }
-}
-
-/**
- * __tracepoint_probe_register - Connect a probe to a tracepoint
- * @name: tracepoint name
- * @probe: probe handler
- *
- * Returns 0 if ok, error value on error.
- * The probe address must at least be aligned on the architecture pointer size.
- * The tracepoint mutex is taken internally.
- */
-int __tracepoint_probe_register(const char *name, void (*probe)(void),
- void *data, const char *signature)
-{
- void *old;
- int ret = 0;
-
- DBG("Registering probe to tracepoint %s", name);
-
- pthread_mutex_lock(&tracepoint_mutex);
- old = tracepoint_add_probe(name, probe, data, signature);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- goto end;
- }
-
- tracepoint_sync_callsites(name);
- release_probes(old);
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
- return ret;
-}
-
-/*
- * Caller needs to invoke lttng_ust_tp_probe_prune_release_queue() after
- * calling lttng_ust_tp_probe_register_queue_release() one or multiple
- * times to ensure it does not leak memory.
- */
-int lttng_ust_tp_probe_register_queue_release(const char *name,
- void (*probe)(void), void *data, const char *signature)
-{
- void *old;
- int ret = 0;
-
- DBG("Registering probe to tracepoint %s. Queuing release.", name);
-
- pthread_mutex_lock(&tracepoint_mutex);
- old = tracepoint_add_probe(name, probe, data, signature);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- goto end;
- }
-
- tracepoint_sync_callsites(name);
- tracepoint_release_queue_add_old_probes(old);
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
- return ret;
-}
-
-static void *tracepoint_remove_probe(const char *name, void (*probe)(void),
- void *data)
-{
- struct tracepoint_entry *entry;
- void *old;
-
- entry = get_tracepoint(name);
- if (!entry)
- return ERR_PTR(-ENOENT);
- old = tracepoint_entry_remove_probe(entry, probe, data);
- if (IS_ERR(old))
- return old;
- if (!entry->refcount)
- remove_tracepoint(entry);
- return old;
-}
-
-/**
- * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
- * @name: tracepoint name
- * @probe: probe function pointer
- * @data: probe data pointer
- */
-int __tracepoint_probe_unregister(const char *name, void (*probe)(void),
- void *data)
-{
- void *old;
- int ret = 0;
-
- DBG("Un-registering probe from tracepoint %s", name);
-
- pthread_mutex_lock(&tracepoint_mutex);
- old = tracepoint_remove_probe(name, probe, data);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- goto end;
- }
- tracepoint_sync_callsites(name);
- release_probes(old);
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
- return ret;
-}
-
-/*
- * Caller needs to invoke lttng_ust_tp_probe_prune_release_queue() after
- * calling lttng_ust_tp_probe_unregister_queue_release() one or multiple
- * times to ensure it does not leak memory.
- */
-int lttng_ust_tp_probe_unregister_queue_release(const char *name,
- void (*probe)(void), void *data)
-{
- void *old;
- int ret = 0;
-
- DBG("Un-registering probe from tracepoint %s. Queuing release.", name);
-
- pthread_mutex_lock(&tracepoint_mutex);
- old = tracepoint_remove_probe(name, probe, data);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- goto end;
- }
- tracepoint_sync_callsites(name);
- tracepoint_release_queue_add_old_probes(old);
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
- return ret;
-}
-
-void lttng_ust_tp_probe_prune_release_queue(void)
-{
- CDS_LIST_HEAD(release_probes);
- struct tp_probes *pos, *next;
-
- DBG("Release queue of unregistered tracepoint probes.");
-
- pthread_mutex_lock(&tracepoint_mutex);
- if (!release_queue_need_update)
- goto end;
- if (!cds_list_empty(&release_queue))
- cds_list_replace_init(&release_queue, &release_probes);
- release_queue_need_update = 0;
-
- /* Wait for grace period between all sync_callsites and free. */
- lttng_ust_urcu_synchronize_rcu();
-
- cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
- cds_list_del(&pos->u.list);
- free(pos);
- }
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
-}
-
-static void tracepoint_add_old_probes(void *old)
-{
- need_update = 1;
- if (old) {
- struct tp_probes *tp_probes = caa_container_of(old,
- struct tp_probes, probes[0]);
- cds_list_add(&tp_probes->u.list, &old_probes);
- }
-}
-
-/**
- * tracepoint_probe_register_noupdate - register a probe without connecting it
- * @name: tracepoint name
- * @probe: probe handler
- *
- * caller must call tracepoint_probe_update_all()
- */
-int tracepoint_probe_register_noupdate(const char *name, void (*probe)(void),
- void *data, const char *signature)
-{
- void *old;
- int ret = 0;
-
- pthread_mutex_lock(&tracepoint_mutex);
- old = tracepoint_add_probe(name, probe, data, signature);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- goto end;
- }
- tracepoint_add_old_probes(old);
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
- return ret;
-}
-
-/**
- * tracepoint_probe_unregister_noupdate - remove a probe without disconnecting it
- * @name: tracepoint name
- * @probe: probe function pointer
- *
- * caller must call tracepoint_probe_update_all()
- * The tracepoint mutex is taken internally.
- */
-int tracepoint_probe_unregister_noupdate(const char *name, void (*probe)(void),
- void *data)
-{
- void *old;
- int ret = 0;
-
- DBG("Un-registering probe from tracepoint %s", name);
-
- pthread_mutex_lock(&tracepoint_mutex);
- old = tracepoint_remove_probe(name, probe, data);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- goto end;
- }
- tracepoint_add_old_probes(old);
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
- return ret;
-}
-
-/**
- * tracepoint_probe_update_all - update tracepoints
- */
-void tracepoint_probe_update_all(void)
-{
- CDS_LIST_HEAD(release_probes);
- struct tp_probes *pos, *next;
-
- pthread_mutex_lock(&tracepoint_mutex);
- if (!need_update) {
- goto end;
- }
- if (!cds_list_empty(&old_probes))
- cds_list_replace_init(&old_probes, &release_probes);
- need_update = 0;
-
- tracepoint_update_probes();
- /* Wait for grace period between update_probes and free. */
- lttng_ust_urcu_synchronize_rcu();
- cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
- cds_list_del(&pos->u.list);
- free(pos);
- }
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
-}
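tracepoint_probe_register_noupdate() and tracepoint_probe_update_all() above implement a batching pattern: queue several probe connections, then publish them in a single update pass. A hedged sketch of that pattern follows; the provider/event names, the probe callback, and the signature string are hypothetical.

    /* Hypothetical probe callback; a real one matches the tracepoint's arguments. */
    static void my_probe(void *data, int value)
    {
    	(void) data;
    	(void) value;
    }

    static void arm_my_probes(void)
    {
    	/* Queue both connections without publishing them yet... */
    	(void) tracepoint_probe_register_noupdate("my_provider:event_a",
    			(void (*)(void)) my_probe, NULL, "int value");
    	(void) tracepoint_probe_register_noupdate("my_provider:event_b",
    			(void (*)(void)) my_probe, NULL, "int value");

    	/* ...then connect them all in one pass. */
    	tracepoint_probe_update_all();
    }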
-
-static void new_tracepoints(struct lttng_ust_tracepoint * const *start,
- struct lttng_ust_tracepoint * const *end)
-{
- if (new_tracepoint_cb) {
- struct lttng_ust_tracepoint * const *t;
-
- for (t = start; t < end; t++) {
- if (*t)
- new_tracepoint_cb(*t);
- }
- }
-}
-
-/*
- * tracepoint_{un,}register_lib is meant to be looked up by instrumented
- * applications through dlsym(). If found, applications can register their
- * tracepoints; otherwise those tracepoints will not be available for
- * tracing. The number at the end of those symbols acts as a major
- * version for tracepoints.
- *
- * Older instrumented applications should still work with newer
- * liblttng-ust, but it is fine that instrumented applications compiled
- * against recent liblttng-ust headers require a recent liblttng-ust
- * runtime for those tracepoints to be taken into account.
- */
-int tracepoint_register_lib(struct lttng_ust_tracepoint * const *tracepoints_start,
- int tracepoints_count);
-int tracepoint_register_lib(struct lttng_ust_tracepoint * const *tracepoints_start,
- int tracepoints_count)
-{
- struct tracepoint_lib *pl, *iter;
-
- lttng_ust_tp_init();
-
- pl = (struct tracepoint_lib *) zmalloc(sizeof(struct tracepoint_lib));
- if (!pl) {
- PERROR("Unable to register tracepoint lib");
- return -1;
- }
- pl->tracepoints_start = tracepoints_start;
- pl->tracepoints_count = tracepoints_count;
- CDS_INIT_LIST_HEAD(&pl->callsites);
-
- pthread_mutex_lock(&tracepoint_mutex);
- /*
- * We sort the libs by struct lib pointer address.
- */
- cds_list_for_each_entry_reverse(iter, &libs, list) {
- BUG_ON(iter == pl); /* Should never be in the list twice */
- if (iter < pl) {
- /* We belong to the location right after iter. */
- cds_list_add(&pl->list, &iter->list);
- goto lib_added;
- }
- }
- /* We should be added at the head of the list */
- cds_list_add(&pl->list, &libs);
-lib_added:
- new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);
- lib_register_callsites(pl);
- lib_update_tracepoints(pl);
- pthread_mutex_unlock(&tracepoint_mutex);
-
- DBG("just registered a tracepoints section from %p and having %d tracepoints",
- tracepoints_start, tracepoints_count);
- if (ust_err_debug_enabled()) {
- int i;
-
- for (i = 0; i < tracepoints_count; i++) {
- DBG("registered tracepoint: %s", tracepoints_start[i]->name);
- }
- }
-
- return 0;
-}
-
-int tracepoint_unregister_lib(struct lttng_ust_tracepoint * const *tracepoints_start);
-int tracepoint_unregister_lib(struct lttng_ust_tracepoint * const *tracepoints_start)
-{
- struct tracepoint_lib *lib;
-
- pthread_mutex_lock(&tracepoint_mutex);
- cds_list_for_each_entry(lib, &libs, list) {
- if (lib->tracepoints_start != tracepoints_start)
- continue;
-
- cds_list_del(&lib->list);
- /*
- * Unregistering a callsite also decreases the
- * callsite reference count of the corresponding
- * tracepoint, and disables the tracepoint if
- * the reference count drops to zero.
- */
- lib_unregister_callsites(lib);
- DBG("just unregistered a tracepoints section from %p",
- lib->tracepoints_start);
- free(lib);
- break;
- }
- pthread_mutex_unlock(&tracepoint_mutex);
- return 0;
-}
-
-/*
- * Report in a debug message whether the compiler correctly supports weak
- * hidden symbols. This test checks that the address associated with two
- * weak symbols with hidden visibility is the same when declared within
- * two compile units part of the same module.
- */
-static void check_weak_hidden(void)
-{
- DBG("Your compiler treats weak symbols with hidden visibility for integer objects as %s between compile units part of the same module.",
- &__tracepoint_test_symbol1 == lttng_ust_tp_check_weak_hidden1() ?
- "SAME address" :
- "DIFFERENT addresses");
- DBG("Your compiler treats weak symbols with hidden visibility for pointer objects as %s between compile units part of the same module.",
- &__tracepoint_test_symbol2 == lttng_ust_tp_check_weak_hidden2() ?
- "SAME address" :
- "DIFFERENT addresses");
- DBG("Your compiler treats weak symbols with hidden visibility for 24-byte structure objects as %s between compile units part of the same module.",
- &__tracepoint_test_symbol3 == lttng_ust_tp_check_weak_hidden3() ?
- "SAME address" :
- "DIFFERENT addresses");
-}
-
-void lttng_ust_tp_init(void)
-{
- if (uatomic_xchg(&initialized, 1) == 1)
- return;
- ust_err_init();
- check_weak_hidden();
-}
-
-void lttng_ust_tp_exit(void)
-{
- initialized = 0;
-}
-
-/*
- * Create the wrapper symbols.
- */
-#undef tp_rcu_read_lock
-#undef tp_rcu_read_unlock
-#undef tp_rcu_dereference
-
-void tp_rcu_read_lock(void);
-void tp_rcu_read_lock(void)
-{
- lttng_ust_urcu_read_lock();
-}
-
-void tp_rcu_read_unlock(void);
-void tp_rcu_read_unlock(void)
-{
- lttng_ust_urcu_read_unlock();
-}
-
-void *tp_rcu_dereference_sym(void *p);
-void *tp_rcu_dereference_sym(void *p)
-{
- return lttng_ust_rcu_dereference(p);
-}
-
-/*
- * Programs that have threads that survive after they exit, and therefore call
- * library destructors, should disable the tracepoint destructors by calling
- * tp_disable_destructors(). This will leak the tracepoint
- * instrumentation library shared object, leaving its teardown to the operating
- * system process teardown.
- *
- * To access and/or modify this value, users need to use a combination of
- * dlopen(3) and dlsym(3) to get a handle on the
- * tp_disable_destructors and tp_get_destructors_state symbols below.
- */
-void tp_disable_destructors(void);
-void tp_disable_destructors(void)
-{
- uatomic_set(&tracepoint_destructors_state, 0);
-}
-
-/*
- * Returns 1 if the destructors are enabled and should be executed.
- * Returns 0 if the destructors are disabled.
- */
-int tp_get_destructors_state(void);
-int tp_get_destructors_state(void)
-{
- return uatomic_read(&tracepoint_destructors_state);
-}
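The comment above prescribes dlopen(3)/dlsym(3) to reach tp_disable_destructors() and tp_get_destructors_state(). A minimal illustrative sketch; using RTLD_DEFAULT is an assumption, and an explicit dlopen() handle on the tracepoint library works the same way.

    #include <dlfcn.h>

    static void disable_tracepoint_destructors(void)
    {
    	void (*tp_disable)(void);

    	/* Look the symbol up in the already-loaded tracepoint library. */
    	tp_disable = (void (*)(void)) dlsym(RTLD_DEFAULT, "tp_disable_destructors");
    	if (tp_disable)
    		tp_disable();
    }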
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2021 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#include "common/logging.h"
-#include "common/ust-fd.h"
-
-static
-void lttng_ust_common_init(void)
- __attribute__((constructor));
-static
-void lttng_ust_common_init(void)
-{
- /* Initialize logging for liblttng-ust-common */
- ust_err_init();
-
- /*
-	 * Initialize the fd-tracker; other libraries using it should also call
-	 * this in their constructors in case they are executed before this one.
- */
- lttng_ust_init_fd_tracker();
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <stdint.h>
-#include <stddef.h>
-#include <stdlib.h>
-
-#include "context-internal.h"
-#include "ust-events-internal.h"
-#include "common/logging.h"
-#include "lttng-tracer-core.h"
-#include "lttng-rb-clients.h"
-#include "lttng-counter-client.h"
-#include "jhash.h"
-
-static CDS_LIST_HEAD(lttng_transport_list);
-static CDS_LIST_HEAD(lttng_counter_transport_list);
-
-struct lttng_transport *lttng_ust_transport_find(const char *name)
-{
- struct lttng_transport *transport;
-
- cds_list_for_each_entry(transport, <tng_transport_list, node) {
- if (!strcmp(transport->name, name))
- return transport;
- }
- return NULL;
-}
-
-struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
-{
- struct lttng_counter_transport *transport;
-
- cds_list_for_each_entry(transport, <tng_counter_transport_list, node) {
- if (!strcmp(transport->name, name))
- return transport;
- }
- return NULL;
-}
-
-/**
- * lttng_transport_register - LTT transport registration
- * @transport: transport structure
- *
- * Registers a transport which can be used as output to extract the data out of
- * LTTng. Called with ust_lock held.
- */
-void lttng_transport_register(struct lttng_transport *transport)
-{
- cds_list_add_tail(&transport->node, <tng_transport_list);
-}
-
-/**
- * lttng_transport_unregister - LTT transport unregistration
- * @transport: transport structure
- * Called with ust_lock held.
- */
-void lttng_transport_unregister(struct lttng_transport *transport)
-{
- cds_list_del(&transport->node);
-}
-
-/**
- * lttng_counter_transport_register - LTTng counter transport registration
- * @transport: transport structure
- *
- * Registers a counter transport which can be used as output to extract
- * the data out of LTTng. Called with ust_lock held.
- */
-void lttng_counter_transport_register(struct lttng_counter_transport *transport)
-{
- cds_list_add_tail(&transport->node, <tng_counter_transport_list);
-}
-
-/**
- * lttng_counter_transport_unregister - LTTng counter transport unregistration
- * @transport: transport structure
- * Called with ust_lock held.
- */
-void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
-{
- cds_list_del(&transport->node);
-}
-
-/*
- * Needed by comm layer.
- */
-struct lttng_enum *lttng_ust_enum_get_from_desc(struct lttng_ust_session *session,
- const struct lttng_ust_enum_desc *enum_desc)
-{
- struct lttng_enum *_enum;
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- size_t name_len = strlen(enum_desc->name);
- uint32_t hash;
-
- hash = jhash(enum_desc->name, name_len, 0);
- head = &session->priv->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
- cds_hlist_for_each_entry(_enum, node, head, hlist) {
- assert(_enum->desc);
- if (_enum->desc == enum_desc)
- return _enum;
- }
- return NULL;
-}
-
-size_t lttng_ust_dummy_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
- size += sizeof(char); /* tag */
- return size;
-}
-
-void lttng_ust_dummy_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- char sel_char = (char) LTTNG_UST_DYNAMIC_TYPE_NONE;
-
- chan->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(sel_char));
-}
-
-void lttng_ust_dummy_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->sel = LTTNG_UST_DYNAMIC_TYPE_NONE;
-}
-
-int lttng_context_is_app(const char *name)
-{
- if (strncmp(name, "$app.", strlen("$app.")) != 0) {
- return 0;
- }
- return 1;
-}
-
-struct lttng_ust_channel_buffer *lttng_ust_alloc_channel_buffer(void)
-{
- struct lttng_ust_channel_buffer *lttng_chan_buf;
- struct lttng_ust_channel_common *lttng_chan_common;
- struct lttng_ust_channel_buffer_private *lttng_chan_buf_priv;
-
- lttng_chan_buf = zmalloc(sizeof(struct lttng_ust_channel_buffer));
- if (!lttng_chan_buf)
- goto lttng_chan_buf_error;
- lttng_chan_buf->struct_size = sizeof(struct lttng_ust_channel_buffer);
- lttng_chan_common = zmalloc(sizeof(struct lttng_ust_channel_common));
- if (!lttng_chan_common)
- goto lttng_chan_common_error;
- lttng_chan_common->struct_size = sizeof(struct lttng_ust_channel_common);
- lttng_chan_buf_priv = zmalloc(sizeof(struct lttng_ust_channel_buffer_private));
- if (!lttng_chan_buf_priv)
- goto lttng_chan_buf_priv_error;
- lttng_chan_buf->parent = lttng_chan_common;
- lttng_chan_common->type = LTTNG_UST_CHANNEL_TYPE_BUFFER;
- lttng_chan_common->child = lttng_chan_buf;
- lttng_chan_buf->priv = lttng_chan_buf_priv;
- lttng_chan_common->priv = <tng_chan_buf_priv->parent;
- lttng_chan_buf_priv->pub = lttng_chan_buf;
- lttng_chan_buf_priv->parent.pub = lttng_chan_common;
-
- return lttng_chan_buf;
-
-lttng_chan_buf_priv_error:
- free(lttng_chan_common);
-lttng_chan_common_error:
- free(lttng_chan_buf);
-lttng_chan_buf_error:
- return NULL;
-}
-
-void lttng_ust_free_channel_common(struct lttng_ust_channel_common *chan)
-{
- switch (chan->type) {
- case LTTNG_UST_CHANNEL_TYPE_BUFFER:
- {
- struct lttng_ust_channel_buffer *chan_buf;
-
- chan_buf = (struct lttng_ust_channel_buffer *)chan->child;
- free(chan_buf->parent);
- free(chan_buf->priv);
- free(chan_buf);
- break;
- }
- default:
- abort();
- }
-}
-
-void lttng_ust_ring_buffer_clients_init(void)
-{
- lttng_ring_buffer_metadata_client_init();
- lttng_ring_buffer_client_overwrite_init();
- lttng_ring_buffer_client_overwrite_rt_init();
- lttng_ring_buffer_client_discard_init();
- lttng_ring_buffer_client_discard_rt_init();
-}
-
-void lttng_ust_ring_buffer_clients_exit(void)
-{
- lttng_ring_buffer_client_discard_rt_exit();
- lttng_ring_buffer_client_discard_exit();
- lttng_ring_buffer_client_overwrite_rt_exit();
- lttng_ring_buffer_client_overwrite_exit();
- lttng_ring_buffer_metadata_client_exit();
-}
-
-void lttng_ust_counter_clients_init(void)
-{
- lttng_counter_client_percpu_64_modular_init();
- lttng_counter_client_percpu_32_modular_init();
-}
-
-void lttng_ust_counter_clients_exit(void)
-{
- lttng_counter_client_percpu_32_modular_exit();
- lttng_counter_client_percpu_64_modular_exit();
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright 2019 (c) Francis Deslauriers <francis.deslauriers@efficios.com>
- */
-
-#ifndef _LTTNG_UST_EVENTS_INTERNAL_H
-#define _LTTNG_UST_EVENTS_INTERNAL_H
-
-#include <limits.h>
-#include <stdint.h>
-
-#include <urcu/list.h>
-#include <urcu/hlist.h>
-
-#include <lttng/ust-events.h>
-
-#include "common/macros.h"
-#include "common/ust-context-provider.h"
-
-struct lttng_ust_abi_obj;
-struct lttng_event_notifier_group;
-
-union lttng_ust_abi_args {
- struct {
- void *chan_data;
- int wakeup_fd;
- } channel;
- struct {
- int shm_fd;
- int wakeup_fd;
- } stream;
- struct {
- struct lttng_ust_abi_field_iter entry;
- } field_list;
- struct {
- char *ctxname;
- } app_context;
- struct {
- int event_notifier_notif_fd;
- } event_notifier_handle;
- struct {
- void *counter_data;
- } counter;
- struct {
- int shm_fd;
- } counter_shm;
-};
-
-struct lttng_ust_abi_objd_ops {
- long (*cmd)(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *args, void *owner);
- int (*release)(int objd);
-};
-
-enum lttng_enabler_format_type {
- LTTNG_ENABLER_FORMAT_STAR_GLOB,
- LTTNG_ENABLER_FORMAT_EVENT,
-};
-
-/*
- * Enabler field, within whatever object is enabling an event. Target of
- * backward reference.
- */
-struct lttng_enabler {
- enum lttng_enabler_format_type format_type;
-
- /* head list of struct lttng_ust_filter_bytecode_node */
- struct cds_list_head filter_bytecode_head;
- /* head list of struct lttng_ust_excluder_node */
- struct cds_list_head excluder_head;
-
- struct lttng_ust_abi_event event_param;
- unsigned int enabled:1;
-};
-
-struct lttng_event_enabler {
- struct lttng_enabler base;
- struct cds_list_head node; /* per-session list of enablers */
- struct lttng_ust_channel_buffer *chan;
- /*
- * Unused, but kept around to make it explicit that the tracer can do
- * it.
- */
- struct lttng_ust_ctx *ctx;
-};
-
-struct lttng_event_notifier_enabler {
- struct lttng_enabler base;
- uint64_t error_counter_index;
- struct cds_list_head node; /* per-app list of event_notifier enablers */
- struct cds_list_head capture_bytecode_head;
- struct lttng_event_notifier_group *group; /* weak ref */
- uint64_t user_token; /* User-provided token */
- uint64_t num_captures;
-};
-
-enum lttng_ust_bytecode_type {
- LTTNG_UST_BYTECODE_TYPE_FILTER,
- LTTNG_UST_BYTECODE_TYPE_CAPTURE,
-};
-
-struct lttng_ust_bytecode_node {
- enum lttng_ust_bytecode_type type;
- struct cds_list_head node;
- struct lttng_enabler *enabler;
- struct {
- uint32_t len;
- uint32_t reloc_offset;
- uint64_t seqnum;
- char data[];
- } bc;
-};
-
-/*
- * Bytecode interpreter return value.
- */
-enum lttng_ust_bytecode_interpreter_ret {
- LTTNG_UST_BYTECODE_INTERPRETER_ERROR = -1,
- LTTNG_UST_BYTECODE_INTERPRETER_OK = 0,
-};
-
-struct lttng_interpreter_output;
-struct lttng_ust_bytecode_runtime_private;
-
-enum lttng_ust_bytecode_filter_result {
- LTTNG_UST_BYTECODE_FILTER_ACCEPT = 0,
- LTTNG_UST_BYTECODE_FILTER_REJECT = 1,
-};
-
-struct lttng_ust_bytecode_filter_ctx {
- enum lttng_ust_bytecode_filter_result result;
-};
-
-struct lttng_ust_excluder_node {
- struct cds_list_head node;
- struct lttng_enabler *enabler;
- /*
-	 * struct lttng_ust_event_exclusion has a variable-sized array,
-	 * which must be the last field.
- */
- struct lttng_ust_abi_event_exclusion excluder;
-};
-
-/* Data structures used by the tracer. */
-
-struct tp_list_entry {
- struct lttng_ust_abi_tracepoint_iter tp;
- struct cds_list_head head;
-};
-
-struct lttng_ust_tracepoint_list {
- struct tp_list_entry *iter;
- struct cds_list_head head;
-};
-
-struct tp_field_list_entry {
- struct lttng_ust_abi_field_iter field;
- struct cds_list_head head;
-};
-
-struct lttng_ust_field_list {
- struct tp_field_list_entry *iter;
- struct cds_list_head head;
-};
-
-/*
- * Objects in a linked-list of enablers, owned by an event or event_notifier.
- * This is used because an event (or an event_notifier) can be enabled by more
- * than one enabler and we want a quick way to iterate over all enablers of an
- * object.
- *
- * For example, event rules "my_app:a*" and "my_app:ab*" will both match the
- * event with the name "my_app:abc".
- */
-struct lttng_enabler_ref {
- struct cds_list_head node; /* enabler ref list */
- struct lttng_enabler *ref; /* backward ref */
-};
-
-#define LTTNG_COUNTER_DIMENSION_MAX 8
-struct lttng_counter_dimension {
- uint64_t size;
- uint64_t underflow_index;
- uint64_t overflow_index;
- uint8_t has_underflow;
- uint8_t has_overflow;
-};
-
-struct lttng_counter_ops {
- struct lib_counter *(*counter_create)(size_t nr_dimensions,
- const struct lttng_counter_dimension *dimensions,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds,
- bool is_daemon);
- void (*counter_destroy)(struct lib_counter *counter);
- int (*counter_add)(struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t v);
- int (*counter_read)(struct lib_counter *counter,
- const size_t *dimension_indexes, int cpu,
- int64_t *value, bool *overflow, bool *underflow);
- int (*counter_aggregate)(struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t *value,
- bool *overflow, bool *underflow);
- int (*counter_clear)(struct lib_counter *counter, const size_t *dimension_indexes);
-};
-
-struct lttng_counter {
- int objd;
- struct lttng_event_notifier_group *event_notifier_group; /* owner */
- struct lttng_counter_transport *transport;
- struct lib_counter *counter;
- struct lttng_counter_ops *ops;
-};
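The callbacks in `struct lttng_counter_ops` act as a per-transport vtable reached through `struct lttng_counter`; below is a minimal hypothetical sketch (helper name, single dimension and increment value are assumptions, not part of this patch) of driving it:

	/* Hypothetical helper, not part of the patch: bump one slot of a
	 * one-dimensional counter through its ops vtable. */
	static int bump_counter_slot(struct lttng_counter *counter, size_t slot)
	{
		const size_t dimension_indexes[1] = { slot };

		/* Dispatch to the client implementation selected at creation time. */
		return counter->ops->counter_add(counter->counter, dimension_indexes, 1);
	}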
-
-#define LTTNG_UST_EVENT_HT_BITS 12
-#define LTTNG_UST_EVENT_HT_SIZE (1U << LTTNG_UST_EVENT_HT_BITS)
-
-struct lttng_ust_event_ht {
- struct cds_hlist_head table[LTTNG_UST_EVENT_HT_SIZE];
-};
-
-#define LTTNG_UST_EVENT_NOTIFIER_HT_BITS 12
-#define LTTNG_UST_EVENT_NOTIFIER_HT_SIZE (1U << LTTNG_UST_EVENT_NOTIFIER_HT_BITS)
-struct lttng_ust_event_notifier_ht {
- struct cds_hlist_head table[LTTNG_UST_EVENT_NOTIFIER_HT_SIZE];
-};
-
-#define LTTNG_UST_ENUM_HT_BITS 12
-#define LTTNG_UST_ENUM_HT_SIZE (1U << LTTNG_UST_ENUM_HT_BITS)
-
-struct lttng_ust_enum_ht {
- struct cds_hlist_head table[LTTNG_UST_ENUM_HT_SIZE];
-};
-
-struct lttng_event_notifier_group {
- int objd;
- void *owner;
- int notification_fd;
- struct cds_list_head node; /* Event notifier group handle list */
- struct cds_list_head enablers_head;
- struct cds_list_head event_notifiers_head; /* list of event_notifiers */
- struct lttng_ust_event_notifier_ht event_notifiers_ht; /* hashtable of event_notifiers */
- struct lttng_ust_ctx *ctx; /* contexts for filters. */
-
- struct lttng_counter *error_counter;
- size_t error_counter_len;
-};
-
-struct lttng_transport {
- const char *name;
- struct cds_list_head node;
- struct lttng_ust_channel_buffer_ops ops;
- const struct lttng_ust_lib_ring_buffer_config *client_config;
-};
-
-struct lttng_counter_transport {
- const char *name;
- struct cds_list_head node;
- struct lttng_counter_ops ops;
- const struct lib_counter_config *client_config;
-};
-
-struct lttng_ust_event_common_private {
- struct lttng_ust_event_common *pub; /* Public event interface */
-
- const struct lttng_ust_event_desc *desc;
- /* Backward references: list of lttng_enabler_ref (ref to enablers) */
- struct cds_list_head enablers_ref_head;
- int registered; /* has reg'd tracepoint probe */
- uint64_t user_token;
-
- int has_enablers_without_filter_bytecode;
- /* list of struct lttng_ust_bytecode_runtime, sorted by seqnum */
- struct cds_list_head filter_bytecode_runtime_head;
-};
-
-struct lttng_ust_event_recorder_private {
- struct lttng_ust_event_common_private parent;
-
- struct lttng_ust_event_recorder *pub; /* Public event interface */
- struct cds_list_head node; /* Event recorder list */
- struct cds_hlist_node hlist; /* Hash table of event recorders */
- struct lttng_ust_ctx *ctx;
- unsigned int id;
-};
-
-struct lttng_ust_event_notifier_private {
- struct lttng_ust_event_common_private parent;
-
- struct lttng_ust_event_notifier *pub; /* Public event notifier interface */
- struct lttng_event_notifier_group *group; /* weak ref */
- size_t num_captures; /* Needed to allocate the msgpack array. */
- uint64_t error_counter_index;
- struct cds_list_head node; /* Event notifier list */
- struct cds_hlist_node hlist; /* Hash table of event notifiers */
- struct cds_list_head capture_bytecode_runtime_head;
-};
-
-struct lttng_ust_bytecode_runtime {
- enum lttng_ust_bytecode_type type;
- struct lttng_ust_bytecode_node *bc;
- int link_failed;
- int (*interpreter_func)(struct lttng_ust_bytecode_runtime *bytecode_runtime,
- const char *interpreter_stack_data,
- void *ctx);
- struct cds_list_head node; /* list of bytecode runtime in event */
- /*
-	 * Pointer to a URCU-protected pointer owned by a `struct
-	 * lttng_session` or `struct lttng_event_notifier_group`.
- */
- struct lttng_ust_ctx **pctx;
-};
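A hypothetical sketch of evaluating one runtime from this list (helper name and caller-prepared stack data are assumptions): the interpreter reports success through its return value and the accept/reject verdict through the filter context:

	#include <stdbool.h>

	/* Hypothetical helper, not part of the patch. */
	static bool bytecode_filter_accepts(struct lttng_ust_bytecode_runtime *runtime,
			const char *interpreter_stack_data)
	{
		struct lttng_ust_bytecode_filter_ctx filter_ctx;

		if (runtime->interpreter_func(runtime, interpreter_stack_data,
				&filter_ctx) != LTTNG_UST_BYTECODE_INTERPRETER_OK)
			return false;	/* Interpreter error: reject. */
		return filter_ctx.result == LTTNG_UST_BYTECODE_FILTER_ACCEPT;
	}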
-
-struct lttng_ust_session_private {
- struct lttng_ust_session *pub; /* Public session interface */
-
- int been_active; /* Been active ? */
- int objd; /* Object associated */
- struct cds_list_head chan_head; /* Channel list head */
- struct cds_list_head events_head; /* list of events */
- struct cds_list_head node; /* Session list */
-
- /* List of enablers */
- struct cds_list_head enablers_head;
- struct lttng_ust_event_ht events_ht; /* ht of events */
- void *owner; /* object owner */
- int tstate:1; /* Transient enable state */
-
- int statedump_pending:1;
-
- struct lttng_ust_enum_ht enums_ht; /* ht of enumerations */
- struct cds_list_head enums_head;
- struct lttng_ust_ctx *ctx; /* contexts for filters. */
-
- unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
- bool uuid_set; /* Is uuid set ? */
-};
-
-struct lttng_enum {
- const struct lttng_ust_enum_desc *desc;
- struct lttng_ust_session *session;
- struct cds_list_head node; /* Enum list in session */
- struct cds_hlist_node hlist; /* Session ht of enums */
- uint64_t id; /* Enumeration ID in sessiond */
-};
-
-struct lttng_ust_shm_handle;
-
-struct lttng_ust_channel_buffer_ops_private {
- struct lttng_ust_channel_buffer_ops *pub; /* Public channel buffer ops interface */
-
- struct lttng_ust_channel_buffer *(*channel_create)(const char *name,
- void *buf_addr,
- size_t subbuf_size, size_t num_subbuf,
- unsigned int switch_timer_interval,
- unsigned int read_timer_interval,
- unsigned char *uuid,
- uint32_t chan_id,
- const int *stream_fds, int nr_stream_fds,
- int64_t blocking_timeout);
- void (*channel_destroy)(struct lttng_ust_channel_buffer *chan);
- /*
- * packet_avail_size returns the available size in the current
- * packet. Note that the size returned is only a hint, since it
- * may change due to concurrent writes.
- */
- size_t (*packet_avail_size)(struct lttng_ust_channel_buffer *chan);
- int (*is_finalized)(struct lttng_ust_channel_buffer *chan);
- int (*is_disabled)(struct lttng_ust_channel_buffer *chan);
- int (*flush_buffer)(struct lttng_ust_channel_buffer *chan);
-};
-
-struct lttng_ust_channel_common_private {
- struct lttng_ust_channel_common *pub; /* Public channel interface */
-
- int objd; /* Object associated with channel. */
- int tstate:1; /* Transient enable state */
-};
-
-struct lttng_ust_channel_buffer_private {
- struct lttng_ust_channel_common_private parent;
-
- struct lttng_ust_channel_buffer *pub; /* Public channel buffer interface */
- struct cds_list_head node; /* Channel list in session */
- int header_type; /* 0: unset, 1: compact, 2: large */
- unsigned int id; /* Channel ID */
- enum lttng_ust_abi_chan_type type;
- struct lttng_ust_ctx *ctx;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan; /* Ring buffer channel */
- unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
-};
-
-/*
- * IMPORTANT: this structure is part of the ABI between the consumer
- * daemon and the UST library within traced applications. Changing it
- * breaks the UST communication protocol.
- *
- * TODO: remove unused fields on next UST communication protocol
- * breaking update.
- */
-struct lttng_ust_abi_channel_config {
- void *unused1;
- int unused2;
- void *unused3;
- void *unused4;
- int unused5;
- struct cds_list_head unused6;
- void *unused7;
- int unused8;
- void *unused9;
-
- /* Channel ID */
- unsigned int id;
- enum lttng_ust_abi_chan_type unused10;
- unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
- int unused11:1;
-};
-
-/* Global (filter), event and channel contexts. */
-struct lttng_ust_ctx {
- struct lttng_ust_ctx_field *fields;
- unsigned int nr_fields;
- unsigned int allocated_fields;
- unsigned int largest_align;
-};
-
-struct lttng_ust_registered_probe {
- const struct lttng_ust_probe_desc *desc;
-
- struct cds_list_head head; /* chain registered probes */
- struct cds_list_head lazy_init_head;
- int lazy; /* lazy registration */
-};
-
-/*
- * Context field
- */
-
-struct lttng_ust_ctx_field {
- const struct lttng_ust_event_field *event_field;
- size_t (*get_size)(void *priv, size_t offset);
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan);
- void (*get_value)(void *priv, struct lttng_ust_ctx_value *value);
- void (*destroy)(void *priv);
- void *priv;
-};
-
-static inline
-const struct lttng_ust_type_integer *lttng_ust_get_type_integer(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_integer)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_integer, parent);
-}
-
-static inline
-const struct lttng_ust_type_float *lttng_ust_get_type_float(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_float)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_float, parent);
-}
-
-static inline
-const struct lttng_ust_type_string *lttng_ust_get_type_string(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_string)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_string, parent);
-}
-
-static inline
-const struct lttng_ust_type_enum *lttng_ust_get_type_enum(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_enum)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_enum, parent);
-}
-
-static inline
-const struct lttng_ust_type_array *lttng_ust_get_type_array(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_array)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_array, parent);
-}
-
-static inline
-const struct lttng_ust_type_sequence *lttng_ust_get_type_sequence(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_sequence)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_sequence, parent);
-}
-
-static inline
-const struct lttng_ust_type_struct *lttng_ust_get_type_struct(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_struct)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_struct, parent);
-}
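Each accessor above returns NULL when the type tag does not match, so callers can downcast before touching type-specific members; a minimal sketch (field argument and output format are assumptions, not part of this patch):

	#include <stdio.h>

	/* Hypothetical helper, not part of the patch. */
	static void print_integer_field(const struct lttng_ust_event_field *field)
	{
		const struct lttng_ust_type_integer *itype;

		itype = lttng_ust_get_type_integer(field->type);
		if (!itype)
			return;	/* Not an integer field. */
		printf("%s: %u-bit integer, base %u\n",
			field->name, itype->size, itype->base);
	}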
-
-#define lttng_ust_static_type_integer(_size, _alignment, _signedness, _byte_order, _base) \
- ((const struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_type_integer, { \
- .parent = { \
- .type = lttng_ust_type_integer, \
- }, \
- .struct_size = sizeof(struct lttng_ust_type_integer), \
- .size = (_size), \
- .alignment = (_alignment), \
- .signedness = (_signedness), \
- .reverse_byte_order = (_byte_order) != BYTE_ORDER, \
- .base = (_base), \
- }))
-
-#define lttng_ust_static_type_array_text(_length) \
- ((const struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_type_array, { \
- .parent = { \
- .type = lttng_ust_type_array, \
- }, \
- .struct_size = sizeof(struct lttng_ust_type_array), \
- .length = (_length), \
- .alignment = 0, \
- .encoding = lttng_ust_string_encoding_UTF8, \
- .elem_type = lttng_ust_static_type_integer(sizeof(char) * CHAR_BIT, \
- lttng_ust_rb_alignof(char) * CHAR_BIT, lttng_ust_is_signed_type(char), \
- BYTE_ORDER, 10), \
- }))
-
-#define lttng_ust_static_event_field(_name, _type, _nowrite, _nofilter) \
- __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, { \
- .struct_size = sizeof(struct lttng_ust_event_field), \
- .name = (_name), \
- .type = (_type), \
- .nowrite = (_nowrite), \
- .nofilter = (_nofilter), \
- })
-
-#define lttng_ust_static_ctx_field(_event_field, _get_size, _record, _get_value, _destroy, _priv) \
- __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_ctx_field, { \
- .event_field = (_event_field), \
- .get_size = (_get_size), \
- .record = (_record), \
- .get_value = (_get_value), \
- .destroy = (_destroy), \
- .priv = (_priv), \
- })
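A hypothetical sketch combining the compound-literal helpers above to declare one 32-bit signed integer field (array name, field name and element type are assumptions; <stdint.h> and <limits.h> are assumed to be in scope):

	static const struct lttng_ust_event_field *const my_probe_fields[] = {
		lttng_ust_static_event_field("count",
			lttng_ust_static_type_integer(sizeof(int32_t) * CHAR_BIT,
				lttng_ust_rb_alignof(int32_t) * CHAR_BIT,
				lttng_ust_is_signed_type(int32_t),
				BYTE_ORDER, 10),
			0, 0),	/* nowrite = 0, nofilter = 0 */
	};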
-
-static inline
-struct lttng_enabler *lttng_event_enabler_as_enabler(
- struct lttng_event_enabler *event_enabler)
-{
- return &event_enabler->base;
-}
-
-static inline
-struct lttng_enabler *lttng_event_notifier_enabler_as_enabler(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- return &event_notifier_enabler->base;
-}
-
-/*
- * Allocate and initialize a `struct lttng_event_enabler` object.
- *
- * On success, returns a `struct lttng_event_enabler`,
- * On memory error, returns NULL.
- */
-struct lttng_event_enabler *lttng_event_enabler_create(
- enum lttng_enabler_format_type format_type,
- struct lttng_ust_abi_event *event_param,
- struct lttng_ust_channel_buffer *chan)
- __attribute__((visibility("hidden")));
-
-/*
- * Destroy a `struct lttng_event_enabler` object.
- */
-void lttng_event_enabler_destroy(struct lttng_event_enabler *enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Enable a `struct lttng_event_enabler` object and all events related to this
- * enabler.
- */
-int lttng_event_enabler_enable(struct lttng_event_enabler *enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Disable a `struct lttng_event_enabler` object and all events related to this
- * enabler.
- */
-int lttng_event_enabler_disable(struct lttng_event_enabler *enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach filter bytecode program to `struct lttng_event_enabler` and all
- * events related to this enabler.
- */
-int lttng_event_enabler_attach_filter_bytecode(
- struct lttng_event_enabler *enabler,
- struct lttng_ust_bytecode_node **bytecode)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach an application context to an event enabler.
- *
- * Not implemented.
- */
-int lttng_event_enabler_attach_context(struct lttng_event_enabler *enabler,
- struct lttng_ust_abi_context *ctx)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach exclusion list to `struct lttng_event_enabler` and all
- * events related to this enabler.
- */
-int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *enabler,
- struct lttng_ust_excluder_node **excluder)
- __attribute__((visibility("hidden")));
-
-/*
- * Synchronize bytecodes for the enabler and the instance (event or
- * event_notifier).
- *
- * This function goes over all bytecode programs of the enabler (event or
- * event_notifier enabler) to ensure each is linked to the provided instance.
- */
-void lttng_enabler_link_bytecode(const struct lttng_ust_event_desc *event_desc,
- struct lttng_ust_ctx **ctx,
- struct cds_list_head *instance_bytecode_runtime_head,
- struct cds_list_head *enabler_bytecode_runtime_head)
- __attribute__((visibility("hidden")));
-
-/*
- * Allocate and initialize a `struct lttng_event_notifier_group` object.
- *
- * On success, returns a `struct lttng_event_notifier_group`,
- * On memory error, returns NULL.
- */
-struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
- __attribute__((visibility("hidden")));
-
-/*
- * Destroy a `struct lttng_event_notifier_group` object.
- */
-void lttng_event_notifier_group_destroy(
- struct lttng_event_notifier_group *event_notifier_group)
- __attribute__((visibility("hidden")));
-
-/*
- * Allocate and initialize a `struct lttng_event_notifier_enabler` object.
- *
- * On success, returns a `struct lttng_event_notifier_enabler`,
- * On memory error, returns NULL.
- */
-struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
- struct lttng_event_notifier_group *event_notifier_group,
- enum lttng_enabler_format_type format_type,
- struct lttng_ust_abi_event_notifier *event_notifier_param)
- __attribute__((visibility("hidden")));
-
-/*
- * Destroy a `struct lttng_event_notifier_enabler` object.
- */
-void lttng_event_notifier_enabler_destroy(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Enable a `struct lttng_event_notifier_enabler` object and all event
- * notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_enable(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Disable a `struct lttng_event_notifier_enabler` object and all event
- * notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_disable(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach filter bytecode program to `struct lttng_event_notifier_enabler` and
- * all event notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_attach_filter_bytecode(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_bytecode_node **bytecode)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach capture bytecode program to `struct lttng_event_notifier_enabler` and
- * all event_notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_attach_capture_bytecode(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_bytecode_node **bytecode)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach exclusion list to `struct lttng_event_notifier_enabler` and all
- * event notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_attach_exclusion(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_excluder_node **excluder)
- __attribute__((visibility("hidden")));
-
-void lttng_free_event_filter_runtime(struct lttng_ust_event_common *event)
- __attribute__((visibility("hidden")));
-
-/*
- * Connect the probe on all enablers matching this event description.
- * Called on library load.
- */
-int lttng_fix_pending_event_notifiers(void)
- __attribute__((visibility("hidden")));
-
-struct lttng_counter *lttng_ust_counter_create(
- const char *counter_transport_name,
- size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
- __attribute__((visibility("hidden")));
-
-#ifdef HAVE_LINUX_PERF_EVENT_H
-
-int lttng_add_perf_counter_to_ctx(uint32_t type,
- uint64_t config,
- const char *name,
- struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_perf_counter_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_perf_counter_exit(void)
- __attribute__((visibility("hidden")));
-
-#else /* #ifdef HAVE_LINUX_PERF_EVENT_H */
-
-static inline
-int lttng_add_perf_counter_to_ctx(uint32_t type,
- uint64_t config,
- const char *name,
- struct lttng_ust_ctx **ctx)
-{
- return -ENOSYS;
-}
-static inline
-int lttng_perf_counter_init(void)
-{
- return 0;
-}
-static inline
-void lttng_perf_counter_exit(void)
-{
-}
-#endif /* #else #ifdef HAVE_LINUX_PERF_EVENT_H */
-
-int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list)
- __attribute__((visibility("hidden")));
-
-void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list)
- __attribute__((visibility("hidden")));
-
-int lttng_probes_get_field_list(struct lttng_ust_field_list *list)
- __attribute__((visibility("hidden")));
-
-void lttng_probes_prune_field_list(struct lttng_ust_field_list *list)
- __attribute__((visibility("hidden")));
-
-struct lttng_ust_abi_tracepoint_iter *
- lttng_ust_tracepoint_list_get_iter_next(struct lttng_ust_tracepoint_list *list)
- __attribute__((visibility("hidden")));
-
-struct lttng_ust_abi_field_iter *
- lttng_ust_field_list_get_iter_next(struct lttng_ust_field_list *list)
- __attribute__((visibility("hidden")));
-
-struct lttng_ust_session *lttng_session_create(void)
- __attribute__((visibility("hidden")));
-
-int lttng_session_enable(struct lttng_ust_session *session)
- __attribute__((visibility("hidden")));
-
-int lttng_session_disable(struct lttng_ust_session *session)
- __attribute__((visibility("hidden")));
-
-int lttng_session_statedump(struct lttng_ust_session *session)
- __attribute__((visibility("hidden")));
-
-void lttng_session_destroy(struct lttng_ust_session *session)
- __attribute__((visibility("hidden")));
-
-/*
- * Called with ust lock held.
- */
-int lttng_session_active(void)
- __attribute__((visibility("hidden")));
-
-struct cds_list_head *lttng_get_sessions(void)
- __attribute__((visibility("hidden")));
-
-void lttng_handle_pending_statedump(void *owner)
- __attribute__((visibility("hidden")));
-
-int lttng_channel_enable(struct lttng_ust_channel_common *lttng_channel)
- __attribute__((visibility("hidden")));
-
-int lttng_channel_disable(struct lttng_ust_channel_common *lttng_channel)
- __attribute__((visibility("hidden")));
-
-void lttng_transport_register(struct lttng_transport *transport)
- __attribute__((visibility("hidden")));
-
-void lttng_transport_unregister(struct lttng_transport *transport)
- __attribute__((visibility("hidden")));
-
-/* This is ABI between liblttng-ust and liblttng-ust-ctl */
-struct lttng_transport *lttng_ust_transport_find(const char *name);
-
-/* This is ABI between liblttng-ust and liblttng-ust-dl */
-void lttng_ust_dl_update(void *ip);
-
-void lttng_probe_provider_unregister_events(const struct lttng_ust_probe_desc *desc)
- __attribute__((visibility("hidden")));
-
-int lttng_fix_pending_events(void)
- __attribute__((visibility("hidden")));
-
-struct cds_list_head *lttng_get_probe_list_head(void)
- __attribute__((visibility("hidden")));
-
-struct lttng_enum *lttng_ust_enum_get_from_desc(struct lttng_ust_session *session,
- const struct lttng_ust_enum_desc *enum_desc)
- __attribute__((visibility("hidden")));
-
-int lttng_abi_create_root_handle(void)
- __attribute__((visibility("hidden")));
-
-const struct lttng_ust_abi_objd_ops *lttng_ust_abi_objd_ops(int id)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_abi_objd_unref(int id, int is_owner)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_abi_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_abi_events_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_abi_objd_table_owner_cleanup(void *owner)
- __attribute__((visibility("hidden")));
-
-struct lttng_ust_channel_buffer *lttng_ust_alloc_channel_buffer(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_free_channel_common(struct lttng_ust_channel_common *chan)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_interpret_event_filter(struct lttng_ust_event_common *event,
- const char *interpreter_stack_data,
- void *filter_ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_session_uuid_validate(struct lttng_ust_session *session,
- unsigned char *uuid)
- __attribute__((visibility("hidden")));
-
-bool lttng_ust_validate_event_name(const struct lttng_ust_event_desc *desc)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_format_event_name(const struct lttng_ust_event_desc *desc,
- char *name)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_add_app_context_to_ctx_rcu(const char *name, struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_context_set_provider_rcu(struct lttng_ust_ctx **_ctx,
- const char *name,
- size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan),
- void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
- void *priv)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_context_set_session_provider(const char *name,
- size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan),
- void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
- void *priv)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_UST_EVENTS_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#define TRACEPOINT_CREATE_PROBES
-#define TP_IP_PARAM ip
-#include "ust_lib.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_lib
-
-#if !defined(_TRACEPOINT_UST_LIB_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_UST_LIB_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stddef.h>
-#include <stdint.h>
-#include <unistd.h>
-
-#define LTTNG_UST_LIB_PROVIDER
-#include <lttng/tracepoint.h>
-
-TRACEPOINT_EVENT(lttng_ust_lib, load,
- TP_ARGS(void *, ip, void *, baddr, const char*, path,
- uint64_t, memsz, uint8_t, has_build_id,
- uint8_t, has_debug_link),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_integer(uint64_t, memsz, memsz)
- ctf_string(path, path)
- ctf_integer(uint8_t, has_build_id, has_build_id)
- ctf_integer(uint8_t, has_debug_link, has_debug_link)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_lib, build_id,
- TP_ARGS(
- void *, ip,
- void *, baddr,
- uint8_t *, build_id,
- size_t, build_id_len
- ),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_sequence_hex(uint8_t, build_id, build_id,
- size_t, build_id_len)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_lib, debug_link,
- TP_ARGS(
- void *, ip,
- void *, baddr,
- char *, filename,
- uint32_t, crc
- ),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_integer(uint32_t, crc, crc)
- ctf_string(filename, filename)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_lib, unload,
- TP_ARGS(void *, ip, void *, baddr),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- )
-)
-
-#endif /* _TRACEPOINT_UST_LIB_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./ust_lib.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
-
-#ifdef __cplusplus
-}
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _UST_WAIT_H
-#define _UST_WAIT_H
-
-#include <poll.h>
-
-/*
- * Wait until "cond" becomes true or the timeout (in milliseconds) expires.
- */
-#define wait_cond_interruptible_timeout(_cond, _timeout) \
- ({ \
- int __ret = 0, __pollret; \
- int __timeout = _timeout; \
- \
- for (;;) { \
- if (_cond) \
- break; \
- if (__timeout <= 0) { \
- __ret = -ETIMEDOUT; \
- break; \
- } \
- __pollret = poll(NULL, 0, 10); /* wait 10ms */ \
- if (__pollret < 0) { \
- __ret = -errno; \
- break; \
- } \
- __timeout -= 10; \
- } \
- __ret; \
- })
-
-
-#endif /* _UST_WAIT_H */
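A minimal usage sketch of the macro above (flag, helper name and the 3000 ms budget are assumptions, not part of this patch); it polls in 10 ms slices until the condition is observed or the budget is exhausted:

	#include <errno.h>	/* ETIMEDOUT and errno, both used inside the macro */

	static volatile int consumer_ready;	/* set by another thread; assumed */

	/* Hypothetical helper, not part of the patch. */
	static int wait_for_consumer(void)
	{
		/* 0 once consumer_ready is nonzero, -ETIMEDOUT after ~3000 ms,
		 * or -errno if poll() fails. */
		return wait_cond_interruptible_timeout(consumer_ready, 3000);
	}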
noinst_PROGRAMS = bench1 bench2
bench1_SOURCES = bench.c tp.c ust_tests_benchmark.h
bench1_LDADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
bench2_SOURCES = bench.c tp.c ust_tests_benchmark.h
bench2_CFLAGS = -DTRACING $(AM_CFLAGS)
bench2_LDADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
dist_noinst_SCRIPTS = test_benchmark ptime
ctf_types_SOURCES = ctf-types.c tp.c ust_tests_ctf_types.h
ctf_types_CFLAGS = -Werror=old-style-definition $(AM_CFLAGS)
ctf_types_LDADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
EXTRA_DIST = README
hello_many_SOURCES = hello-many.c tp.c ust_tests_hello_many.h
hello_many_CFLAGS = -Werror=old-style-definition $(AM_CFLAGS)
hello_many_LDADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
EXTRA_DIST = README
noinst_PROGRAMS = hello
hello_SOURCES = hello.cpp tp-cpp.cpp ust_tests_hello.h
hello_LDADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
EXTRA_DIST = README
hello_SOURCES = hello.c tp.c ust_tests_hello.h
hello_CFLAGS = -Werror=old-style-definition $(AM_CFLAGS)
hello_LDADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
EXTRA_DIST = README
noinst_PROGRAMS = same_line_tracepoint
same_line_tracepoint_SOURCES = same_line_tracepoint.c ust_tests_sameline.h
same_line_tracepoint_LDADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
EXTRA_DIST = README
hello_SOURCES = hello.c tp.c ust_tests_hello.h
hello_CFLAGS = -Werror=old-style-definition $(AM_CFLAGS)
hello_LDADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(DL_LIBS)
test_shm_SOURCES = shm.c
test_shm_LDADD = \
$(top_builddir)/src/common/libringbuffer.la \
- $(top_builddir)/src/liblttng-ust/liblttng-ust-common.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust-common.la \
$(top_builddir)/src/common/libcommon.la \
$(top_builddir)/tests/utils/libtap.a
noinst_PROGRAMS = ust-elf
ust_elf_SOURCES = ust-elf.c
ust_elf_LDADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(top_builddir)/tests/utils/libtap.a
dist_check_SCRIPTS = test_ust_elf
noinst_PROGRAMS = test_ust_error
test_ust_error_SOURCES = ust-error.c
test_ust_error_LDADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(top_builddir)/tests/utils/libtap.a
noinst_PROGRAMS = test_ust_utils
test_ust_utils_SOURCES = ust-utils.c ust-utils-common.h
test_ust_utils_LDADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(top_builddir)/tests/utils/libtap.a
if HAVE_CXX
noinst_PROGRAMS += test_ust_utils_cxx
test_ust_utils_cxx_SOURCES = ust-utils-cxx.cpp ust-utils-common.h
test_ust_utils_cxx_LDADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/lib/lttng-ust/liblttng-ust.la \
$(top_builddir)/tests/utils/libtap.a
endif