Move all public libraries under 'src/lib/'.
This is part of an effort to standardize our autotools setup across
projects to simplify maintenance.
Change-Id: I81b23df257bbfe3490f6ee7836b93732fcfb9fb2
Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
/src/liblttng-ust-java-agent/jni/jul/Makefile
/src/liblttng-ust-java-agent/jni/log4j/Makefile
/src/liblttng-ust-java/Makefile
-/src/liblttng-ust-libc-wrapper/Makefile
+/src/lib/lttng-ust-libc-wrapper/Makefile
/src/lib/lttng-ust-python-agent/Makefile
/src/lib/Makefile
/src/liblttng-ust/Makefile
src/liblttng-ust-java-agent/jni/Makefile
src/liblttng-ust-java-agent/Makefile
src/liblttng-ust-java/Makefile
- src/liblttng-ust-libc-wrapper/Makefile
+ src/lib/lttng-ust-libc-wrapper/Makefile
src/liblttng-ust/Makefile
src/lib/lttng-ust-python-agent/Makefile
src/lib/Makefile
lib \
liblttng-ust-fd \
liblttng-ust-fork \
- liblttng-ust-libc-wrapper \
liblttng-ust-cyg-profile
if ENABLE_UST_DL
# SPDX-License-Identifier: LGPL-2.1-only
-SUBDIRS =
+SUBDIRS = \
+ lttng-ust-libc-wrapper
if ENABLE_PYTHON_AGENT
SUBDIRS += lttng-ust-python-agent
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
+
+lib_LTLIBRARIES = liblttng-ust-libc-wrapper.la \
+ liblttng-ust-pthread-wrapper.la
+
+liblttng_ust_libc_wrapper_la_SOURCES = \
+ lttng-ust-malloc.c \
+ ust_libc.h
+
+liblttng_ust_libc_wrapper_la_LIBADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
+
+liblttng_ust_libc_wrapper_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
+
+liblttng_ust_pthread_wrapper_la_SOURCES = \
+ lttng-ust-pthread.c \
+ ust_pthread.h
+
+liblttng_ust_pthread_wrapper_la_LIBADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
+
+liblttng_ust_pthread_wrapper_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
+
+dist_noinst_SCRIPTS = run
+EXTRA_DIST = README
--- /dev/null
+liblttng-ust-libc is used for instrumenting some calls to libc in a
+program, without the need to recompile it.
+
+This library defines a malloc() function that is instrumented with a
+tracepoint. It also calls the libc malloc afterwards. When loaded with
+LD_PRELOAD, it replaces the libc malloc() function, in effect
+instrumenting all calls to malloc(). The same is performed for free().
+
+See the "run" script for a usage example.
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2009 Pierre-Marc Fournier
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+/*
+ * Do _not_ define _LGPL_SOURCE because we don't want to create a
+ * circular dependency loop between this malloc wrapper, liburcu and
+ * libc.
+ */
+
+/* Has to be included first to override dlfcn.h */
+#include <common/compat/dlfcn.h>
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <assert.h>
+#include <malloc.h>
+
+#include <urcu/system.h>
+#include <urcu/uatomic.h>
+#include <urcu/compiler.h>
+#include <urcu/tls-compat.h>
+#include <urcu/arch.h>
+
+#include <lttng/ust-libc-wrapper.h>
+
+#include "common/macros.h"
+#include "common/align.h"
+
+#define TRACEPOINT_DEFINE
+#define TRACEPOINT_CREATE_PROBES
+#define TP_IP_PARAM ip
+#include "ust_libc.h"
+
+#define STATIC_CALLOC_LEN 4096
+static char static_calloc_buf[STATIC_CALLOC_LEN];
+static unsigned long static_calloc_buf_offset;
+
+struct alloc_functions {
+ void *(*calloc)(size_t nmemb, size_t size);
+ void *(*malloc)(size_t size);
+ void (*free)(void *ptr);
+ void *(*realloc)(void *ptr, size_t size);
+ void *(*memalign)(size_t alignment, size_t size);
+ int (*posix_memalign)(void **memptr, size_t alignment, size_t size);
+};
+
+static
+struct alloc_functions cur_alloc;
+
+/*
+ * Make sure our own use of the TLS compat layer will not cause infinite
+ * recursion by calling calloc.
+ */
+
+static
+void *static_calloc(size_t nmemb, size_t size);
+
+/*
+ * pthread mutex replacement for URCU tls compat layer.
+ */
+static int ust_malloc_lock;
+
+static
+void ust_malloc_spin_lock(pthread_mutex_t *lock)
+ __attribute__((unused));
+static
+void ust_malloc_spin_lock(pthread_mutex_t *lock __attribute__((unused)))
+{
+ /*
+ * The memory barrier within cmpxchg takes care of ordering
+ * memory accesses with respect to the start of the critical
+ * section.
+ */
+ while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
+ caa_cpu_relax();
+}
+
+static
+void ust_malloc_spin_unlock(pthread_mutex_t *lock)
+ __attribute__((unused));
+static
+void ust_malloc_spin_unlock(pthread_mutex_t *lock __attribute__((unused)))
+{
+ /*
+ * Ensure memory accesses within the critical section do not
+ * leak outside.
+ */
+ cmm_smp_mb();
+ uatomic_set(&ust_malloc_lock, 0);
+}
+
+#define calloc static_calloc
+#define pthread_mutex_lock ust_malloc_spin_lock
+#define pthread_mutex_unlock ust_malloc_spin_unlock
+static DEFINE_URCU_TLS(int, malloc_nesting);
+#undef pthread_mutex_unlock
+#undef pthread_mutex_lock
+#undef calloc
+
+/*
+ * Static allocator to use when initially executing dlsym(). It keeps a
+ * size_t value of each object size prior to the object.
+ */
+static
+void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
+{
+ size_t prev_offset, new_offset, res_offset, aligned_offset;
+
+ if (nmemb * size == 0) {
+ return NULL;
+ }
+
+ /*
+ * Protect static_calloc_buf_offset from concurrent updates
+ * using a cmpxchg loop rather than a mutex to remove a
+ * dependency on pthread. This will minimize the risk of bad
+ * interaction between mutex and malloc instrumentation.
+ */
+ res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
+ do {
+ prev_offset = res_offset;
+ aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
+ new_offset = aligned_offset + nmemb * size;
+ if (new_offset > sizeof(static_calloc_buf)) {
+ abort();
+ }
+ } while ((res_offset = uatomic_cmpxchg(&static_calloc_buf_offset,
+ prev_offset, new_offset)) != prev_offset);
+ *(size_t *) &static_calloc_buf[aligned_offset - sizeof(size_t)] = size;
+ return &static_calloc_buf[aligned_offset];
+}
+
+static
+void *static_calloc(size_t nmemb, size_t size)
+{
+ void *retval;
+
+ retval = static_calloc_aligned(nmemb, size, 1);
+ return retval;
+}
+
+static
+void *static_malloc(size_t size)
+{
+ void *retval;
+
+ retval = static_calloc_aligned(1, size, 1);
+ return retval;
+}
+
+static
+void static_free(void *ptr __attribute__((unused)))
+{
+ /* no-op. */
+}
+
+static
+void *static_realloc(void *ptr, size_t size)
+{
+ size_t *old_size = NULL;
+ void *retval;
+
+ if (size == 0) {
+ retval = NULL;
+ goto end;
+ }
+
+ if (ptr) {
+ old_size = (size_t *) ptr - 1;
+ if (size <= *old_size) {
+ /* We can re-use the old entry. */
+ *old_size = size;
+ retval = ptr;
+ goto end;
+ }
+ }
+ /* We need to expand. Don't free previous memory location. */
+ retval = static_calloc_aligned(1, size, 1);
+ assert(retval);
+ if (ptr)
+ memcpy(retval, ptr, *old_size);
+end:
+ return retval;
+}
+
+static
+void *static_memalign(size_t alignment, size_t size)
+{
+ void *retval;
+
+ retval = static_calloc_aligned(1, size, alignment);
+ return retval;
+}
+
+static
+int static_posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+ void *ptr;
+
+ /* Check for power of 2, larger than void *. */
+ if (alignment & (alignment - 1)
+ || alignment < sizeof(void *)
+ || alignment == 0) {
+ goto end;
+ }
+ ptr = static_calloc_aligned(1, size, alignment);
+ *memptr = ptr;
+end:
+ return 0;
+}
+
+static
+void setup_static_allocator(void)
+{
+ assert(cur_alloc.calloc == NULL);
+ cur_alloc.calloc = static_calloc;
+ assert(cur_alloc.malloc == NULL);
+ cur_alloc.malloc = static_malloc;
+ assert(cur_alloc.free == NULL);
+ cur_alloc.free = static_free;
+ assert(cur_alloc.realloc == NULL);
+ cur_alloc.realloc = static_realloc;
+ assert(cur_alloc.memalign == NULL);
+ cur_alloc.memalign = static_memalign;
+ assert(cur_alloc.posix_memalign == NULL);
+ cur_alloc.posix_memalign = static_posix_memalign;
+}
+
+static
+void lookup_all_symbols(void)
+{
+ struct alloc_functions af;
+
+ /*
+ * Temporarily redirect allocation functions to
+ * static_calloc_aligned, and free function to static_free
+ * (no-op), until the dlsym lookup has completed.
+ */
+ setup_static_allocator();
+
+ /* Perform the actual lookups */
+ af.calloc = dlsym(RTLD_NEXT, "calloc");
+ af.malloc = dlsym(RTLD_NEXT, "malloc");
+ af.free = dlsym(RTLD_NEXT, "free");
+ af.realloc = dlsym(RTLD_NEXT, "realloc");
+ af.memalign = dlsym(RTLD_NEXT, "memalign");
+ af.posix_memalign = dlsym(RTLD_NEXT, "posix_memalign");
+
+ /* Populate the new allocator functions */
+ memcpy(&cur_alloc, &af, sizeof(cur_alloc));
+}
+
+void *malloc(size_t size)
+{
+ void *retval;
+
+ URCU_TLS(malloc_nesting)++;
+ if (cur_alloc.malloc == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.malloc == NULL) {
+ fprintf(stderr, "mallocwrap: unable to find malloc\n");
+ abort();
+ }
+ }
+ retval = cur_alloc.malloc(size);
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, malloc,
+ size, retval, LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
+ return retval;
+}
+
+void free(void *ptr)
+{
+ URCU_TLS(malloc_nesting)++;
+ /*
+ * Check whether the memory was allocated with
+	 * static_calloc_aligned, in which case there is nothing to free.
+ */
+ if (caa_unlikely((char *)ptr >= static_calloc_buf &&
+ (char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
+ goto end;
+ }
+
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, free,
+ ptr, LTTNG_UST_CALLER_IP());
+ }
+
+ if (cur_alloc.free == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.free == NULL) {
+ fprintf(stderr, "mallocwrap: unable to find free\n");
+ abort();
+ }
+ }
+ cur_alloc.free(ptr);
+end:
+ URCU_TLS(malloc_nesting)--;
+}
+
+void *calloc(size_t nmemb, size_t size)
+{
+ void *retval;
+
+ URCU_TLS(malloc_nesting)++;
+ if (cur_alloc.calloc == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.calloc == NULL) {
+ fprintf(stderr, "callocwrap: unable to find calloc\n");
+ abort();
+ }
+ }
+ retval = cur_alloc.calloc(nmemb, size);
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, calloc,
+ nmemb, size, retval, LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
+ return retval;
+}
+
+void *realloc(void *ptr, size_t size)
+{
+ void *retval;
+
+ URCU_TLS(malloc_nesting)++;
+ /*
+ * Check whether the memory was allocated with
+	 * static_calloc_aligned, in which case there is nothing
+ * to free, and we need to copy the old data.
+ */
+ if (caa_unlikely((char *)ptr >= static_calloc_buf &&
+ (char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
+ size_t *old_size;
+
+ old_size = (size_t *) ptr - 1;
+ if (cur_alloc.calloc == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.calloc == NULL) {
+ fprintf(stderr, "reallocwrap: unable to find calloc\n");
+ abort();
+ }
+ }
+ retval = cur_alloc.calloc(1, size);
+ if (retval) {
+ memcpy(retval, ptr, *old_size);
+ }
+ /*
+	 * Mimic that a NULL pointer has been received, so
+	 * memory allocation analyses based on the trace don't
+ * get confused by the address from the static
+ * allocator.
+ */
+ ptr = NULL;
+ goto end;
+ }
+
+ if (cur_alloc.realloc == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.realloc == NULL) {
+ fprintf(stderr, "reallocwrap: unable to find realloc\n");
+ abort();
+ }
+ }
+ retval = cur_alloc.realloc(ptr, size);
+end:
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, realloc,
+ ptr, size, retval, LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
+ return retval;
+}
+
+void *memalign(size_t alignment, size_t size)
+{
+ void *retval;
+
+ URCU_TLS(malloc_nesting)++;
+ if (cur_alloc.memalign == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.memalign == NULL) {
+ fprintf(stderr, "memalignwrap: unable to find memalign\n");
+ abort();
+ }
+ }
+ retval = cur_alloc.memalign(alignment, size);
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, memalign,
+ alignment, size, retval,
+ LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
+ return retval;
+}
+
+int posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+ int retval;
+
+ URCU_TLS(malloc_nesting)++;
+ if (cur_alloc.posix_memalign == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.posix_memalign == NULL) {
+ fprintf(stderr, "posix_memalignwrap: unable to find posix_memalign\n");
+ abort();
+ }
+ }
+ retval = cur_alloc.posix_memalign(memptr, alignment, size);
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, posix_memalign,
+ *memptr, alignment, size,
+ retval, LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
+ return retval;
+}
+
+static
+void lttng_ust_fixup_malloc_nesting_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
+}
+
+void lttng_ust_libc_wrapper_malloc_init(void)
+{
+ /* Initialization already done */
+ if (cur_alloc.calloc) {
+ return;
+ }
+ lttng_ust_fixup_malloc_nesting_tls();
+ /*
+ * Ensure the allocator is in place before the process becomes
+ * multithreaded.
+ */
+ lookup_all_symbols();
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2013 Mentor Graphics
+ */
+
+/*
+ * Do _not_ define _LGPL_SOURCE because we don't want to create a
+ * circular dependency loop between this malloc wrapper, liburcu and
+ * libc.
+ */
+
+/* Has to be included first to override dlfcn.h */
+#include <common/compat/dlfcn.h>
+
+#include "common/macros.h"
+#include <pthread.h>
+
+#define TRACEPOINT_DEFINE
+#define TRACEPOINT_CREATE_PROBES
+#define TP_IP_PARAM ip
+#include "ust_pthread.h"
+
+static __thread int thread_in_trace;
+
+int pthread_mutex_lock(pthread_mutex_t *mutex)
+{
+ static int (*mutex_lock)(pthread_mutex_t *);
+ int retval;
+
+ if (!mutex_lock) {
+ mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");
+ if (!mutex_lock) {
+ if (thread_in_trace) {
+ abort();
+ }
+ fprintf(stderr, "unable to initialize pthread wrapper library.\n");
+ return EINVAL;
+ }
+ }
+ if (thread_in_trace) {
+ return mutex_lock(mutex);
+ }
+
+ thread_in_trace = 1;
+ tracepoint(lttng_ust_pthread, pthread_mutex_lock_req, mutex,
+ LTTNG_UST_CALLER_IP());
+ retval = mutex_lock(mutex);
+ tracepoint(lttng_ust_pthread, pthread_mutex_lock_acq, mutex,
+ retval, LTTNG_UST_CALLER_IP());
+ thread_in_trace = 0;
+ return retval;
+}
+
+int pthread_mutex_trylock(pthread_mutex_t *mutex)
+{
+ static int (*mutex_trylock)(pthread_mutex_t *);
+ int retval;
+
+ if (!mutex_trylock) {
+ mutex_trylock = dlsym(RTLD_NEXT, "pthread_mutex_trylock");
+ if (!mutex_trylock) {
+ if (thread_in_trace) {
+ abort();
+ }
+ fprintf(stderr, "unable to initialize pthread wrapper library.\n");
+ return EINVAL;
+ }
+ }
+ if (thread_in_trace) {
+ return mutex_trylock(mutex);
+ }
+
+ thread_in_trace = 1;
+ retval = mutex_trylock(mutex);
+ tracepoint(lttng_ust_pthread, pthread_mutex_trylock, mutex,
+ retval, LTTNG_UST_CALLER_IP());
+ thread_in_trace = 0;
+ return retval;
+}
+
+int pthread_mutex_unlock(pthread_mutex_t *mutex)
+{
+ static int (*mutex_unlock)(pthread_mutex_t *);
+ int retval;
+
+ if (!mutex_unlock) {
+ mutex_unlock = dlsym(RTLD_NEXT, "pthread_mutex_unlock");
+ if (!mutex_unlock) {
+ if (thread_in_trace) {
+ abort();
+ }
+ fprintf(stderr, "unable to initialize pthread wrapper library.\n");
+ return EINVAL;
+ }
+ }
+ if (thread_in_trace) {
+ return mutex_unlock(mutex);
+ }
+
+ thread_in_trace = 1;
+ retval = mutex_unlock(mutex);
+ tracepoint(lttng_ust_pthread, pthread_mutex_unlock, mutex,
+ retval, LTTNG_UST_CALLER_IP());
+ thread_in_trace = 0;
+ return retval;
+}
--- /dev/null
+#!/bin/sh
+#
+# SPDX-License-Identifier: LGPL-2.1-only
+
+LD_VERBOSE=1 LD_PRELOAD=.libs/liblttng-ust-libc-wrapper.so ${*}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_libc
+
+#if !defined(_TRACEPOINT_UST_LIBC_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_UST_LIBC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT(lttng_ust_libc, malloc,
+ TP_ARGS(size_t, size, void *, ptr, void *, ip),
+ TP_FIELDS(
+ ctf_integer(size_t, size, size)
+ ctf_integer_hex(void *, ptr, ptr)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_libc, free,
+ TP_ARGS(void *, ptr, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, ptr, ptr)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_libc, calloc,
+ TP_ARGS(size_t, nmemb, size_t, size, void *, ptr, void *, ip),
+ TP_FIELDS(
+ ctf_integer(size_t, nmemb, nmemb)
+ ctf_integer(size_t, size, size)
+ ctf_integer_hex(void *, ptr, ptr)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_libc, realloc,
+ TP_ARGS(void *, in_ptr, size_t, size, void *, ptr, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, in_ptr, in_ptr)
+ ctf_integer(size_t, size, size)
+ ctf_integer_hex(void *, ptr, ptr)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_libc, memalign,
+ TP_ARGS(size_t, alignment, size_t, size, void *, ptr, void *, ip),
+ TP_FIELDS(
+ ctf_integer(size_t, alignment, alignment)
+ ctf_integer(size_t, size, size)
+ ctf_integer_hex(void *, ptr, ptr)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_libc, posix_memalign,
+ TP_ARGS(void *, out_ptr, size_t, alignment, size_t, size, int, result, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, out_ptr, out_ptr)
+ ctf_integer(size_t, alignment, alignment)
+ ctf_integer(size_t, size, size)
+ ctf_integer(int, result, result)
+ ctf_unused(ip)
+ )
+)
+
+#endif /* _TRACEPOINT_UST_LIBC_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./ust_libc.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2013 Mentor Graphics
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_pthread
+
+#if !defined(_TRACEPOINT_UST_PTHREAD_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_UST_PTHREAD_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_lock_req,
+ TP_ARGS(pthread_mutex_t *, mutex, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, mutex, mutex)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_lock_acq,
+ TP_ARGS(pthread_mutex_t *, mutex, int, status, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, mutex, mutex)
+ ctf_integer(int, status, status)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_trylock,
+ TP_ARGS(pthread_mutex_t *, mutex, int, status, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, mutex, mutex)
+ ctf_integer(int, status, status)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_unlock,
+ TP_ARGS(pthread_mutex_t *, mutex, int, status, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, mutex, mutex)
+ ctf_integer(int, status, status)
+ ctf_unused(ip)
+ )
+)
+
+#endif /* _TRACEPOINT_UST_PTHREAD_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./ust_pthread.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
+
+#ifdef __cplusplus
+}
+#endif
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
-
-lib_LTLIBRARIES = liblttng-ust-libc-wrapper.la \
- liblttng-ust-pthread-wrapper.la
-
-liblttng_ust_libc_wrapper_la_SOURCES = \
- lttng-ust-malloc.c \
- ust_libc.h
-
-liblttng_ust_libc_wrapper_la_LIBADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
- $(DL_LIBS)
-
-liblttng_ust_libc_wrapper_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
-
-liblttng_ust_pthread_wrapper_la_SOURCES = \
- lttng-ust-pthread.c \
- ust_pthread.h
-
-liblttng_ust_pthread_wrapper_la_LIBADD = \
- $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
- $(DL_LIBS)
-
-liblttng_ust_pthread_wrapper_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
-
-dist_noinst_SCRIPTS = run
-EXTRA_DIST = README
+++ /dev/null
-liblttng-ust-libc is used for instrumenting some calls to libc in a
-program, without need for recompiling it.
-
-This library defines a malloc() function that is instrumented with a
-tracepoint. It also calls the libc malloc afterwards. When loaded with
-LD_PRELOAD, it replaces the libc malloc() function, in effect
-instrumenting all calls to malloc(). The same is performed for free().
-
-See the "run" script for a usage example.
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2009 Pierre-Marc Fournier
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-/*
- * Do _not_ define _LGPL_SOURCE because we don't want to create a
- * circular dependency loop between this malloc wrapper, liburcu and
- * libc.
- */
-
-/* Has to be included first to override dlfcn.h */
-#include <common/compat/dlfcn.h>
-
-#include <sys/types.h>
-#include <stdio.h>
-#include <assert.h>
-#include <malloc.h>
-
-#include <urcu/system.h>
-#include <urcu/uatomic.h>
-#include <urcu/compiler.h>
-#include <urcu/tls-compat.h>
-#include <urcu/arch.h>
-
-#include <lttng/ust-libc-wrapper.h>
-
-#include "common/macros.h"
-#include "common/align.h"
-
-#define TRACEPOINT_DEFINE
-#define TRACEPOINT_CREATE_PROBES
-#define TP_IP_PARAM ip
-#include "ust_libc.h"
-
-#define STATIC_CALLOC_LEN 4096
-static char static_calloc_buf[STATIC_CALLOC_LEN];
-static unsigned long static_calloc_buf_offset;
-
-struct alloc_functions {
- void *(*calloc)(size_t nmemb, size_t size);
- void *(*malloc)(size_t size);
- void (*free)(void *ptr);
- void *(*realloc)(void *ptr, size_t size);
- void *(*memalign)(size_t alignment, size_t size);
- int (*posix_memalign)(void **memptr, size_t alignment, size_t size);
-};
-
-static
-struct alloc_functions cur_alloc;
-
-/*
- * Make sure our own use of the LTS compat layer will not cause infinite
- * recursion by calling calloc.
- */
-
-static
-void *static_calloc(size_t nmemb, size_t size);
-
-/*
- * pthread mutex replacement for URCU tls compat layer.
- */
-static int ust_malloc_lock;
-
-static
-void ust_malloc_spin_lock(pthread_mutex_t *lock)
- __attribute__((unused));
-static
-void ust_malloc_spin_lock(pthread_mutex_t *lock __attribute__((unused)))
-{
- /*
- * The memory barrier within cmpxchg takes care of ordering
- * memory accesses with respect to the start of the critical
- * section.
- */
- while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
- caa_cpu_relax();
-}
-
-static
-void ust_malloc_spin_unlock(pthread_mutex_t *lock)
- __attribute__((unused));
-static
-void ust_malloc_spin_unlock(pthread_mutex_t *lock __attribute__((unused)))
-{
- /*
- * Ensure memory accesses within the critical section do not
- * leak outside.
- */
- cmm_smp_mb();
- uatomic_set(&ust_malloc_lock, 0);
-}
-
-#define calloc static_calloc
-#define pthread_mutex_lock ust_malloc_spin_lock
-#define pthread_mutex_unlock ust_malloc_spin_unlock
-static DEFINE_URCU_TLS(int, malloc_nesting);
-#undef pthread_mutex_unlock
-#undef pthread_mutex_lock
-#undef calloc
-
-/*
- * Static allocator to use when initially executing dlsym(). It keeps a
- * size_t value of each object size prior to the object.
- */
-static
-void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
-{
- size_t prev_offset, new_offset, res_offset, aligned_offset;
-
- if (nmemb * size == 0) {
- return NULL;
- }
-
- /*
- * Protect static_calloc_buf_offset from concurrent updates
- * using a cmpxchg loop rather than a mutex to remove a
- * dependency on pthread. This will minimize the risk of bad
- * interaction between mutex and malloc instrumentation.
- */
- res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
- do {
- prev_offset = res_offset;
- aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
- new_offset = aligned_offset + nmemb * size;
- if (new_offset > sizeof(static_calloc_buf)) {
- abort();
- }
- } while ((res_offset = uatomic_cmpxchg(&static_calloc_buf_offset,
- prev_offset, new_offset)) != prev_offset);
- *(size_t *) &static_calloc_buf[aligned_offset - sizeof(size_t)] = size;
- return &static_calloc_buf[aligned_offset];
-}
-
-static
-void *static_calloc(size_t nmemb, size_t size)
-{
- void *retval;
-
- retval = static_calloc_aligned(nmemb, size, 1);
- return retval;
-}
-
-static
-void *static_malloc(size_t size)
-{
- void *retval;
-
- retval = static_calloc_aligned(1, size, 1);
- return retval;
-}
-
-static
-void static_free(void *ptr __attribute__((unused)))
-{
- /* no-op. */
-}
-
-static
-void *static_realloc(void *ptr, size_t size)
-{
- size_t *old_size = NULL;
- void *retval;
-
- if (size == 0) {
- retval = NULL;
- goto end;
- }
-
- if (ptr) {
- old_size = (size_t *) ptr - 1;
- if (size <= *old_size) {
- /* We can re-use the old entry. */
- *old_size = size;
- retval = ptr;
- goto end;
- }
- }
- /* We need to expand. Don't free previous memory location. */
- retval = static_calloc_aligned(1, size, 1);
- assert(retval);
- if (ptr)
- memcpy(retval, ptr, *old_size);
-end:
- return retval;
-}
-
-static
-void *static_memalign(size_t alignment, size_t size)
-{
- void *retval;
-
- retval = static_calloc_aligned(1, size, alignment);
- return retval;
-}
-
-static
-int static_posix_memalign(void **memptr, size_t alignment, size_t size)
-{
- void *ptr;
-
- /* Check for power of 2, larger than void *. */
- if (alignment & (alignment - 1)
- || alignment < sizeof(void *)
- || alignment == 0) {
- goto end;
- }
- ptr = static_calloc_aligned(1, size, alignment);
- *memptr = ptr;
-end:
- return 0;
-}
-
-static
-void setup_static_allocator(void)
-{
- assert(cur_alloc.calloc == NULL);
- cur_alloc.calloc = static_calloc;
- assert(cur_alloc.malloc == NULL);
- cur_alloc.malloc = static_malloc;
- assert(cur_alloc.free == NULL);
- cur_alloc.free = static_free;
- assert(cur_alloc.realloc == NULL);
- cur_alloc.realloc = static_realloc;
- assert(cur_alloc.memalign == NULL);
- cur_alloc.memalign = static_memalign;
- assert(cur_alloc.posix_memalign == NULL);
- cur_alloc.posix_memalign = static_posix_memalign;
-}
-
-static
-void lookup_all_symbols(void)
-{
- struct alloc_functions af;
-
- /*
- * Temporarily redirect allocation functions to
- * static_calloc_aligned, and free function to static_free
- * (no-op), until the dlsym lookup has completed.
- */
- setup_static_allocator();
-
- /* Perform the actual lookups */
- af.calloc = dlsym(RTLD_NEXT, "calloc");
- af.malloc = dlsym(RTLD_NEXT, "malloc");
- af.free = dlsym(RTLD_NEXT, "free");
- af.realloc = dlsym(RTLD_NEXT, "realloc");
- af.memalign = dlsym(RTLD_NEXT, "memalign");
- af.posix_memalign = dlsym(RTLD_NEXT, "posix_memalign");
-
- /* Populate the new allocator functions */
- memcpy(&cur_alloc, &af, sizeof(cur_alloc));
-}
-
-void *malloc(size_t size)
-{
- void *retval;
-
- URCU_TLS(malloc_nesting)++;
- if (cur_alloc.malloc == NULL) {
- lookup_all_symbols();
- if (cur_alloc.malloc == NULL) {
- fprintf(stderr, "mallocwrap: unable to find malloc\n");
- abort();
- }
- }
- retval = cur_alloc.malloc(size);
- if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(lttng_ust_libc, malloc,
- size, retval, LTTNG_UST_CALLER_IP());
- }
- URCU_TLS(malloc_nesting)--;
- return retval;
-}
-
-void free(void *ptr)
-{
- URCU_TLS(malloc_nesting)++;
- /*
- * Check whether the memory was allocated with
- * static_calloc_align, in which case there is nothing to free.
- */
- if (caa_unlikely((char *)ptr >= static_calloc_buf &&
- (char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
- goto end;
- }
-
- if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(lttng_ust_libc, free,
- ptr, LTTNG_UST_CALLER_IP());
- }
-
- if (cur_alloc.free == NULL) {
- lookup_all_symbols();
- if (cur_alloc.free == NULL) {
- fprintf(stderr, "mallocwrap: unable to find free\n");
- abort();
- }
- }
- cur_alloc.free(ptr);
-end:
- URCU_TLS(malloc_nesting)--;
-}
-
-void *calloc(size_t nmemb, size_t size)
-{
- void *retval;
-
- URCU_TLS(malloc_nesting)++;
- if (cur_alloc.calloc == NULL) {
- lookup_all_symbols();
- if (cur_alloc.calloc == NULL) {
- fprintf(stderr, "callocwrap: unable to find calloc\n");
- abort();
- }
- }
- retval = cur_alloc.calloc(nmemb, size);
- if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(lttng_ust_libc, calloc,
- nmemb, size, retval, LTTNG_UST_CALLER_IP());
- }
- URCU_TLS(malloc_nesting)--;
- return retval;
-}
-
-void *realloc(void *ptr, size_t size)
-{
- void *retval;
-
- URCU_TLS(malloc_nesting)++;
- /*
- * Check whether the memory was allocated with
- * static_calloc_align, in which case there is nothing
- * to free, and we need to copy the old data.
- */
- if (caa_unlikely((char *)ptr >= static_calloc_buf &&
- (char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
- size_t *old_size;
-
- old_size = (size_t *) ptr - 1;
- if (cur_alloc.calloc == NULL) {
- lookup_all_symbols();
- if (cur_alloc.calloc == NULL) {
- fprintf(stderr, "reallocwrap: unable to find calloc\n");
- abort();
- }
- }
- retval = cur_alloc.calloc(1, size);
- if (retval) {
- memcpy(retval, ptr, *old_size);
- }
- /*
- * Mimick that a NULL pointer has been received, so
- * memory allocation analysis based on the trace don't
- * get confused by the address from the static
- * allocator.
- */
- ptr = NULL;
- goto end;
- }
-
- if (cur_alloc.realloc == NULL) {
- lookup_all_symbols();
- if (cur_alloc.realloc == NULL) {
- fprintf(stderr, "reallocwrap: unable to find realloc\n");
- abort();
- }
- }
- retval = cur_alloc.realloc(ptr, size);
-end:
- if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(lttng_ust_libc, realloc,
- ptr, size, retval, LTTNG_UST_CALLER_IP());
- }
- URCU_TLS(malloc_nesting)--;
- return retval;
-}
-
-void *memalign(size_t alignment, size_t size)
-{
- void *retval;
-
- URCU_TLS(malloc_nesting)++;
- if (cur_alloc.memalign == NULL) {
- lookup_all_symbols();
- if (cur_alloc.memalign == NULL) {
- fprintf(stderr, "memalignwrap: unable to find memalign\n");
- abort();
- }
- }
- retval = cur_alloc.memalign(alignment, size);
- if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(lttng_ust_libc, memalign,
- alignment, size, retval,
- LTTNG_UST_CALLER_IP());
- }
- URCU_TLS(malloc_nesting)--;
- return retval;
-}
-
-int posix_memalign(void **memptr, size_t alignment, size_t size)
-{
- int retval;
-
- URCU_TLS(malloc_nesting)++;
- if (cur_alloc.posix_memalign == NULL) {
- lookup_all_symbols();
- if (cur_alloc.posix_memalign == NULL) {
- fprintf(stderr, "posix_memalignwrap: unable to find posix_memalign\n");
- abort();
- }
- }
- retval = cur_alloc.posix_memalign(memptr, alignment, size);
- if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(lttng_ust_libc, posix_memalign,
- *memptr, alignment, size,
- retval, LTTNG_UST_CALLER_IP());
- }
- URCU_TLS(malloc_nesting)--;
- return retval;
-}
-
-static
-void lttng_ust_fixup_malloc_nesting_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
-}
-
-void lttng_ust_libc_wrapper_malloc_init(void)
-{
- /* Initialization already done */
- if (cur_alloc.calloc) {
- return;
- }
- lttng_ust_fixup_malloc_nesting_tls();
- /*
- * Ensure the allocator is in place before the process becomes
- * multithreaded.
- */
- lookup_all_symbols();
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2013 Mentor Graphics
- */
-
-/*
- * Do _not_ define _LGPL_SOURCE because we don't want to create a
- * circular dependency loop between this malloc wrapper, liburcu and
- * libc.
- */
-
-/* Has to be included first to override dlfcn.h */
-#include <common/compat/dlfcn.h>
-
-#include "common/macros.h"
-#include <pthread.h>
-
-#define TRACEPOINT_DEFINE
-#define TRACEPOINT_CREATE_PROBES
-#define TP_IP_PARAM ip
-#include "ust_pthread.h"
-
-static __thread int thread_in_trace;
-
-int pthread_mutex_lock(pthread_mutex_t *mutex)
-{
- static int (*mutex_lock)(pthread_mutex_t *);
- int retval;
-
- if (!mutex_lock) {
- mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");
- if (!mutex_lock) {
- if (thread_in_trace) {
- abort();
- }
- fprintf(stderr, "unable to initialize pthread wrapper library.\n");
- return EINVAL;
- }
- }
- if (thread_in_trace) {
- return mutex_lock(mutex);
- }
-
- thread_in_trace = 1;
- tracepoint(lttng_ust_pthread, pthread_mutex_lock_req, mutex,
- LTTNG_UST_CALLER_IP());
- retval = mutex_lock(mutex);
- tracepoint(lttng_ust_pthread, pthread_mutex_lock_acq, mutex,
- retval, LTTNG_UST_CALLER_IP());
- thread_in_trace = 0;
- return retval;
-}
-
-int pthread_mutex_trylock(pthread_mutex_t *mutex)
-{
- static int (*mutex_trylock)(pthread_mutex_t *);
- int retval;
-
- if (!mutex_trylock) {
- mutex_trylock = dlsym(RTLD_NEXT, "pthread_mutex_trylock");
- if (!mutex_trylock) {
- if (thread_in_trace) {
- abort();
- }
- fprintf(stderr, "unable to initialize pthread wrapper library.\n");
- return EINVAL;
- }
- }
- if (thread_in_trace) {
- return mutex_trylock(mutex);
- }
-
- thread_in_trace = 1;
- retval = mutex_trylock(mutex);
- tracepoint(lttng_ust_pthread, pthread_mutex_trylock, mutex,
- retval, LTTNG_UST_CALLER_IP());
- thread_in_trace = 0;
- return retval;
-}
-
-int pthread_mutex_unlock(pthread_mutex_t *mutex)
-{
- static int (*mutex_unlock)(pthread_mutex_t *);
- int retval;
-
- if (!mutex_unlock) {
- mutex_unlock = dlsym(RTLD_NEXT, "pthread_mutex_unlock");
- if (!mutex_unlock) {
- if (thread_in_trace) {
- abort();
- }
- fprintf(stderr, "unable to initialize pthread wrapper library.\n");
- return EINVAL;
- }
- }
- if (thread_in_trace) {
- return mutex_unlock(mutex);
- }
-
- thread_in_trace = 1;
- retval = mutex_unlock(mutex);
- tracepoint(lttng_ust_pthread, pthread_mutex_unlock, mutex,
- retval, LTTNG_UST_CALLER_IP());
- thread_in_trace = 0;
- return retval;
-}
+++ /dev/null
-#!/bin/sh
-#
-# SPDX-License-Identifier: LGPL-2.1-only
-
-LD_VERBOSE=1 LD_PRELOAD=.libs/liblttng-ust-libc-wrapper.so ${*}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_libc
-
-#if !defined(_TRACEPOINT_UST_LIBC_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_UST_LIBC_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <lttng/tracepoint.h>
-
-TRACEPOINT_EVENT(lttng_ust_libc, malloc,
- TP_ARGS(size_t, size, void *, ptr, void *, ip),
- TP_FIELDS(
- ctf_integer(size_t, size, size)
- ctf_integer_hex(void *, ptr, ptr)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_libc, free,
- TP_ARGS(void *, ptr, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, ptr, ptr)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_libc, calloc,
- TP_ARGS(size_t, nmemb, size_t, size, void *, ptr, void *, ip),
- TP_FIELDS(
- ctf_integer(size_t, nmemb, nmemb)
- ctf_integer(size_t, size, size)
- ctf_integer_hex(void *, ptr, ptr)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_libc, realloc,
- TP_ARGS(void *, in_ptr, size_t, size, void *, ptr, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, in_ptr, in_ptr)
- ctf_integer(size_t, size, size)
- ctf_integer_hex(void *, ptr, ptr)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_libc, memalign,
- TP_ARGS(size_t, alignment, size_t, size, void *, ptr, void *, ip),
- TP_FIELDS(
- ctf_integer(size_t, alignment, alignment)
- ctf_integer(size_t, size, size)
- ctf_integer_hex(void *, ptr, ptr)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_libc, posix_memalign,
- TP_ARGS(void *, out_ptr, size_t, alignment, size_t, size, int, result, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, out_ptr, out_ptr)
- ctf_integer(size_t, alignment, alignment)
- ctf_integer(size_t, size, size)
- ctf_integer(int, result, result)
- ctf_unused(ip)
- )
-)
-
-#endif /* _TRACEPOINT_UST_LIBC_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./ust_libc.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
-
-#ifdef __cplusplus
-}
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2013 Mentor Graphics
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_pthread
-
-#if !defined(_TRACEPOINT_UST_PTHREAD_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_UST_PTHREAD_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <lttng/tracepoint.h>
-
-TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_lock_req,
- TP_ARGS(pthread_mutex_t *, mutex, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, mutex, mutex)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_lock_acq,
- TP_ARGS(pthread_mutex_t *, mutex, int, status, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, mutex, mutex)
- ctf_integer(int, status, status)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_trylock,
- TP_ARGS(pthread_mutex_t *, mutex, int, status, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, mutex, mutex)
- ctf_integer(int, status, status)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_unlock,
- TP_ARGS(pthread_mutex_t *, mutex, int, status, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, mutex, mutex)
- ctf_integer(int, status, status)
- ctf_unused(ip)
- )
-)
-
-#endif /* _TRACEPOINT_UST_PTHREAD_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./ust_pthread.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
-
-#ifdef __cplusplus
-}
-#endif