Missing pieces: Now libust needs to talk to the session daemon.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
# libust and '.' (that contains the linker script). However, '.'
# must be installed after libust so it can overwrite libust.so with
# the linker script.
-SUBDIRS = snprintf libringbuffer libust include doc
+SUBDIRS = snprintf libringbuffer libust include doc tests
#temporarily disabled
-# . tests libustinstr-malloc libustfork
+# . libustinstr-malloc libustfork
EXTRA_DIST = libust.ldscript.in libust-initializer.c libust-initializer.h
-/* Copyright (C) 2010 Pierre-Marc Fournier
+/*
+ * Copyright (C) 2010 Pierre-Marc Fournier
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
+ * License as published by the Free Software Foundation; version 2.1 of
+ * the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
/* TRACE CLOCK */
-/* There are two types of clocks that can be used.
- - TSC based clock
- - gettimeofday() clock
-
- Microbenchmarks on Linux 2.6.30 on Core2 Duo 3GHz (functions are inlined):
- Calls (100000000) to tsc(): 4004035641 cycles or 40 cycles/call
- Calls (100000000) to gettimeofday(): 9723158352 cycles or 97 cycles/call
-
- For merging traces with the kernel, a time source compatible with that of
- the kernel is necessary.
-
- Instead of gettimeofday(), we are now using clock_gettime for better
- precision and monotonicity.
-*/
-
-/* Only available for x86 arch */
-#define CLOCK_TRACE_FREQ 14
-#define CLOCK_TRACE 15
-union lttng_timespec {
- struct timespec ts;
- uint64_t lttng_ts;
-};
-
-extern int ust_clock_source;
+/*
+ * Currently using the kernel MONOTONIC clock, waiting for kernel-side
+ * LTTng to implement mmap'd trace clock.
+ */
/* Choosing correct trace clock */
{
struct timespec ts;
uint64_t retval;
- union lttng_timespec *lts = (union lttng_timespec *) &ts;
- clock_gettime(ust_clock_source, &ts);
- /*
- * Clock source can change when loading the binary (tracectl.c)
- * so we must check if the clock source has changed before
- * returning the correct value
- */
- if (likely(ust_clock_source == CLOCK_TRACE)) {
- retval = lts->lttng_ts;
- } else { /* CLOCK_MONOTONIC */
- retval = ts.tv_sec;
- retval *= 1000000000;
- retval += ts.tv_nsec;
- }
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ retval = ts.tv_sec;
+ retval *= 1000000000;
+ retval += ts.tv_nsec;
return retval;
}
#if __i386__ || __x86_64__
static __inline__ uint64_t trace_clock_frequency(void)
-{
- struct timespec ts;
- union lttng_timespec *lts = (union lttng_timespec *) &ts;
-
- if (likely(ust_clock_source == CLOCK_TRACE)) {
- clock_gettime(CLOCK_TRACE_FREQ, &ts);
- return lts->lttng_ts;
- }
- return 1000000000LL;
-}
-#else /* #if __i386__ || __x86_64__ */
-static __inline__ uint64_t trace_clock_frequency(void)
{
return 1000000000LL;
}
int metadata_dumped:1;
};
+struct channel;
+
struct ltt_channel_ops {
- struct channel *(*channel_create)(const char *name,
+ struct shm_handle *(*channel_create)(const char *name,
struct ltt_channel *ltt_chan,
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
- unsigned int read_timer_interval,
- int *shmid);
- void (*channel_destroy)(struct channel *chan);
+ unsigned int read_timer_interval);
+ void (*channel_destroy)(struct shm_handle *handle);
struct lib_ring_buffer *(*buffer_read_open)(struct channel *chan);
void (*buffer_read_close)(struct lib_ring_buffer *buf);
int (*event_reserve)(struct lib_ring_buffer_ctx *ctx,
struct cds_list_head list; /* Channel list */
struct ltt_channel_ops *ops;
int header_type; /* 0: unset, 1: compact, 2: large */
- int shmfd; /* shared-memory file descriptor */
+ struct shm_handle *handle; /* shared-memory handle */
int metadata_dumped:1;
};
* Dual LGPL v2.1/GPL v2 license.
*/
-/* Reset macros used within TRACE_EVENT to "nothing" */
+/* Reset macros used within TRACEPOINT_EVENT to "nothing" */
#undef ctf_integer_ext
#define ctf_integer_ext(_type, _item, _src, _byte_order, _base)
#undef TRACEPOINT_EVENT_CLASS
#define TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields)
+#undef TRACEPOINT_EVENT_CLASS_NOARGS
+#define TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields)
+
#undef TRACEPOINT_EVENT_INSTANCE
#define TRACEPOINT_EVENT_INSTANCE(_template, _name, _proto, _args)
+
+#undef TRACEPOINT_EVENT_INSTANCE_NOARGS
+#define TRACEPOINT_EVENT_INSTANCE_NOARGS(_template, _name)
*/
#include <stdio.h>
+#include <urcu/compiler.h>
#include <ust/lttng-events.h>
+#include <ust/ringbuffer-config.h>
/*
* Macro declarations used for all stages.
TP_PARAMS(fields)) \
TRACEPOINT_EVENT_INSTANCE(name, name, TP_PARAMS(proto), TP_PARAMS(args))
+#undef TRACEPOINT_EVENT_NOARGS
+#define TRACEPOINT_EVENT_NOARGS(name, fields) \
+ TRACEPOINT_EVENT_CLASS_NOARGS(name, \
+ TP_PARAMS(fields)) \
+ TRACEPOINT_EVENT_INSTANCE_NOARGS(name, name)
+
+/* Helpers */
+#define _TP_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
/*
* Stage 1 of the trace events.
*
 * Each event produces an array of fields.
*/
-/* Reset all macros within TRACE_EVENT */
-#include "lttng-tracepoint-events-reset.h"
+/* Reset all macros within TRACEPOINT_EVENT */
+#include <ust/lttng-tracepoint-event-reset.h>
/* Named field types must be defined in lttng-types.h */
#undef TP_FIELDS
#define TP_FIELDS(args...) args /* Only one used in this phase */
-#undef TRACEPOINT_EVENT_CLASS
-#define TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \
+#undef TRACEPOINT_EVENT_CLASS_NOARGS
+#define TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
static const struct lttng_event_field __event_fields___##_name[] = { \
_fields \
};
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef TRACEPOINT_EVENT_CLASS
+#define TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \
+ TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields)
+
+#include TRACEPOINT_INCLUDE(TRACEPOINT_INCLUDE_FILE)
/*
* Stage 2 of the trace events.
* Create probe callback prototypes.
*/
-/* Reset all macros within TRACE_EVENT */
-#include "lttng-tracepoint-events-reset.h"
+/* Reset all macros within TRACEPOINT_EVENT */
+#include <ust/lttng-tracepoint-event-reset.h>
#undef TP_PROTO
#define TP_PROTO(args...) args
#undef TRACEPOINT_EVENT_CLASS
-#define TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \
+#define TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \
static void __event_probe__##_name(void *__data, _proto);
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef TRACEPOINT_EVENT_CLASS_NOARGS
+#define TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
+static void __event_probe__##_name(void *__data);
+
+#include TRACEPOINT_INCLUDE(TRACEPOINT_INCLUDE_FILE)
/*
* Stage 3 of the trace events.
/* Named field types must be defined in lttng-types.h */
-/* Reset all macros within TRACE_EVENT */
-#include "lttng-tracepoint-events-reset.h"
+/* Reset all macros within TRACEPOINT_EVENT */
+#include <ust/lttng-tracepoint-event-reset.h>
-#undef TRACEPOINT_EVENT_INSTANCE
-#define TRACEPOINT_EVENT_INSTANCE(_template, _name, _proto, _args) \
+#undef TRACEPOINT_EVENT_INSTANCE_NOARGS
+#define TRACEPOINT_EVENT_INSTANCE_NOARGS(_template, _name) \
{ \
.fields = __event_fields___##_template, \
.name = #_name, \
.probe_callback = (void *) &__event_probe__##_template,\
- .nr_fields = ARRAY_SIZE(__event_fields___##_template), \
+ .nr_fields = _TP_ARRAY_SIZE(__event_fields___##_template), \
},
+#undef TRACEPOINT_EVENT_INSTANCE
+#define TRACEPOINT_EVENT_INSTANCE(_template, _name, _proto, _args) \
+ TRACEPOINT_EVENT_INSTANCE_NOARGS(_template, _name)
+
#define TP_ID1(_token, _system) _token##_system
#define TP_ID(_token, _system) TP_ID1(_token, _system)
-static const struct lttng_event_desc TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+static const struct lttng_event_desc TP_ID(__event_desc___, TRACEPOINT_SYSTEM)[] = {
+#include TRACEPOINT_INCLUDE(TRACEPOINT_INCLUDE_FILE)
};
#undef TP_ID1
#define TP_ID(_token, _system) TP_ID1(_token, _system)
/* non-const because list head will be modified when registered. */
-static struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
- .event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
- .nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
+static struct lttng_probe_desc TP_ID(__probe_desc___, TRACEPOINT_SYSTEM) = {
+ .event_desc = TP_ID(__event_desc___, TRACEPOINT_SYSTEM),
+ .nr_events = _TP_ARRAY_SIZE(TP_ID(__event_desc___, TRACEPOINT_SYSTEM)),
};
#undef TP_ID1
* Create static inline function that calculates event size.
*/
-/* Reset all macros within TRACE_EVENT */
-#include "lttng-tracepoint-events-reset.h"
+/* Reset all macros within TRACEPOINT_EVENT */
+#include <ust/lttng-tracepoint-event-reset.h>
/* Named field types must be defined in lttng-types.h */
return __event_len; \
}
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef TRACEPOINT_EVENT_CLASS_NOARGS
+#define TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
+static inline size_t __event_get_size__##_name(size_t *__dynamic_len) \
+{ \
+ size_t __event_len = 0; \
+ unsigned int __dynamic_len_idx = 0; \
+ \
+ if (0) \
+ (void) __dynamic_len_idx; /* don't warn if unused */ \
+ _fields \
+ return __event_len; \
+}
+
+#include TRACEPOINT_INCLUDE(TRACEPOINT_INCLUDE_FILE)
/*
* Stage 6 of the trace events.
* Create static inline function that calculates event payload alignment.
*/
-/* Reset all macros within TRACE_EVENT */
-#include "lttng-tracepoint-events-reset.h"
+/* Reset all macros within TRACEPOINT_EVENT */
+#include <ust/lttng-tracepoint-event-reset.h>
/* Named field types must be defined in lttng-types.h */
return __event_align; \
}
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef TRACEPOINT_EVENT_CLASS_NOARGS
+#define TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
+static inline size_t __event_get_align__##_name(void) \
+{ \
+ size_t __event_align = 1; \
+ _fields \
+ return __event_align; \
+}
+
+#include TRACEPOINT_INCLUDE(TRACEPOINT_INCLUDE_FILE)
/*
* execution order, jumping to the appropriate assignment block.
*/
-/* Reset all macros within TRACE_EVENT */
-#include "lttng-tracepoint-events-reset.h"
+/* Reset all macros within TRACEPOINT_EVENT */
+#include <ust/lttng-tracepoint-event-reset.h>
#undef ctf_integer_ext
#define ctf_integer_ext(_type, _item, _src, _byte_order, _base) \
struct lib_ring_buffer_ctx ctx; \
size_t __event_len, __event_align; \
size_t __dynamic_len_idx = 0; \
- size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
+ size_t __dynamic_len[_TP_ARRAY_SIZE(__event_fields___##_name)]; \
int __ret; \
\
if (0) \
(void) __dynamic_len_idx; /* don't warn if unused */ \
- if (unlikely(!ACCESS_ONCE(__chan->session->active))) \
+ if (unlikely(!CMM_ACCESS_ONCE(__chan->session->active))) \
return; \
- if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
+ if (unlikely(!CMM_ACCESS_ONCE(__chan->enabled))) \
return; \
- if (unlikely(!ACCESS_ONCE(__event->enabled))) \
+ if (unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
return; \
__event_len = __event_get_size__##_name(__dynamic_len, _args); \
__event_align = __event_get_align__##_name(_args); \
__chan->ops->event_commit(&ctx); \
}
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef TRACEPOINT_EVENT_CLASS_NOARGS
+#define TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
+static void __event_probe__##_name(void *__data) \
+{ \
+ struct ltt_event *__event = __data; \
+ struct ltt_channel *__chan = __event->chan; \
+ struct lib_ring_buffer_ctx ctx; \
+ size_t __event_len, __event_align; \
+ size_t __dynamic_len_idx = 0; \
+ size_t __dynamic_len[_TP_ARRAY_SIZE(__event_fields___##_name)]; \
+ int __ret; \
+ \
+ if (0) \
+ (void) __dynamic_len_idx; /* don't warn if unused */ \
+ if (unlikely(!CMM_ACCESS_ONCE(__chan->session->active))) \
+ return; \
+ if (unlikely(!CMM_ACCESS_ONCE(__chan->enabled))) \
+ return; \
+ if (unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
+ return; \
+ __event_len = __event_get_size__##_name(__dynamic_len); \
+ __event_align = __event_get_align__##_name(); \
+ lib_ring_buffer_ctx_init(&ctx, __chan->chan, __event, __event_len, \
+ __event_align, -1); \
+ __ret = __chan->ops->event_reserve(&ctx, __event->id); \
+ if (__ret < 0) \
+ return; \
+ _fields \
+ __chan->ops->event_commit(&ctx); \
+}
+
+#include TRACEPOINT_INCLUDE(TRACEPOINT_INCLUDE_FILE)
/*
* Stage 8 of the trace events.
* Register/unregister probes at module load/unload.
*/
-/* Reset all macros within TRACE_EVENT */
-#include "lttng-tracepoint-events-reset.h"
+/* Reset all macros within TRACEPOINT_EVENT */
+#include <ust/lttng-tracepoint-event-reset.h>
#define TP_ID1(_token, _system) _token##_system
#define TP_ID(_token, _system) TP_ID1(_token, _system)
static void __attribute__((constructor))
-TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
+TP_ID(__lttng_events_init__, TRACEPOINT_SYSTEM)(void)
{
int ret;
- ret = ltt_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
+ ret = ltt_probe_register(&TP_ID(__probe_desc___, TRACEPOINT_SYSTEM));
assert(!ret);
}
static void __attribute__((destructor))
-TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
+TP_ID(__lttng_events_exit__, TRACEPOINT_SYSTEM)(void)
{
- ltt_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
+ ltt_probe_unregister(&TP_ID(__probe_desc___, TRACEPOINT_SYSTEM));
}
#undef TP_ID1
--- /dev/null
+#ifndef _LINUX_RING_BUFFER_CONFIG_H
+#define _LINUX_RING_BUFFER_CONFIG_H
+
+/*
+ * linux/ringbuffer/config.h
+ *
+ * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Ring buffer configuration header. Note: after declaring the standard inline
+ * functions, clients should also include linux/ringbuffer/api.h.
+ *
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+
+#include <errno.h>
+#include "ust/kcompat/kcompat.h"
+#include "ust/align.h"
+
+struct lib_ring_buffer;
+struct channel;
+struct lib_ring_buffer_config;
+struct lib_ring_buffer_ctx;
+
+/*
+ * Ring buffer client callbacks. Only used by slow path, never on fast path.
+ * For the fast path, record_header_size(), ring_buffer_clock_read() should be
+ * provided as inline functions too. These may simply return 0 if not used by
+ * the client.
+ */
+struct lib_ring_buffer_client_cb {
+ /* Mandatory callbacks */
+
+ /* A static inline version is also required for fast path */
+ u64 (*ring_buffer_clock_read) (struct channel *chan);
+ size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
+ struct channel *chan, size_t offset,
+ size_t *pre_header_padding,
+ struct lib_ring_buffer_ctx *ctx);
+
+ /* Slow path only, at subbuffer switch */
+ size_t (*subbuffer_header_size) (void);
+ void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
+ unsigned int subbuf_idx);
+ void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
+ unsigned int subbuf_idx, unsigned long data_size);
+
+ /* Optional callbacks (can be set to NULL) */
+
+ /* Called at buffer creation/finalize */
+ int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
+ int cpu, const char *name);
+ /*
+ * Clients should guarantee that no new reader handle can be opened
+ * after finalize.
+ */
+ void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);
+
+ /*
+ * Extract header length, payload length and timestamp from event
+ * record. Used by buffer iterators. Timestamp is only used by channel
+ * iterator.
+ */
+ void (*record_get) (const struct lib_ring_buffer_config *config,
+ struct channel *chan, struct lib_ring_buffer *buf,
+ size_t offset, size_t *header_len,
+ size_t *payload_len, u64 *timestamp);
+};
+
+/*
+ * Ring buffer instance configuration.
+ *
+ * Declare as "static const" within the client object to ensure the inline fast
+ * paths can be optimized.
+ *
+ * alloc/sync pairs:
+ *
+ * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
+ * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
+ * with preemption disabled (lib_ring_buffer_get_cpu() and
+ * lib_ring_buffer_put_cpu()).
+ *
+ * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
+ * Per-cpu buffer with global synchronization. Tracing can be performed with
+ * preemption enabled, statistically stays on the local buffers.
+ *
+ * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
+ * Should only be used for buffers belonging to a single thread or protected
+ * by mutual exclusion by the client. Note that periodical sub-buffer switch
+ * should be disabled in this kind of configuration.
+ *
+ * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
+ * Global shared buffer with global synchronization.
+ *
+ * wakeup:
+ *
+ * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
+ * buffers and wake up readers if data is ready. Mainly useful for tracers which
+ * don't want to call into the wakeup code on the tracing path. Use in
+ * combination with "read_timer_interval" channel_create() argument.
+ *
+ * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
+ * ready to read. Lower latencies before the reader is woken up. Mainly suitable
+ * for drivers.
+ *
+ * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
+ * has the responsibility to perform wakeups.
+ */
+struct lib_ring_buffer_config {
+ enum {
+ RING_BUFFER_ALLOC_PER_CPU,
+ RING_BUFFER_ALLOC_GLOBAL,
+ } alloc;
+ enum {
+ RING_BUFFER_SYNC_PER_CPU, /* Wait-free */
+ RING_BUFFER_SYNC_GLOBAL, /* Lock-free */
+ } sync;
+ enum {
+ RING_BUFFER_OVERWRITE, /* Overwrite when buffer full */
+ RING_BUFFER_DISCARD, /* Discard when buffer full */
+ } mode;
+ enum {
+ RING_BUFFER_SPLICE,
+ RING_BUFFER_MMAP,
+ RING_BUFFER_READ, /* TODO */
+ RING_BUFFER_ITERATOR,
+ RING_BUFFER_NONE,
+ } output;
+ enum {
+ RING_BUFFER_PAGE,
+ RING_BUFFER_VMAP, /* TODO */
+ RING_BUFFER_STATIC, /* TODO */
+ } backend;
+ enum {
+ RING_BUFFER_NO_OOPS_CONSISTENCY,
+ RING_BUFFER_OOPS_CONSISTENCY,
+ } oops;
+ enum {
+ RING_BUFFER_IPI_BARRIER,
+ RING_BUFFER_NO_IPI_BARRIER,
+ } ipi;
+ enum {
+ RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
+ RING_BUFFER_WAKEUP_BY_WRITER, /*
+ * writer wakes up reader,
+ * not lock-free
+ * (takes spinlock).
+ */
+ } wakeup;
+ /*
+ * tsc_bits: timestamp bits saved at each record.
+ * 0 and 64 disable the timestamp compression scheme.
+ */
+ unsigned int tsc_bits;
+ struct lib_ring_buffer_client_cb cb;
+};
+
+/*
+ * ring buffer context
+ *
+ * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
+ * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
+ * lib_ring_buffer_write().
+ */
+struct lib_ring_buffer_ctx {
+ /* input received by lib_ring_buffer_reserve(), saved here. */
+ struct channel *chan; /* channel */
+ void *priv; /* client private data */
+ size_t data_size; /* size of payload */
+ int largest_align; /*
+ * alignment of the largest element
+ * in the payload
+ */
+ int cpu; /* processor id */
+
+ /* output from lib_ring_buffer_reserve() */
+ struct lib_ring_buffer *buf; /*
+ * buffer corresponding to processor id
+ * for this channel
+ */
+ size_t slot_size; /* size of the reserved slot */
+ unsigned long buf_offset; /* offset following the record header */
+ unsigned long pre_offset; /*
+ * Initial offset position _before_
+ * the record is written. Positioned
+ * prior to record header alignment
+ * padding.
+ */
+ u64 tsc; /* time-stamp counter value */
+ unsigned int rflags; /* reservation flags */
+};
+
+/**
+ * lib_ring_buffer_ctx_init - initialize ring buffer context
+ * @ctx: ring buffer context to initialize
+ * @chan: channel
+ * @priv: client private data
+ * @data_size: size of record data payload
+ * @largest_align: largest alignment within data payload types
+ * @cpu: processor id
+ *
+ * Fills only the "input" half of the context; the output fields (buf,
+ * slot_size, buf_offset, pre_offset, tsc) are set later by
+ * lib_ring_buffer_reserve(). rflags starts cleared.
+ */
+static inline
+void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
+			      struct channel *chan, void *priv,
+			      size_t data_size, int largest_align,
+			      int cpu)
+{
+	ctx->chan = chan;
+	ctx->priv = priv;
+	ctx->data_size = data_size;
+	ctx->largest_align = largest_align;
+	ctx->cpu = cpu;
+	ctx->rflags = 0;
+}
+
+/*
+ * Reservation flags.
+ *
+ * RING_BUFFER_RFLAG_FULL_TSC
+ *
+ * This flag is passed to record_header_size() and to the primitive used to
+ * write the record header. It indicates that the full 64-bit time value is
+ * needed in the record header. If this flag is not set, the record header needs
+ * only to contain "tsc_bits" bit of time value.
+ *
+ * Reservation flags can be added by the client, starting from
+ * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
+ * record_header_size() to lib_ring_buffer_write_record_header().
+ */
+#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
+#define RING_BUFFER_RFLAG_END (1U << 1)
+
+/*
+ * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
+ * compile-time. We have to duplicate the "config->align" information and the
+ * definition here because config->align is used both in the slow and fast
+ * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
+ */
+#ifdef RING_BUFFER_ALIGN
+
+# define RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
+
+/*
+ * Calculate the offset needed to align the type.
+ * size_of_type must be non-zero.
+ *
+ * Natural-alignment build (RING_BUFFER_ALIGN defined): delegates to
+ * offset_align() from ust/align.h.
+ */
+static inline
+unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
+{
+	return offset_align(align_drift, size_of_type);
+}
+
+#else
+
+# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
+
+/*
+ * Calculate the offset needed to align the type.
+ * size_of_type must be non-zero.
+ *
+ * Packed build (RING_BUFFER_ALIGN not defined): records are laid out
+ * __attribute__((packed)), so no alignment padding is ever inserted.
+ */
+static inline
+unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
+{
+	return 0;
+}
+
+#endif
+
+/**
+ * lib_ring_buffer_align_ctx - Align context offset on "alignment"
+ * @ctx: ring buffer context.
+ * @alignment: required alignment of the next write, in bytes (non-zero,
+ *             per the lib_ring_buffer_align() contract).
+ */
+static inline
+void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
+			       size_t alignment)
+{
+	ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
+						 alignment);
+}
+
+/*
+ * lib_ring_buffer_check_config() returns 0 on success.
+ * Used internally to check for valid configurations at channel creation.
+ */
+static inline
+int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
+				 unsigned int switch_timer_interval,
+				 unsigned int read_timer_interval)
+{
+	/*
+	 * Per the alloc/sync pairing notes above: a global buffer with
+	 * per-cpu synchronization must not use a periodical sub-buffer
+	 * switch timer.
+	 */
+	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
+	    && config->sync == RING_BUFFER_SYNC_PER_CPU
+	    && switch_timer_interval)
+		return -EINVAL;
+	return 0;
+}
+
+#include <ust/vatomic.h>
+
+#endif /* _LINUX_RING_BUFFER_CONFIG_H */
* in their file should include this file. The following are macros that the
* trace file may define:
*
- * TRACE_SYSTEM defines the system the tracepoint is for
+ * TRACEPOINT_SYSTEM defines the system the tracepoint is for:
+ * < [com_company_]project_[component_] >
*
- * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h
- * This macro may be defined to tell define_trace.h what file to include.
- * Note, leave off the ".h".
+ * TRACEPOINT_INCLUDE_FILE if the file name is something other than
+ * TRACEPOINT_SYSTEM.h. This macro may be defined to tell
+ * define_trace.h what file to include. Note, leave off the ".h".
*
- * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace
- * then this macro can define the path to use. Note, the path is relative to
- * tracepoint_event.h, not the file including it. Full path names for out of tree
- * modules must be used.
+ * TRACEPOINT_INCLUDE_PATH if the path is something other than core
+ * kernel include/trace then this macro can define the path to use.
+ * Note, the path is relative to tracepoint_event.h, not the file
+ * including it. Full path names for out of tree modules must be
+ * used.
*/
#ifdef TRACEPOINT_CREATE_PROBES
#define TRACEPOINT_EVENT_INSTANCE_NOARGS(template, name) \
_DEFINE_TRACEPOINT(name)
-#undef TRACE_INCLUDE
-#undef __TRACE_INCLUDE
+#undef TRACEPOINT_INCLUDE
+#undef __TRACEPOINT_INCLUDE
-#ifndef TRACE_INCLUDE_FILE
-# define TRACE_INCLUDE_FILE TRACE_SYSTEM
-# define UNDEF_TRACE_INCLUDE_FILE
+#ifndef TRACEPOINT_INCLUDE_FILE
+# define TRACEPOINT_INCLUDE_FILE TRACEPOINT_SYSTEM
+# define UNDEF_TRACEPOINT_INCLUDE_FILE
#endif
-#ifndef TRACE_INCLUDE_PATH
-# define __TRACE_INCLUDE(system) <trace/events/system.h>
-# define UNDEF_TRACE_INCLUDE_PATH
+#ifndef TRACEPOINT_INCLUDE_PATH
+# define __TRACEPOINT_INCLUDE(system) <tracepoint/system.h>
+# define UNDEF_TRACEPOINT_INCLUDE_PATH
#else
-# define __TRACE_INCLUDE(system) __tp_stringify(TRACE_INCLUDE_PATH/system.h)
+# define __TRACEPOINT_INCLUDE(system) \
+ __tp_stringify(TRACEPOINT_INCLUDE_PATH/system.h)
#endif
-# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system)
+# define TRACEPOINT_INCLUDE(system) __TRACEPOINT_INCLUDE(system)
/* Let the trace headers be reread */
-#define TRACE_HEADER_MULTI_READ
+#define TRACEPOINT_HEADER_MULTI_READ
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#include TRACEPOINT_INCLUDE(TRACEPOINT_INCLUDE_FILE)
#ifndef CONFIG_NO_EVENT_TRACING
#include <ust/lttng-tracepoint-event.h>
#undef TRACEPOINT_EVENT_NOARGS
#undef TRACEPOINT_EVENT_CLASS_NOARGS
#undef TRACEPOINT_EVENT_INSTANCE_NOARGS
-#undef TRACE_HEADER_MULTI_READ
+#undef TRACEPOINT_HEADER_MULTI_READ
/* Only undef what we defined in this file */
-#ifdef UNDEF_TRACE_INCLUDE_FILE
-# undef TRACE_INCLUDE_FILE
-# undef UNDEF_TRACE_INCLUDE_FILE
+#ifdef UNDEF_TRACEPOINT_INCLUDE_FILE
+# undef TRACEPOINT_INCLUDE_FILE
+# undef UNDEF_TRACEPOINT_INCLUDE_FILE
#endif
-#ifdef UNDEF_TRACE_INCLUDE_PATH
-# undef TRACE_INCLUDE_PATH
-# undef UNDEF_TRACE_INCLUDE_PATH
+#ifdef UNDEF_TRACEPOINT_INCLUDE_PATH
+# undef TRACEPOINT_INCLUDE_PATH
+# undef UNDEF_TRACEPOINT_INCLUDE_PATH
#endif
/* We may be processing more files */
--- /dev/null
+#ifndef _LINUX_RING_BUFFER_VATOMIC_H
+#define _LINUX_RING_BUFFER_VATOMIC_H
+
+/*
+ * linux/ringbuffer/vatomic.h
+ *
+ * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+
+#include <assert.h>
+#include <urcu/uatomic.h>
+
+/*
+ * Same data type (long) accessed differently depending on configuration.
+ * v field is for non-atomic access (protected by mutual exclusion).
+ * In the fast-path, the ring_buffer_config structure is constant, so the
+ * compiler can statically select the appropriate branch.
+ * local_t is used for per-cpu and per-thread buffers.
+ * atomic_long_t is used for globally shared buffers.
+ */
+union v_atomic {
+ long a; /* accessed through uatomic */
+ long v;
+};
+
+static inline
+long v_read(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+{
+	/* Only globally-synchronized (uatomic) access is supported here. */
+	assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
+	return uatomic_read(&v_a->a);
+}
+
+static inline
+void v_set(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
+	   long v)
+{
+	/* Only globally-synchronized (uatomic) access is supported here. */
+	assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
+	uatomic_set(&v_a->a, v);
+}
+
+static inline
+void v_add(const struct lib_ring_buffer_config *config, long v, union v_atomic *v_a)
+{
+	/* Only globally-synchronized (uatomic) access is supported here. */
+	assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
+	uatomic_add(&v_a->a, v);
+}
+
+static inline
+void v_inc(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+{
+	/* Only globally-synchronized (uatomic) access is supported here. */
+	assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
+	uatomic_inc(&v_a->a);
+}
+
+/*
+ * Non-atomic decrement. Only used by reader, apply to reader-owned subbuffer.
+ */
+static inline
+void _v_dec(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+{
+ --v_a->v;
+}
+
+static inline
+long v_cmpxchg(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
+	       long old, long _new)
+{
+	/* Only globally-synchronized (uatomic) access is supported here. */
+	assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
+	/* Returns the previously-stored value (uatomic_cmpxchg semantics). */
+	return uatomic_cmpxchg(&v_a->a, old, _new);
+}
+
+#endif /* _LINUX_RING_BUFFER_VATOMIC_H */
#include <unistd.h>
#include <urcu/compiler.h>
-#include "config.h"
+#include <ust/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"
+++ /dev/null
-#ifndef _LINUX_RING_BUFFER_CONFIG_H
-#define _LINUX_RING_BUFFER_CONFIG_H
-
-/*
- * linux/ringbuffer/config.h
- *
- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring buffer configuration header. Note: after declaring the standard inline
- * functions, clients should also include linux/ringbuffer/api.h.
- *
- * Dual LGPL v2.1/GPL v2 license.
- */
-
-#include <errno.h>
-#include "ust/kcompat/kcompat.h"
-#include "ust/align.h"
-
-struct lib_ring_buffer;
-struct channel;
-struct lib_ring_buffer_config;
-struct lib_ring_buffer_ctx;
-
-/*
- * Ring buffer client callbacks. Only used by slow path, never on fast path.
- * For the fast path, record_header_size(), ring_buffer_clock_read() should be
- * provided as inline functions too. These may simply return 0 if not used by
- * the client.
- */
-struct lib_ring_buffer_client_cb {
- /* Mandatory callbacks */
-
- /* A static inline version is also required for fast path */
- u64 (*ring_buffer_clock_read) (struct channel *chan);
- size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
- struct channel *chan, size_t offset,
- size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx);
-
- /* Slow path only, at subbuffer switch */
- size_t (*subbuffer_header_size) (void);
- void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
- unsigned int subbuf_idx);
- void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
- unsigned int subbuf_idx, unsigned long data_size);
-
- /* Optional callbacks (can be set to NULL) */
-
- /* Called at buffer creation/finalize */
- int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
- int cpu, const char *name);
- /*
- * Clients should guarantee that no new reader handle can be opened
- * after finalize.
- */
- void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);
-
- /*
- * Extract header length, payload length and timestamp from event
- * record. Used by buffer iterators. Timestamp is only used by channel
- * iterator.
- */
- void (*record_get) (const struct lib_ring_buffer_config *config,
- struct channel *chan, struct lib_ring_buffer *buf,
- size_t offset, size_t *header_len,
- size_t *payload_len, u64 *timestamp);
-};
-
-/*
- * Ring buffer instance configuration.
- *
- * Declare as "static const" within the client object to ensure the inline fast
- * paths can be optimized.
- *
- * alloc/sync pairs:
- *
- * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
- * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
- * with preemption disabled (lib_ring_buffer_get_cpu() and
- * lib_ring_buffer_put_cpu()).
- *
- * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
- * Per-cpu buffer with global synchronization. Tracing can be performed with
- * preemption enabled, statistically stays on the local buffers.
- *
- * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
- * Should only be used for buffers belonging to a single thread or protected
- * by mutual exclusion by the client. Note that periodical sub-buffer switch
- * should be disabled in this kind of configuration.
- *
- * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
- * Global shared buffer with global synchronization.
- *
- * wakeup:
- *
- * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
- * buffers and wake up readers if data is ready. Mainly useful for tracers which
- * don't want to call into the wakeup code on the tracing path. Use in
- * combination with "read_timer_interval" channel_create() argument.
- *
- * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
- * ready to read. Lower latencies before the reader is woken up. Mainly suitable
- * for drivers.
- *
- * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
- * has the responsibility to perform wakeups.
- */
-struct lib_ring_buffer_config {
- enum {
- RING_BUFFER_ALLOC_PER_CPU,
- RING_BUFFER_ALLOC_GLOBAL,
- } alloc;
- enum {
- RING_BUFFER_SYNC_PER_CPU, /* Wait-free */
- RING_BUFFER_SYNC_GLOBAL, /* Lock-free */
- } sync;
- enum {
- RING_BUFFER_OVERWRITE, /* Overwrite when buffer full */
- RING_BUFFER_DISCARD, /* Discard when buffer full */
- } mode;
- enum {
- RING_BUFFER_SPLICE,
- RING_BUFFER_MMAP,
- RING_BUFFER_READ, /* TODO */
- RING_BUFFER_ITERATOR,
- RING_BUFFER_NONE,
- } output;
- enum {
- RING_BUFFER_PAGE,
- RING_BUFFER_VMAP, /* TODO */
- RING_BUFFER_STATIC, /* TODO */
- } backend;
- enum {
- RING_BUFFER_NO_OOPS_CONSISTENCY,
- RING_BUFFER_OOPS_CONSISTENCY,
- } oops;
- enum {
- RING_BUFFER_IPI_BARRIER,
- RING_BUFFER_NO_IPI_BARRIER,
- } ipi;
- enum {
- RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
- RING_BUFFER_WAKEUP_BY_WRITER, /*
- * writer wakes up reader,
- * not lock-free
- * (takes spinlock).
- */
- } wakeup;
- /*
- * tsc_bits: timestamp bits saved at each record.
- * 0 and 64 disable the timestamp compression scheme.
- */
- unsigned int tsc_bits;
- struct lib_ring_buffer_client_cb cb;
-};
-
-/*
- * ring buffer context
- *
- * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
- * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
- * lib_ring_buffer_write().
- */
-struct lib_ring_buffer_ctx {
- /* input received by lib_ring_buffer_reserve(), saved here. */
- struct channel *chan; /* channel */
- void *priv; /* client private data */
- size_t data_size; /* size of payload */
- int largest_align; /*
- * alignment of the largest element
- * in the payload
- */
- int cpu; /* processor id */
-
- /* output from lib_ring_buffer_reserve() */
- struct lib_ring_buffer *buf; /*
- * buffer corresponding to processor id
- * for this channel
- */
- size_t slot_size; /* size of the reserved slot */
- unsigned long buf_offset; /* offset following the record header */
- unsigned long pre_offset; /*
- * Initial offset position _before_
- * the record is written. Positioned
- * prior to record header alignment
- * padding.
- */
- u64 tsc; /* time-stamp counter value */
- unsigned int rflags; /* reservation flags */
-};
-
-/**
- * lib_ring_buffer_ctx_init - initialize ring buffer context
- * @ctx: ring buffer context to initialize
- * @chan: channel
- * @priv: client private data
- * @data_size: size of record data payload
- * @largest_align: largest alignment within data payload types
- * @cpu: processor id
- */
-static inline
-void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
- struct channel *chan, void *priv,
- size_t data_size, int largest_align,
- int cpu)
-{
- ctx->chan = chan;
- ctx->priv = priv;
- ctx->data_size = data_size;
- ctx->largest_align = largest_align;
- ctx->cpu = cpu;
- ctx->rflags = 0;
-}
-
-/*
- * Reservation flags.
- *
- * RING_BUFFER_RFLAG_FULL_TSC
- *
- * This flag is passed to record_header_size() and to the primitive used to
- * write the record header. It indicates that the full 64-bit time value is
- * needed in the record header. If this flag is not set, the record header needs
- * only to contain "tsc_bits" bit of time value.
- *
- * Reservation flags can be added by the client, starting from
- * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
- * record_header_size() to lib_ring_buffer_write_record_header().
- */
-#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
-#define RING_BUFFER_RFLAG_END (1U << 1)
-
-/*
- * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
- * compile-time. We have to duplicate the "config->align" information and the
- * definition here because config->align is used both in the slow and fast
- * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
- */
-#ifdef RING_BUFFER_ALIGN
-
-# define RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
-
-/*
- * Calculate the offset needed to align the type.
- * size_of_type must be non-zero.
- */
-static inline
-unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
-{
- return offset_align(align_drift, size_of_type);
-}
-
-#else
-
-# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
-
-/*
- * Calculate the offset needed to align the type.
- * size_of_type must be non-zero.
- */
-static inline
-unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
-{
- return 0;
-}
-
-#endif
-
-/**
- * lib_ring_buffer_align_ctx - Align context offset on "alignment"
- * @ctx: ring buffer context.
- */
-static inline
-void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
- size_t alignment)
-{
- ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
- alignment);
-}
-
-/*
- * lib_ring_buffer_check_config() returns 0 on success.
- * Used internally to check for valid configurations at channel creation.
- */
-static inline
-int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
- unsigned int switch_timer_interval,
- unsigned int read_timer_interval)
-{
- if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
- && config->sync == RING_BUFFER_SYNC_PER_CPU
- && switch_timer_interval)
- return -EINVAL;
- return 0;
-}
-
-#include "vatomic.h"
-
-#endif /* _LINUX_RING_BUFFER_CONFIG_H */
#include <urcu/compiler.h>
-#include "config.h"
+#include <ust/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"
#include "ust/core.h"
#include "usterr_signal_safe.h"
-#include "config.h"
+#include <ust/ringbuffer-config.h>
#include "backend_types.h"
#include "shm.h"
#include "ust/core.h"
-#include "config.h"
+#include <ust/ringbuffer-config.h>
#include "backend.h"
#include "frontend.h"
#include "smp.h"
#include <urcu/ref.h>
#include "smp.h"
-#include "config.h"
+#include <ust/ringbuffer-config.h>
#include "backend.h"
#include "frontend.h"
#include "shm.h"
lib_ring_buffer_stop_switch_timer(buf);
lib_ring_buffer_stop_read_timer(buf);
}
- channel_backend_unregister_notifiers(&chan->backend);
+ //channel_backend_unregister_notifiers(&chan->backend);
}
static void channel_free(struct shm_handle *handle)
* padding to let readers get those sub-buffers.
* Used for live streaming.
* @read_timer_interval: Time interval (in us) to wake up pending readers.
- * @shmfd: shared memory file descriptor (output, needs to be closed by
- * the caller)
*
* Holds cpu hotplug.
* Returns NULL on failure.
+++ /dev/null
-#ifndef _LINUX_RING_BUFFER_VATOMIC_H
-#define _LINUX_RING_BUFFER_VATOMIC_H
-
-/*
- * linux/ringbuffer/vatomic.h
- *
- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Dual LGPL v2.1/GPL v2 license.
- */
-
-#include <assert.h>
-#include <urcu/uatomic.h>
-
-/*
- * Same data type (long) accessed differently depending on configuration.
- * v field is for non-atomic access (protected by mutual exclusion).
- * In the fast-path, the ring_buffer_config structure is constant, so the
- * compiler can statically select the appropriate branch.
- * local_t is used for per-cpu and per-thread buffers.
- * atomic_long_t is used for globally shared buffers.
- */
-union v_atomic {
- long a; /* accessed through uatomic */
- long v;
-};
-
-static inline
-long v_read(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
-{
- assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
- return uatomic_read(&v_a->a);
-}
-
-static inline
-void v_set(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
- long v)
-{
- assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
- uatomic_set(&v_a->a, v);
-}
-
-static inline
-void v_add(const struct lib_ring_buffer_config *config, long v, union v_atomic *v_a)
-{
- assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
- uatomic_add(&v_a->a, v);
-}
-
-static inline
-void v_inc(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
-{
- assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
- uatomic_inc(&v_a->a);
-}
-
-/*
- * Non-atomic decrement. Only used by reader, apply to reader-owned subbuffer.
- */
-static inline
-void _v_dec(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
-{
- --v_a->v;
-}
-
-static inline
-long v_cmpxchg(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
- long old, long _new)
-{
- assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
- return uatomic_cmpxchg(&v_a->a, old, _new);
-}
-
-#endif /* _LINUX_RING_BUFFER_VATOMIC_H */
* published by the Free Software Foundation.
*/
-#include <ust/marker.h>
+//DISABLED #include <ust/marker.h>
#include <ust/tracepoint.h>
-UST_MARKER_LIB;
+//DISABLED UST_MARKER_LIB;
TRACEPOINT_LIB;
-TRACEPOINT_EVENT_LIB;
libust_la_SOURCES = \
tracepoint.c \
- trace_event.c \
ltt-tracer.h \
ltt-tracer-core.h \
ltt-ring-buffer-client.h \
ltt-ring-buffer-metadata-client.h \
ltt-ring-buffer-metadata-client.c \
ltt-events.c \
- lttng-ust-abi.c
+ ltt-context.c \
+ ltt-probes.c \
+ lttng-ust-abi.c \
+ lttng-ust-comm.c
#removed: buffers.c buffers.h
libust_la_LIBADD = \
-lpthread \
-lrt \
- $(top_builddir)/snprintf/libustsnprintf.la
+ -luuid \
+ $(top_builddir)/snprintf/libustsnprintf.la \
+ $(top_builddir)/libringbuffer/libringbuffer.la
libust_la_CFLAGS = -DUST_COMPONENT="libust" -fno-strict-aliasing
* Dual LGPL v2.1/GPL v2 license.
*/
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
#include <ust/lttng-events.h>
-#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-#include "ltt-tracer.h"
+#include <ust/core.h>
+#include <string.h>
+#include <usterr_signal_safe.h>
/*
* Note: as we append context information, the pointer location may change.
struct lttng_ctx *ctx;
if (!*ctx_p) {
- *ctx_p = kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL);
+ *ctx_p = zmalloc(sizeof(struct lttng_ctx));
if (!*ctx_p)
return NULL;
}
struct lttng_ctx_field *new_fields;
ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
- new_fields = kzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
+ new_fields = zmalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field));
if (!new_fields)
return NULL;
if (ctx->fields)
memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
- kfree(ctx->fields);
+ free(ctx->fields);
ctx->fields = new_fields;
}
field = &ctx->fields[ctx->nr_fields];
ctx->nr_fields++;
return field;
}
-EXPORT_SYMBOL_GPL(lttng_append_context);
/*
* Remove last context field.
WARN_ON_ONCE(&ctx->fields[ctx->nr_fields] != field);
memset(&ctx->fields[ctx->nr_fields], 0, sizeof(struct lttng_ctx_field));
}
-EXPORT_SYMBOL_GPL(lttng_remove_context_field);
void lttng_destroy_context(struct lttng_ctx *ctx)
{
if (ctx->fields[i].destroy)
ctx->fields[i].destroy(&ctx->fields[i]);
}
- kfree(ctx->fields);
- kfree(ctx);
+ free(ctx->fields);
+ free(ctx);
}
#include "ust/core.h"
#include "ltt-tracer.h"
#include "ust/wait.h"
+#include "../libringbuffer/shm.h"
static CDS_LIST_HEAD(sessions);
static CDS_LIST_HEAD(ltt_transport_list);
goto nomem;
chan->session = session;
chan->id = session->free_chan_id++;
- //chan->shmid = shmget(getpid(), shmlen, IPC_CREAT | IPC_EXCL | 0700);
/*
* Note: the channel creation op already writes into the packet
* headers. Therefore the "chan" information used as input
* should be already accessible.
*/
- chan->chan = transport->ops.channel_create("[lttng]", chan, buf_addr,
+ chan->handle = transport->ops.channel_create("[lttng]", chan, buf_addr,
subbuf_size, num_subbuf, switch_timer_interval,
- read_timer_interval, shmid);
+ read_timer_interval);
+ chan->chan = shmp(chan->handle->header->chan);
if (!chan->chan)
goto create_error;
chan->enabled = 1;
static
void _ltt_channel_destroy(struct ltt_channel *chan)
{
- chan->ops->channel_destroy(chan->chan);
+ chan->ops->channel_destroy(chan->handle);
cds_list_del(&chan->list);
lttng_destroy_context(chan->ctx);
free(chan);
* Dual LGPL v2.1/GPL v2 license.
*/
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/seq_file.h>
+#include <string.h>
+#include <errno.h>
+#include <urcu/list.h>
+#include <ust/core.h>
#include <ust/lttng-events.h>
-static LIST_HEAD(probe_list);
+static CDS_LIST_HEAD(probe_list);
static DEFINE_MUTEX(probe_mutex);
static
struct lttng_probe_desc *probe_desc;
int i;
- list_for_each_entry(probe_desc, &probe_list, head) {
+ cds_list_for_each_entry(probe_desc, &probe_list, head) {
for (i = 0; i < probe_desc->nr_events; i++) {
if (!strcmp(probe_desc->event_desc[i].name, name))
return &probe_desc->event_desc[i];
int ret = 0;
int i;
- mutex_lock(&probe_mutex);
+ pthread_mutex_lock(&probe_mutex);
/*
* TODO: This is O(N^2). Turn into a hash table when probe registration
* overhead becomes an issue.
goto end;
}
}
- list_add(&desc->head, &probe_list);
+ cds_list_add(&desc->head, &probe_list);
end:
- mutex_unlock(&probe_mutex);
+ pthread_mutex_unlock(&probe_mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(ltt_probe_register);
void ltt_probe_unregister(struct lttng_probe_desc *desc)
{
- mutex_lock(&probe_mutex);
- list_del(&desc->head);
- mutex_unlock(&probe_mutex);
+ pthread_mutex_lock(&probe_mutex);
+ cds_list_del(&desc->head);
+ pthread_mutex_unlock(&probe_mutex);
}
-EXPORT_SYMBOL_GPL(ltt_probe_unregister);
const struct lttng_event_desc *ltt_event_get(const char *name)
{
const struct lttng_event_desc *event;
- int ret;
- mutex_lock(&probe_mutex);
+ pthread_mutex_lock(&probe_mutex);
event = find_event(name);
- mutex_unlock(&probe_mutex);
+ pthread_mutex_unlock(&probe_mutex);
if (!event)
return NULL;
- ret = try_module_get(event->owner);
- WARN_ON_ONCE(!ret);
return event;
}
-EXPORT_SYMBOL_GPL(ltt_event_get);
void ltt_event_put(const struct lttng_event_desc *event)
{
- module_put(event->owner);
}
-EXPORT_SYMBOL_GPL(ltt_event_put);
+#if 0
static
void *tp_list_start(struct seq_file *m, loff_t *pos)
{
struct lttng_probe_desc *probe_desc;
int iter = 0, i;
- mutex_lock(&probe_mutex);
- list_for_each_entry(probe_desc, &probe_list, head) {
+ pthread_mutex_lock(&probe_mutex);
+ cds_list_for_each_entry(probe_desc, &probe_list, head) {
for (i = 0; i < probe_desc->nr_events; i++) {
if (iter++ >= *pos)
return (void *) &probe_desc->event_desc[i];
int iter = 0, i;
(*ppos)++;
- list_for_each_entry(probe_desc, &probe_list, head) {
+ cds_list_for_each_entry(probe_desc, &probe_list, head) {
for (i = 0; i < probe_desc->nr_events; i++) {
if (iter++ >= *ppos)
return (void *) &probe_desc->event_desc[i];
static
void tp_list_stop(struct seq_file *m, void *p)
{
- mutex_unlock(&probe_mutex);
+ pthread_mutex_unlock(&probe_mutex);
}
static
.llseek = seq_lseek,
.release = seq_release,
};
+#endif //0
};
static
-struct channel *_channel_create(const char *name,
+struct shm_handle *_channel_create(const char *name,
struct ltt_channel *ltt_chan, void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
- unsigned int read_timer_interval,
- int *shmid)
+ unsigned int read_timer_interval)
{
return channel_create(&client_config, name, ltt_chan, buf_addr,
subbuf_size, num_subbuf, switch_timer_interval,
- read_timer_interval, shmid);
+ read_timer_interval);
}
static
-void ltt_channel_destroy(struct channel *chan)
+void ltt_channel_destroy(struct shm_handle *handle)
{
- channel_destroy(chan);
+ channel_destroy(handle);
}
static
};
static
-struct channel *_channel_create(const char *name,
+struct shm_handle *_channel_create(const char *name,
struct ltt_channel *ltt_chan, void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
- unsigned int read_timer_interval,
- int *shmid)
+ unsigned int read_timer_interval)
{
return channel_create(&client_config, name, ltt_chan, buf_addr,
subbuf_size, num_subbuf, switch_timer_interval,
- read_timer_interval, shmid);
+ read_timer_interval);
}
static
-void ltt_channel_destroy(struct channel *chan)
+void ltt_channel_destroy(struct shm_handle *handle)
{
- channel_destroy(chan);
+ channel_destroy(handle);
}
static
#include "usterr_signal_safe.h"
#include "ust/bug.h"
-#include "../libringbuffer/config.h"
+#include <ust/ringbuffer-config.h>
struct ltt_session;
struct ltt_channel;
+++ /dev/null
-/*
- * Copyright (C) 2010 Nils Carlson
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- */
-
-#define _LGPL_SOURCE
-#include <errno.h>
-#include <ust/tracepoint.h>
-#include <ust/tracepoint-internal.h>
-#include <ust/core.h>
-#include <ust/kcompat/kcompat.h>
-#include <urcu-bp.h>
-
-#include "usterr_signal_safe.h"
-
-/* libraries that contain trace_events (struct trace_event_lib) */
-static CDS_LIST_HEAD(libs);
-/*
- * Nested mutex is not required here, but provide the same guaranteed
- * for start/stop iteration vs nested ops as markers and tracepoints.
- */
-static __thread int nested_mutex;
-static DEFINE_MUTEX(trace_events_mutex);
-
-static
-int trace_event_get_iter_range(struct trace_event * const **trace_event,
- struct trace_event * const *begin,
- struct trace_event * const *end);
-
-static
-void lock_trace_events(void)
-{
- if (!(nested_mutex++))
- pthread_mutex_lock(&trace_events_mutex);
-}
-
-static
-void unlock_trace_events(void)
-{
- if (!(--nested_mutex))
- pthread_mutex_unlock(&trace_events_mutex);
-}
-
-static
-int lib_get_iter_trace_events(struct trace_event_iter *iter)
-{
- struct trace_event_lib *iter_lib;
- int found = 0;
-
- cds_list_for_each_entry(iter_lib, &libs, list) {
- if (iter_lib < iter->lib)
- continue;
- else if (iter_lib > iter->lib)
- iter->trace_event = NULL;
- found = trace_event_get_iter_range(&iter->trace_event,
- iter_lib->trace_events_start,
- iter_lib->trace_events_start + iter_lib->trace_events_count);
- if (found) {
- iter->lib = iter_lib;
- break;
- }
- }
- return found;
-}
-
-/**
- * trace_event_get_iter_range - Get a next trace_event iterator given a range.
- * @trace_event: current trace_events (in), next trace_event (out)
- * @begin: beginning of the range
- * @end: end of the range
- *
- * Returns whether a next trace_event has been found (1) or not (0).
- * Will return the first trace_event in the range if the input trace_event is NULL.
- * Called with trace event mutex held.
- */
-static
-int trace_event_get_iter_range(struct trace_event * const **trace_event,
- struct trace_event * const *begin,
- struct trace_event * const *end)
-{
- if (!*trace_event && begin != end)
- *trace_event = begin;
- while (*trace_event >= begin && *trace_event < end) {
- if (!**trace_event)
- (*trace_event)++; /* skip dummy */
- else
- return 1;
- }
- return 0;
-}
-
-static void trace_event_get_iter(struct trace_event_iter *iter)
-{
- int found = 0;
-
- found = lib_get_iter_trace_events(iter);
-
- if (!found)
- trace_event_iter_reset(iter);
-}
-
-void trace_event_iter_start(struct trace_event_iter *iter)
-{
- lock_trace_events();
- trace_event_get_iter(iter);
-}
-
-/*
- * Called with trace event mutex held.
- */
-void trace_event_iter_next(struct trace_event_iter *iter)
-{
- iter->trace_event++;
- /*
- * iter->trace_event may be invalid because we blindly incremented it.
- * Make sure it is valid by marshalling on the trace_events, getting the
- * trace_events from following modules if necessary.
- */
- trace_event_get_iter(iter);
-}
-
-void trace_event_iter_stop(struct trace_event_iter *iter)
-{
- unlock_trace_events();
-}
-
-void trace_event_iter_reset(struct trace_event_iter *iter)
-{
- iter->lib = NULL;
- iter->trace_event = NULL;
-}
-
-int trace_event_register_lib(struct trace_event * const *trace_events_start,
- int trace_events_count)
-{
- struct trace_event_lib *pl, *iter;
-
- pl = (struct trace_event_lib *) malloc(sizeof(struct trace_event_lib));
-
- pl->trace_events_start = trace_events_start;
- pl->trace_events_count = trace_events_count;
-
- lock_trace_events();
- /*
- * We sort the libs by struct lib pointer address.
- */
- cds_list_for_each_entry_reverse(iter, &libs, list) {
- BUG_ON(iter == pl); /* Should never be in the list twice */
- if (iter < pl) {
- /* We belong to the location right after iter. */
- cds_list_add(&pl->list, &iter->list);
- goto lib_added;
- }
- }
- /* We should be added at the head of the list */
- cds_list_add(&pl->list, &libs);
-lib_added:
- unlock_trace_events();
-
- /* trace_events_count - 1: skip dummy */
- DBG("just registered a trace_events section from %p and having %d trace_events (minus dummy trace_event)", trace_events_start, trace_events_count);
-
- return 0;
-}
-
-int trace_event_unregister_lib(struct trace_event * const *trace_events_start)
-{
- struct trace_event_lib *lib;
-
- unlock_trace_events();
- cds_list_for_each_entry(lib, &libs, list) {
- if(lib->trace_events_start == trace_events_start) {
- struct trace_event_lib *lib2free = lib;
- cds_list_del(&lib->list);
- free(lib2free);
- break;
- }
- }
- unlock_trace_events();
-
- return 0;
-}
-SUBDIRS = . hello hello2 basic basic_long fork simple_include snprintf test-nevents test-libustinstr-malloc dlopen same_line_marker trace_event register_test tracepoint libustctl_function_tests exit-fast
+SUBDIRS = . hello
+#SUBDIRS = . hello hello2 basic basic_long fork simple_include snprintf test-nevents test-libustinstr-malloc dlopen same_line_marker trace_event register_test tracepoint libustctl_function_tests exit-fast
dist_noinst_SCRIPTS = test_loop runtests trace_matches
noinst_PROGRAMS = hello
hello_SOURCES = hello.c tp.c tp.h
hello_LDADD = $(top_builddir)/libust/libust.la \
- $(top_builddir)/libustctl/libustctl.la \
$(top_builddir)/libust-initializer.o
noinst_SCRIPTS = run
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
+ * License as published by the Free Software Foundation; version 2.1 of
+ * the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
#include <fcntl.h>
#include <signal.h>
-#include <ust/marker.h>
-#include <ust/ustctl.h>
#include "usterr.h"
#include "tp.h"
void inthandler(int sig)
{
- printf("in handler\n");
- exit(0);
+ printf("in SIGUSR1 handler\n");
+ tracepoint(ust_tests_hello_tptest_sighandler);
}
int init_int_handler(void)
/* Only defer ourselves. Also, try to restart interrupted
* syscalls to disturb the traced program as little as possible.
*/
- result = sigaction(SIGINT, &act, NULL);
+ result = sigaction(SIGUSR1, &act, NULL);
if (result == -1) {
PERROR("sigaction");
return -1;
return 0;
}
-int main()
+int main(int argc, char **argv)
{
int i;
printf("Hello, World!\n");
sleep(1);
+
for (i = 0; i < 50; i++) {
- ust_marker(bar, "str %s", "FOOBAZ");
- ust_marker(bar2, "number1 %d number2 %d", 53, 9800);
- tracepoint(hello_tptest, i);
+ tracepoint(ust_tests_hello_tptest, i);
usleep(100000);
}
-
- if (scanf("%*s") == EOF)
- PERROR("scanf failed");
-
- ustctl_stop_trace(getpid(), "auto");
- ustctl_destroy_trace(getpid(), "auto");
-
- DBG("TRACE STOPPED");
- if (scanf("%*s") == EOF)
- PERROR("scanf failed");
-
return 0;
}
/*
- * Copyright (C) 2009 Pierre-Marc Fournier
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * tp.c
+ *
+ * Copyright (c) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
+ * License as published by the Free Software Foundation; version 2.1 of
+ * the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <ust/marker.h>
-#include "usterr.h"
-
#define TRACEPOINT_CREATE_PROBES
#include "tp.h"
-
-struct hello_trace_struct {
- char *message;
-};
-
-struct hello_trace_struct hello_struct = {
- .message = "ehlo\n",
-};
-
-void tptest_probe(void *data, int anint)
-{
- struct hello_trace_struct *hello;
- hello = (struct hello_trace_struct *)data;
- DBG("in tracepoint probe...");
- printf("this is the message: %s\n", hello->message);
-}
-
-void tptest2_probe(void *data)
-{
-}
-
-static void __attribute__((constructor)) init()
-{
- DBG("connecting tracepoint...\n");
- /*
- * Note: this is an internal API that will be used within
- * TRACEPOINT_EVENT only eventually.
- */
- __register_tracepoint(hello_tptest, tptest_probe, &hello_struct);
- __register_tracepoint(hello_tptest2, tptest2_probe, &hello_struct);
-}
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM tp
+#undef TRACEPOINT_SYSTEM
+#define TRACEPOINT_SYSTEM tp
-#if !defined(_TRACE_TP_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_TP_H
+#if !defined(_TRACEPOINT_TP_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_TP_H
/*
* Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
#include <ust/tracepoint.h>
-TRACEPOINT_EVENT(hello_tptest,
+TRACEPOINT_EVENT(ust_tests_hello_tptest,
TP_PROTO(int anint),
TP_ARGS(anint),
- TP_FIELDS());
+ TP_FIELDS())
-TRACEPOINT_EVENT_NOARGS(hello_tptest2,
- TP_FIELDS());
+TRACEPOINT_EVENT_NOARGS(ust_tests_hello_tptest_sighandler,
+ TP_FIELDS())
-#endif /* _TRACE_TP_H */
+#endif /* _TRACEPOINT_TP_H */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE tp
+#undef TRACEPOINT_INCLUDE_PATH
+#define TRACEPOINT_INCLUDE_PATH .
+#undef TRACEPOINT_INCLUDE_FILE
+#define TRACEPOINT_INCLUDE_FILE tp
/* This part must be outside protection */
-#include <ust/tracepoint_event.h>
+#include <ust/tracepoint-event.h>