ust/probe.h \
ust/ust.h \
ust/tracectl.h \
+ ust/core.h \
+ ust/type-serializer.h \
ust/kcompat/kcompat.h \
ust/kcompat/compiler.h \
ust/kcompat/disable.h \
--- /dev/null
+#ifndef UST_CORE_H
+#define UST_CORE_H
+
+#if defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT)
+
+/*
+ * Calculate the offset needed to align the type.
+ * size_of_type must be non-zero.
+ */
+static inline unsigned int ltt_align(size_t align_drift, size_t size_of_type)
+{
+ size_t alignment = min(sizeof(void *), size_of_type);
+ return (alignment - align_drift) & (alignment - 1);
+}
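+
+/*
+ * Example: on a 32-bit architecture, writing a u32 at buffer offset 6
+ * gives ltt_align(6, 4) == 2 bytes of padding, so the data starts at
+ * offset 8, which is correctly aligned.
+ */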
+/* Default arch alignment */
+#define LTT_ALIGN
+
+static inline int ltt_get_alignment(void)
+{
+ return sizeof(void *);
+}
+
+#else
+
+static inline unsigned int ltt_align(size_t align_drift,
+ size_t size_of_type)
+{
+ return 0;
+}
+
+#define LTT_ALIGN __attribute__((packed))
+
+static inline int ltt_get_alignment(void)
+{
+ return 0;
+}
+#endif /* defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT) */
+
+#endif /* UST_CORE_H */
--- /dev/null
+#ifndef _LTT_TYPE_SERIALIZER_H
+#define _LTT_TYPE_SERIALIZER_H
+
+//ust// #include "tracer.h"
+#include <ust/marker.h>
+#include <ust/core.h>
+
+/*
+ * largest_align must be non-zero: the minimum between the size of the largest
+ * type in the payload and sizeof(void *).
+ */
+extern void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
+ void *serialize_private, unsigned int data_size,
+ unsigned int largest_align);
+
+/*
+ * Clamp largest_align so that 0 < largest_align <= sizeof(void *), to make the
+ * call dumb-proof: 0 is changed into 1, and an unsigned long long alignment is
+ * changed into sizeof(void *) on 32-bit architectures.
+ */
+static inline void ltt_specialized_trace(const struct marker *mdata,
+ void *probe_data,
+ void *serialize_private, unsigned int data_size,
+ unsigned int largest_align)
+{
+ largest_align = min_t(unsigned int, largest_align, sizeof(void *));
+ largest_align = max_t(unsigned int, largest_align, 1);
+ _ltt_specialized_trace(mdata, probe_data, serialize_private, data_size,
+ largest_align);
+}
+
+/*
+ * Type serializer definitions.
+ */
+
+/*
+ * Return size of structure without end-of-structure padding.
+ */
+#define serialize_sizeof(type) offsetof(typeof(type), end_field)
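+
+/*
+ * For instance, serialize_sizeof(struct serialize_long_char), defined below,
+ * is sizeof(long) + 1, regardless of any tail padding the compiler adds.
+ */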
+
+struct serialize_long_int {
+ unsigned long f1;
+ unsigned int f2;
+ unsigned char end_field[0];
+} LTT_ALIGN;
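+
+/*
+ * Typical use, as a sketch (the probe name and arguments here are
+ * hypothetical): a specialized probe fills one of these structures and
+ * passes it to ltt_specialized_trace() together with its serialized size
+ * and the alignment of its largest member:
+ *
+ *	void probe_func(const struct marker *mdata, void *probe_data,
+ *			unsigned long val, unsigned int flags)
+ *	{
+ *		struct serialize_long_int data;
+ *
+ *		data.f1 = val;
+ *		data.f2 = flags;
+ *		ltt_specialized_trace(mdata, probe_data, &data,
+ *				      serialize_sizeof(data), sizeof(long));
+ *	}
+ */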
+
+struct serialize_int_int_long {
+ unsigned int f1;
+ unsigned int f2;
+ unsigned long f3;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_int_int_short {
+ unsigned int f1;
+ unsigned int f2;
+ unsigned short f3;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_long {
+ unsigned long f1;
+ unsigned long f2;
+ unsigned long f3;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_int {
+ unsigned long f1;
+ unsigned long f2;
+ unsigned int f3;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_short_char {
+ unsigned long f1;
+ unsigned long f2;
+ unsigned short f3;
+ unsigned char f4;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_short {
+ unsigned long f1;
+ unsigned long f2;
+ unsigned short f3;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_short_char {
+ unsigned long f1;
+ unsigned short f2;
+ unsigned char f3;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_short {
+ unsigned long f1;
+ unsigned short f2;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_char {
+ unsigned long f1;
+ unsigned char f2;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_sizet_int {
+ size_t f1;
+ unsigned int f2;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_sizet_int {
+ unsigned long f1;
+ unsigned long f2;
+ size_t f3;
+ unsigned int f4;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_sizet_int_int {
+ unsigned long f1;
+ unsigned long f2;
+ size_t f3;
+ unsigned int f4;
+ unsigned int f5;
+ unsigned char end_field[0];
+} LTT_ALIGN;
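+
+/*
+ * The digit-named structures below encode their layout in the name:
+ * 'l' stands for an unsigned long, and each digit is the size in bytes
+ * of the following fixed-width field (4 = uint32_t, 2 = uint16_t,
+ * 1 = uint8_t, 8 = uint64_t).
+ */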
+
+struct serialize_l4421224411111 {
+ unsigned long f1;
+ uint32_t f2;
+ uint32_t f3;
+ uint16_t f4;
+ uint8_t f5;
+ uint16_t f6;
+ uint16_t f7;
+ uint32_t f8;
+ uint32_t f9;
+ uint8_t f10;
+ uint8_t f11;
+ uint8_t f12;
+ uint8_t f13;
+ uint8_t f14;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_l214421224411111 {
+ unsigned long f1;
+ uint16_t f2;
+ uint8_t f3;
+ uint32_t f4;
+ uint32_t f5;
+ uint16_t f6;
+ uint8_t f7;
+ uint16_t f8;
+ uint16_t f9;
+ uint32_t f10;
+ uint32_t f11;
+ uint8_t f12;
+ uint8_t f13;
+ uint8_t f14;
+ uint8_t f15;
+ uint8_t f16;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_l4412228 {
+ unsigned long f1;
+ uint32_t f2;
+ uint32_t f3;
+ uint8_t f4;
+ uint16_t f5;
+ uint16_t f6;
+ uint16_t f7;
+ uint64_t f8;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+#endif /* _LTT_TYPE_SERIALIZER_H */
#include <ust/processor.h>
#include <ust/tracepoint.h>
#include <ust/probe.h>
+#include <ust/type-serializer.h>
+#include <ust/core.h>
#endif /* UST_H */
* avoid this.
*/
-DECLARE_TRACE(ust_dummytp, TPPROTO(void), TPARGS());
+DECLARE_TRACE(ust_dummytp, TP_PROTO(void), TP_ARGS());
DEFINE_TRACE(ust_dummytp);
void dummy_libust_initializer_func(void)
tracectl.c \
$(top_builddir)/libustcomm/multipoll.c \
tracerconst.h \
- header-inline.h
+ header-inline.h \
+ type-serializer.c
libust_la_LDFLAGS = -no-undefined -version-info 0:0:0
* Return : -ENOSPC if not enough space, else returns 0.
* It will take care of sub-buffer switching.
*/
-int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
- struct ust_channel *chan, void **transport_data,
- size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
- unsigned int *rflags, int largest_align, int cpu)
+int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
+ struct ust_trace *trace, size_t data_size,
+ int largest_align, int cpu,
+ struct ust_buffer **ret_buf,
+ size_t *slot_size, long *buf_offset,
+ u64 *tsc, unsigned int *rflags)
{
- struct ust_buffer *buf = chan->buf[cpu];
+ struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
struct ltt_reserve_switch_offsets offsets;
offsets.size = 0;
ltt_transport_unregister(&ust_relay_transport);
}
-size_t ltt_write_event_header_slow(struct ust_trace *trace,
- struct ust_channel *channel,
+size_t ltt_write_event_header_slow(struct ust_channel *channel,
struct ust_buffer *buf, long buf_offset,
u16 eID, u32 event_size,
u64 tsc, unsigned int rflags)
*/
enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
-extern int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
- struct ust_channel *ltt_channel, void **transport_data,
- size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
- unsigned int *rflags, int largest_align, int cpu);
+extern int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
+ struct ust_trace *trace, size_t data_size,
+ int largest_align, int cpu,
+ struct ust_buffer **ret_buf,
+ size_t *slot_size, long *buf_offset,
+ u64 *tsc, unsigned int *rflags);
extern void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
enum force_switch_mode mode);
return 0;
}
-static __inline__ int ltt_reserve_slot(struct ust_trace *trace,
- struct ust_channel *chan, void **transport_data,
- size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
- unsigned int *rflags, int largest_align, int cpu)
+static __inline__ int ltt_reserve_slot(struct ust_channel *chan,
+ struct ust_trace *trace, size_t data_size,
+ int largest_align, int cpu,
+ struct ust_buffer **ret_buf,
+ size_t *slot_size, long *buf_offset, u64 *tsc,
+ unsigned int *rflags)
{
- struct ust_buffer *buf = chan->buf[cpu];
+ struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
long o_begin, o_end, o_old;
size_t before_hdr_pad;
*buf_offset = o_begin + before_hdr_pad;
return 0;
slow_path:
- return ltt_reserve_slot_lockless_slow(trace, chan,
- transport_data, data_size, slot_size, buf_offset, tsc,
- rflags, largest_align, cpu);
+ return ltt_reserve_slot_lockless_slow(chan, trace, data_size,
+ largest_align, cpu, ret_buf,
+ slot_size, buf_offset, tsc,
+ rflags);
}
/*
#ifndef UST_HEADER_INLINE_H
#define UST_HEADER_INLINE_H
-#include "tracercore.h"
+#include <ust/core.h>
/*
* ust_get_header_size
smp_wmb();
elem->ptype = entry->ptype;
-//ust// if (elem->tp_name && (active ^ _imv_read(elem->state))) {
-//ust// WARN_ON(!elem->tp_cb);
-//ust// /*
-//ust// * It is ok to directly call the probe registration because type
-//ust// * checking has been done in the __trace_mark_tp() macro.
-//ust// */
-//ust//
-//ust// if (active) {
-//ust// /*
-//ust// * try_module_get should always succeed because we hold
-//ust// * markers_mutex to get the tp_cb address.
-//ust// */
+ if (elem->tp_name && (active ^ _imv_read(elem->state))) {
+ WARN_ON(!elem->tp_cb);
+ /*
+ * It is ok to directly call the probe registration because type
+ * checking has been done in the __trace_mark_tp() macro.
+ */
+
+ if (active) {
+ /*
+ * try_module_get should always succeed because we hold
+ * markers_mutex to get the tp_cb address.
+ */
//ust// ret = try_module_get(__module_text_address(
//ust// (unsigned long)elem->tp_cb));
//ust// BUG_ON(!ret);
-//ust// ret = tracepoint_probe_register_noupdate(
-//ust// elem->tp_name,
-//ust// elem->tp_cb);
-//ust// } else {
-//ust// ret = tracepoint_probe_unregister_noupdate(
-//ust// elem->tp_name,
-//ust// elem->tp_cb);
-//ust// /*
-//ust// * tracepoint_probe_update_all() must be called
-//ust// * before the module containing tp_cb is unloaded.
-//ust// */
+ ret = tracepoint_probe_register_noupdate(
+ elem->tp_name,
+ elem->tp_cb);
+ } else {
+ ret = tracepoint_probe_unregister_noupdate(
+ elem->tp_name,
+ elem->tp_cb);
+ /*
+ * tracepoint_probe_update_all() must be called
+ * before the module containing tp_cb is unloaded.
+ */
//ust// module_put(__module_text_address(
//ust// (unsigned long)elem->tp_cb));
-//ust// }
-//ust// }
+ }
+ }
elem->state__imv = active;
return ret;
*/
static void disable_marker(struct marker *elem)
{
-//ust// int ret;
-//ust//
-//ust// /* leave "call" as is. It is known statically. */
-//ust// if (elem->tp_name && _imv_read(elem->state)) {
-//ust// WARN_ON(!elem->tp_cb);
-//ust// /*
-//ust// * It is ok to directly call the probe registration because type
-//ust// * checking has been done in the __trace_mark_tp() macro.
-//ust// */
-//ust// ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
-//ust// elem->tp_cb);
-//ust// WARN_ON(ret);
-//ust// /*
-//ust// * tracepoint_probe_update_all() must be called
-//ust// * before the module containing tp_cb is unloaded.
-//ust// */
+ int ret;
+
+ /* leave "call" as is. It is known statically. */
+ if (elem->tp_name && _imv_read(elem->state)) {
+ WARN_ON(!elem->tp_cb);
+ /*
+ * It is ok to directly call the probe registration because type
+ * checking has been done in the __trace_mark_tp() macro.
+ */
+ ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
+ elem->tp_cb);
+ WARN_ON(ret);
+ /*
+ * tracepoint_probe_update_all() must be called
+ * before the module containing tp_cb is unloaded.
+ */
//ust// module_put(__module_text_address((unsigned long)elem->tp_cb));
-//ust// }
+ }
elem->state__imv = 0;
elem->single.func = __mark_empty_function;
/* Update the function before setting the ptype */
/* Markers in modules. */
//ust// module_update_markers();
lib_update_markers();
-//ust// tracepoint_probe_update_all();
+ tracepoint_probe_update_all();
/* Update immediate values */
core_imv_update();
//ust// module_imv_update(); /* FIXME: need to port for libs? */
#include <stdint.h>
#include <stdio.h>
-#include <ust/kernelcompat.h>
#define _LGPL_SOURCE
#include <urcu-bp.h>
#include <urcu/rculist.h>
+#include <ust/kernelcompat.h>
+#include <ust/core.h>
#include "buffers.h"
#include "tracer.h"
//#include "list.h"
LTT_TYPE_NONE,
};
-static int ust_get_cpu(void)
-{
- return sched_getcpu();
-}
-
#define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
/*
}
/* reserve space : header and data */
- ret = ltt_reserve_slot(trace, channel, &transport_data,
- data_size, &slot_size, &buf_offset,
- &tsc, &rflags,
- largest_align, cpu);
+ ret = ltt_reserve_slot(channel, trace, data_size, largest_align,
+ cpu, &buf, &slot_size, &buf_offset,
+ &tsc, &rflags);
if (unlikely(ret < 0))
continue; /* buffer full */
//ust// buf = ((struct rchan *)channel->trans_channel_data)->buf[cpu];
buf = channel->buf[cpu];
/* Out-of-order write : header and data */
- buf_offset = ltt_write_event_header(trace,
- channel, buf, buf_offset,
+ buf_offset = ltt_write_event_header(channel, buf, buf_offset,
eID, data_size, tsc, rflags);
ltt_write_event_data(buf, buf_offset, &closure,
serialize_private,
return offsetof(struct ltt_subbuffer_header, header_end);
}
-extern size_t ltt_write_event_header_slow(struct ust_trace *trace,
- struct ust_channel *channel,
+extern size_t ltt_write_event_header_slow(struct ust_channel *channel,
struct ust_buffer *buf, long buf_offset,
u16 eID, u32 event_size,
u64 tsc, unsigned int rflags);
*
* returns : offset where the event data must be written.
*/
-static __inline__ size_t ltt_write_event_header(struct ust_trace *trace,
- struct ust_channel *chan,
+static __inline__ size_t ltt_write_event_header(struct ust_channel *chan,
struct ust_buffer *buf, long buf_offset,
u16 eID, u32 event_size,
u64 tsc, unsigned int rflags)
return buf_offset;
slow_path:
- return ltt_write_event_header_slow(trace, chan, buf, buf_offset,
+ return ltt_write_event_header_slow(chan, buf, buf_offset,
eID, event_size, tsc, rflags);
}
header->freq_scale = trace->freq_scale;
}
+static __inline__ int ust_get_cpu(void)
+{
+#ifndef UST_VALGRIND
+ return sched_getcpu();
+#else
+ /*
+ * Valgrind does not support the sched_getcpu() vsyscall: calling it
+ * makes valgrind detect a segfault in the program and stop it. So when
+ * checking libust under valgrind, we have to refrain from using this
+ * call. TODO: it would probably be better to also return other values,
+ * to better exercise the per-cpu code paths.
+ */
+ return 0;
+#endif
+}
+
/*
* Size reserved for high priority events (interrupts, NMI, BH) at the end of a
extern void ltt_filter_register(ltt_run_filter_functor func);
extern void ltt_filter_unregister(void);
-#if defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT)
-
-/*
- * Calculate the offset needed to align the type.
- * size_of_type must be non-zero.
- */
-static inline unsigned int ltt_align(size_t align_drift, size_t size_of_type)
-{
- size_t alignment = min(sizeof(void *), size_of_type);
- return (alignment - align_drift) & (alignment - 1);
-}
-/* Default arch alignment */
-#define LTT_ALIGN
-
-static inline int ltt_get_alignment(void)
-{
- return sizeof(void *);
-}
-
-#else
-
-static inline unsigned int ltt_align(size_t align_drift,
- size_t size_of_type)
-{
- return 0;
-}
-
-#define LTT_ALIGN __attribute__((packed))
-
-static inline int ltt_get_alignment(void)
-{
- return 0;
-}
-#endif /* defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT) */
-
#endif /* UST_TRACERCORE_H */
--- /dev/null
+/**
+ * type-serializer.c
+ *
+ * LTTng specialized type serializer.
+ *
+ * Copyright Mathieu Desnoyers, 2008.
+ *
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+#include <urcu/rculist.h>
+#include <ust/type-serializer.h>
+#include <ust/core.h>
+#include "tracer.h"
+
+notrace
+void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
+ void *serialize_private, unsigned int data_size,
+ unsigned int largest_align)
+{
+ int ret;
+ uint16_t eID;
+ size_t slot_size;
+ unsigned int chan_index;
+ struct ust_buffer *buf;
+ struct ust_channel *chan;
+ struct ust_trace *trace;
+ u64 tsc;
+ long buf_offset;
+ int cpu;
+ unsigned int rflags;
+
+ /*
+ * Return early when no trace is active: this keeps inactive
+ * tracepoints as cheap as possible.
+ */
+ if (unlikely(ltt_traces.num_active_traces == 0))
+ return;
+
+ rcu_read_lock();
+ cpu = ust_get_cpu();
+
+ /* Force volatile access. */
+ STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+
+ /*
+ * The asm volatile and "memory" clobber in barrier() prevent the
+ * compiler from moving instructions out of the ltt nesting count
+ * section. This is required to ensure that probe side-effects which
+ * can cause recursion (e.g. unforeseen traps, divisions by 0, ...)
+ * are triggered within the incremented nesting count section.
+ */
+ barrier();
+ eID = mdata->event_id;
+ chan_index = mdata->channel_id;
+
+ /*
+ * Iterate on each trace. The number of active traces is typically
+ * small, so plain list iteration is usually faster than iteration
+ * with prefetch.
+ */
+ list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+ if (unlikely(!trace->active))
+ continue;
+//ust// if (unlikely(!ltt_run_filter(trace, eID)))
+//ust// continue;
+#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
+ rflags = LTT_RFLAG_ID_SIZE;
+#else
+ if (unlikely(eID >= LTT_FREE_EVENTS))
+ rflags = LTT_RFLAG_ID;
+ else
+ rflags = 0;
+#endif
+ /*
+ * Skip channels added after trace creation.
+ */
+ if (unlikely(chan_index >= trace->nr_channels))
+ continue;
+ chan = &trace->channels[chan_index];
+ if (!chan->active)
+ continue;
+
+ /* reserve space : header and data */
+ ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
+ cpu, &buf, &slot_size, &buf_offset, &tsc,
+ &rflags);
+ if (unlikely(ret < 0))
+ continue; /* buffer full */
+
+ /* Out-of-order write : header and data */
+ buf_offset = ltt_write_event_header(chan, buf,
+ buf_offset, eID, data_size,
+ tsc, rflags);
+ if (data_size) {
+ buf_offset += ltt_align(buf_offset, largest_align);
+ ust_buffers_write(buf, buf_offset,
+ serialize_private, data_size);
+ buf_offset += data_size;
+ }
+ /* Out-of-order commit */
+ ltt_commit_slot(chan, buf, buf_offset, data_size, slot_size);
+ }
+ /*
+ * The asm volatile and "memory" clobber in barrier() prevent the
+ * compiler from moving instructions out of the ltt nesting count
+ * section. This is required to ensure that probe side-effects which
+ * can cause recursion (e.g. unforeseen traps, divisions by 0, ...)
+ * are triggered within the incremented nesting count section.
+ */
+ barrier();
+ STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+ rcu_read_unlock();
+}
#include <ust/tracepoint.h>
DECLARE_TRACE(hello_tptest,
- TPPROTO(int anint),
- TPARGS(anint));
+ TP_PROTO(int anint),
+ TP_ARGS(anint));