Port changes from lttng-kt
author     Pierre-Marc Fournier <pierre-marc.fournier@polymtl.ca>
           Fri, 5 Feb 2010 17:17:48 +0000 (12:17 -0500)
committer  Pierre-Marc Fournier <pierre-marc.fournier@polymtl.ca>
           Mon, 15 Feb 2010 05:39:03 +0000 (00:39 -0500)
14 files changed:
include/ust/kernelcompat.h
libust/Makefile.am
libust/buffers.c
libust/buffers.h
libust/channels.h
libust/header-inline.h [new file with mode: 0644]
libust/marker.c
libust/serialize.c
libust/tracectl.c
libust/tracer.c
libust/tracer.h
libust/tracerconst.h [new file with mode: 0644]
libust/tracercore.h
ustd/lowlevel.c

diff --git a/include/ust/kernelcompat.h b/include/ust/kernelcompat.h
index 032030ac1f1526cb252ab94776a43af83c66f146..da2326192928cd3d81e233a1f4120d2598e101fe 100644
@@ -197,4 +197,8 @@ static inline u32 trace_clock_freq_scale(void)
 }
 
 
+/* PERCPU */
+
+#define __get_cpu_var(x) x
+
 #endif /* KERNELCOMPAT_H */
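
The PERCPU block added above maps the kernel's per-CPU accessor onto the plain variable, since a traced userspace process has a single address space and no per-CPU data instances. A minimal standalone sketch of what the stub buys; the counter name is hypothetical, not from the patch:

    /* illustration: with the compat stub, kernel-style per-CPU access
     * degenerates to touching the ordinary global variable */
    #include <stdio.h>

    #define __get_cpu_var(x) x

    static unsigned long events_handled;    /* hypothetical counter */

    int main(void)
    {
            __get_cpu_var(events_handled)++;        /* expands to: events_handled++ */
            printf("events handled: %lu\n", __get_cpu_var(events_handled));
            return 0;
    }
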
diff --git a/libust/Makefile.am b/libust/Makefile.am
index 087dc0a978a3475ae56ef799243a1d09ddbea4db..612ac93a2b7a3cc83c169ae06a94a0f35b4662cc 100644
@@ -1,7 +1,24 @@
 AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/libustcomm
 
 lib_LTLIBRARIES = libust.la
-libust_la_SOURCES = marker.c tracepoint.c channels.c channels.h marker-control.c marker-control.h buffers.c buffers.h tracer.c tracer.h tracercore.c tracercore.h serialize.c tracectl.c $(top_builddir)/libustcomm/ustcomm.c
+libust_la_SOURCES = \
+       marker.c \
+       tracepoint.c \
+       channels.c \
+       channels.h \
+       marker-control.c \
+       marker-control.h \
+       buffers.c \
+       buffers.h \
+       tracer.c \
+       tracer.h \
+       tracercore.c \
+       tracercore.h \
+       serialize.c \
+       tracectl.c \
+       $(top_builddir)/libustcomm/ustcomm.c \
+       tracerconst.h \
+       header-inline.h
 libust_la_LDFLAGS = -no-undefined -version-info 0:0:0
 libust_la_LIBADD = -lpthread
 libust_la_CFLAGS = -DUST_COMPONENT="libust"
diff --git a/libust/buffers.c b/libust/buffers.c
index 241ca74d4172160000c3dbed8840acf617a90bde..12753c1961345f4430779ef8d1a69f48ce2aaf21 100644
 #include "tracercore.h"
 #include "usterr.h"
 
+struct ltt_reserve_switch_offsets {
+       long begin, end, old;
+       long begin_switch, end_switch_current, end_switch_old;
+       size_t before_hdr_pad, size;
+};
+
+
 static DEFINE_MUTEX(ust_buffers_channels_mutex);
 static LIST_HEAD(ust_buffers_channels);
 
@@ -63,7 +70,31 @@ static int get_n_cpus(void)
        return result;
 }
 
-static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
+/* _ust_buffers_write()
+ *
+ * @buf: destination buffer
+ * @offset: offset in destination
+ * @src: source buffer
+ * @len: length of source
+ * @cpy: already copied
+ */
+
+void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
+        const void *src, size_t len, ssize_t cpy)
+{
+       do {
+               len -= cpy;
+               src += cpy;
+               offset += cpy;
+
+               WARN_ON(offset >= buf->buf_size);
+
+               cpy = min_t(size_t, len, buf->buf_size - offset);
+               ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
+       } while (unlikely(len != cpy));
+}
+
+static int ust_buffers_init_buffer(struct ust_trace *trace,
                struct ust_channel *ltt_chan,
                struct ust_buffer *buf,
                unsigned int n_subbufs);
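
The _ust_buffers_write() slow path above continues a copy whose first chunk was already done by the caller (the @cpy argument): subtract what was copied, advance the source and the destination offset, then keep copying the largest chunk that still fits before the buffer end. A standalone sketch of that loop shape, using illustrative names and a plain byte buffer instead of the UST types:

    /* sketch only: continue a copy of which `cpy` bytes were already done */
    #include <assert.h>
    #include <string.h>

    static void chunked_write(char *dst, size_t size, size_t offset,
                              const char *src, size_t len, size_t cpy)
    {
            do {
                    len -= cpy;
                    src += cpy;
                    offset += cpy;
                    assert(offset < size);  /* mirrors the WARN_ON() above */
                    cpy = len < size - offset ? len : size - offset;
                    memcpy(dst + offset, src, cpy);
            } while (len != cpy);
    }

    int main(void)
    {
            char dst[16] = { 0 };
            /* pretend the fast path already copied the first 5 of 11 bytes */
            memcpy(dst, "hello world", 5);
            chunked_write(dst, sizeof(dst), 0, "hello world", 11, 5);
            return 0;
    }
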
@@ -194,11 +225,20 @@ int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_
        if(subbuf_size == 0 || subbuf_cnt == 0)
                return -1;
 
+       /* Check that the subbuffer size is larger than a page. */
+       WARN_ON_ONCE(subbuf_size < PAGE_SIZE);
+
+       /*
+        * Make sure the number of subbuffers and subbuffer size are power of 2.
+        */
+       WARN_ON_ONCE(hweight32(subbuf_size) != 1);
+       WARN_ON(hweight32(subbuf_cnt) != 1);
+
        chan->version = UST_CHANNEL_VERSION;
        chan->subbuf_cnt = subbuf_cnt;
        chan->subbuf_size = subbuf_size;
        chan->subbuf_size_order = get_count_order(subbuf_size);
-       chan->alloc_size = FIX_SIZE(subbuf_size * subbuf_cnt);
+       chan->alloc_size = subbuf_size * subbuf_cnt;
 
        kref_init(&chan->kref);
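
For reference, both new sanity checks above reduce to a popcount test: a power of two has exactly one bit set, which is also consistent with alloc_size now being a plain product (the FIX_SIZE rounding is gone) and with get_count_order() yielding the exact log2 of the subbuffer size. A small sketch outside the UST code, using the compiler builtin in place of the kernel's hweight32():

    #include <stdio.h>

    int main(void)
    {
            unsigned int subbuf_size = 4096, subbuf_cnt = 8;

            /* power of two <=> exactly one bit set */
            printf("size ok:  %d\n", __builtin_popcount(subbuf_size) == 1);
            printf("count ok: %d\n", __builtin_popcount(subbuf_cnt) == 1);

            /* with both powers of two, the allocation needs no rounding */
            printf("alloc_size = %u\n", subbuf_size * subbuf_cnt);
            return 0;
    }
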
 
@@ -245,86 +285,10 @@ void ust_buffers_channel_close(struct ust_channel *chan)
        mutex_unlock(&ust_buffers_channels_mutex);
 }
 
-/* _ust_buffers_write()
- *
- * @buf: destination buffer
- * @offset: offset in destination
- * @src: source buffer
- * @len: length of source
- * @cpy: already copied
- */
-
-void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
-       const void *src, size_t len, ssize_t cpy)
-{
-       do {
-               len -= cpy;
-               src += cpy;
-               offset += cpy;
-
-               WARN_ON(offset >= buf->buf_size);
-
-               cpy = min_t(size_t, len, buf->buf_size - offset);
-               ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
-       } while (unlikely(len != cpy));
-}
-
-void *ltt_buffers_offset_address(struct ust_buffer *buf, size_t offset)
-{
-       return ((char *)buf->buf_data)+offset;
-}
-
 /*
  * -------
  */
 
-/*
- * Last TSC comparison functions. Check if the current TSC overflows
- * LTT_TSC_BITS bits from the last TSC read. Reads and writes last_tsc
- * atomically.
- */
-
-/* FIXME: does this test work properly? */
-#if (BITS_PER_LONG == 32)
-static inline void save_last_tsc(struct ust_buffer *ltt_buf,
-                                       u64 tsc)
-{
-       ltt_buf->last_tsc = (unsigned long)(tsc >> LTT_TSC_BITS);
-}
-
-static inline int last_tsc_overflow(struct ust_buffer *ltt_buf,
-                                       u64 tsc)
-{
-       unsigned long tsc_shifted = (unsigned long)(tsc >> LTT_TSC_BITS);
-
-       if (unlikely((tsc_shifted - ltt_buf->last_tsc)))
-               return 1;
-       else
-               return 0;
-}
-#else
-static inline void save_last_tsc(struct ust_buffer *ltt_buf,
-                                       u64 tsc)
-{
-       ltt_buf->last_tsc = (unsigned long)tsc;
-}
-
-static inline int last_tsc_overflow(struct ust_buffer *ltt_buf,
-                                       u64 tsc)
-{
-       if (unlikely((tsc - ltt_buf->last_tsc) >> LTT_TSC_BITS))
-               return 1;
-       else
-               return 0;
-}
-#endif
-
-/*
- * A switch is done during tracing or as a final flush after tracing (so it
- * won't write in the new sub-buffer).
- */
-enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
-
 static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu);
 
 static void ltt_force_switch(struct ust_buffer *buf,
@@ -333,13 +297,13 @@ static void ltt_force_switch(struct ust_buffer *buf,
 /*
  * Trace callbacks
  */
-static void ltt_buffer_begin_callback(struct ust_buffer *buf,
+static void ltt_buffer_begin(struct ust_buffer *buf,
                        u64 tsc, unsigned int subbuf_idx)
 {
        struct ust_channel *channel = buf->chan;
        struct ltt_subbuffer_header *header =
                (struct ltt_subbuffer_header *)
-                       ltt_buffers_offset_address(buf,
+                       ust_buffers_offset_address(buf,
                                subbuf_idx * buf->chan->subbuf_size);
 
        header->cycle_count_begin = tsc;
@@ -352,12 +316,12 @@ static void ltt_buffer_begin_callback(struct ust_buffer *buf,
  * offset is assumed to never be 0 here : never deliver a completely empty
  * subbuffer. The lost size is between 0 and subbuf_size-1.
  */
-static notrace void ltt_buffer_end_callback(struct ust_buffer *buf,
+static notrace void ltt_buffer_end(struct ust_buffer *buf,
                u64 tsc, unsigned int offset, unsigned int subbuf_idx)
 {
        struct ltt_subbuffer_header *header =
                (struct ltt_subbuffer_header *)
-                       ltt_buffers_offset_address(buf,
+                       ust_buffers_offset_address(buf,
                                subbuf_idx * buf->chan->subbuf_size);
 
        header->lost_size = SUBBUF_OFFSET((buf->chan->subbuf_size - offset),
@@ -365,38 +329,6 @@ static notrace void ltt_buffer_end_callback(struct ust_buffer *buf,
        header->cycle_count_end = tsc;
        header->events_lost = local_read(&buf->events_lost);
        header->subbuf_corrupt = local_read(&buf->corrupted_subbuffers);
-
-}
-
-void (*wake_consumer)(void *, int) = NULL;
-
-void relay_set_wake_consumer(void (*wake)(void *, int))
-{
-       wake_consumer = wake;
-}
-
-void relay_wake_consumer(void *arg, int finished)
-{
-       if(wake_consumer)
-               wake_consumer(arg, finished);
-}
-
-static notrace void ltt_deliver(struct ust_buffer *buf, unsigned int subbuf_idx,
-               long commit_count)
-{
-       int result;
-
-//ust// #ifdef CONFIG_LTT_VMCORE
-       local_set(&buf->commit_seq[subbuf_idx], commit_count);
-//ust// #endif
-
-       /* wakeup consumer */
-       result = write(buf->data_ready_fd_write, "1", 1);
-       if(result == -1) {
-               PERROR("write (in ltt_relay_buffer_flush)");
-               ERR("this should never happen!");
-       }
-//ust//        atomic_set(&ltt_buf->wakeup_readers, 1);
 }
 
 /*
@@ -406,27 +338,79 @@ static notrace void ltt_buf_unfull(struct ust_buffer *buf,
                unsigned int subbuf_idx,
                long offset)
 {
-//ust//        struct ltt_channel_struct *ltt_channel =
-//ust//                (struct ltt_channel_struct *)buf->chan->private_data;
-//ust//        struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
-//ust//
-//ust//        ltt_relay_wake_writers(ltt_buf);
 }
 
-int ust_buffers_do_get_subbuf(struct ust_buffer *buf, long *pconsumed_old)
+/*
+ * Promote compiler barrier to a smp_mb().
+ * For the specific LTTng case, this IPI call should be removed if the
+ * architecture does not reorder writes.  This should eventually be provided by
+ * a separate architecture-specific infrastructure.
+ */
+static void remote_mb(void *info)
+{
+       smp_mb();
+}
+
+int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
 {
        struct ust_channel *channel = buf->chan;
        long consumed_old, consumed_idx, commit_count, write_offset;
+//ust//        int retval;
+
        consumed_old = atomic_long_read(&buf->consumed);
        consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
-       commit_count = local_read(&buf->commit_count[consumed_idx]);
+       commit_count = local_read(&buf->commit_count[consumed_idx].cc_sb);
        /*
         * Make sure we read the commit count before reading the buffer
         * data and the write offset. Correct consumed offset ordering
         * wrt commit count is insured by the use of cmpxchg to update
         * the consumed offset.
+        * smp_call_function_single can fail if the remote CPU is offline,
+        * this is OK because then there is no wmb to execute there.
+        * If our thread is executing on the same CPU as the one the buffer
+        * belongs to, we don't have to synchronize it at all. If we are
+        * migrated, the scheduler will take care of the memory barriers.
+        * Normally, smp_call_function_single() should ensure program order when
+        * executing the remote function, which implies that it surrounds the
+        * function execution with :
+        * smp_mb()
+        * send IPI
+        * csd_lock_wait
+        *                recv IPI
+        *                smp_mb()
+        *                exec. function
+        *                smp_mb()
+        *                csd unlock
+        * smp_mb()
+        *
+        * However, smp_call_function_single() does not seem to clearly execute
+        * such barriers. It depends on spinlock semantic to provide the barrier
+        * before executing the IPI and, when busy-looping, csd_lock_wait only
+        * executes smp_mb() when it has to wait for the other CPU.
+        *
+        * I don't trust this code. Therefore, let's add the smp_mb() sequence
+        * required ourselves, even if duplicated. It has no performance impact
+        * anyway.
+        *
+        * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
+        * read and write vs write. They do not ensure core synchronization. We
+        * really have to ensure total order between the 3 barriers running on
+        * the 2 CPUs.
+        */
+//ust// #ifdef LTT_NO_IPI_BARRIER
+       /*
+        * Local rmb to match the remote wmb to read the commit count before the
+        * buffer data and the write offset.
         */
        smp_rmb();
+//ust// #else
+//ust//        if (raw_smp_processor_id() != buf->cpu) {
+//ust//                smp_mb();       /* Total order with IPI handler smp_mb() */
+//ust//                smp_call_function_single(buf->cpu, remote_mb, NULL, 1);
+//ust//                smp_mb();       /* Total order with IPI handler smp_mb() */
+//ust//        }
+//ust// #endif
+
        write_offset = local_read(&buf->offset);
        /*
         * Check that the subbuffer we are trying to consume has been
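
The long comment above argues for one essential pairing: the writer must make its slot data visible before publishing the commit count, and the reader must read the commit count before looking at the data or the write offset; the IPI/smp_mb() discussion is only about strengthening that pairing across CPUs. A reduced sketch of the pairing using C11 atomics, as an illustration of the ordering idea rather than the UST implementation (which uses kernel-style barriers and local_t counters):

    #include <stdatomic.h>
    #include <stdio.h>

    static char subbuf[64];
    static atomic_long commit_count;

    static void producer(void)
    {
            subbuf[0] = 'x';                /* write the slot data first */
            /* release: data is ordered before the published commit count */
            atomic_store_explicit(&commit_count, 1, memory_order_release);
    }

    static int consumer(void)
    {
            /* acquire: read the commit count before touching the data */
            if (atomic_load_explicit(&commit_count, memory_order_acquire) < 1)
                    return -1;              /* not committed yet, try again later */
            return subbuf[0];               /* safe: the data write happened before */
    }

    int main(void)
    {
            producer();
            printf("read: %c\n", (char)consumer());
            return 0;
    }
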
@@ -449,11 +433,17 @@ int ust_buffers_do_get_subbuf(struct ust_buffer *buf, long *pconsumed_old)
                return -EAGAIN;
        }
 
-       *pconsumed_old = consumed_old;
+       /* FIXME: is this ok to disable the reading feature? */
+//ust//        retval = update_read_sb_index(buf, consumed_idx);
+//ust//        if (retval)
+//ust//                return retval;
+
+       *consumed = consumed_old;
+
        return 0;
 }
 
-int ust_buffers_do_put_subbuf(struct ust_buffer *buf, u32 uconsumed_old)
+int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
 {
        long consumed_new, consumed_old;
 
@@ -483,36 +473,125 @@ int ust_buffers_do_put_subbuf(struct ust_buffer *buf, u32 uconsumed_old)
        return 0;
 }
 
+//ust// static void switch_buffer(unsigned long data)
+//ust// {
+//ust//        struct ltt_channel_buf_struct *ltt_buf =
+//ust//                (struct ltt_channel_buf_struct *)data;
+//ust//        struct rchan_buf *buf = ltt_buf->rbuf;
+//ust//
+//ust//        if (buf)
+//ust//                ltt_force_switch(buf, FORCE_ACTIVE);
+//ust//
+//ust//        ltt_buf->switch_timer.expires += ltt_buf->switch_timer_interval;
+//ust//        add_timer_on(&ltt_buf->switch_timer, smp_processor_id());
+//ust// }
+//ust//
+//ust// static void start_switch_timer(struct ltt_channel_struct *ltt_channel)
+//ust// {
+//ust//        struct rchan *rchan = ltt_channel->trans_channel_data;
+//ust//        int cpu;
+//ust//
+//ust//        if (!ltt_channel->switch_timer_interval)
+//ust//                return;
+//ust//
+//ust//        // TODO : hotplug
+//ust//        for_each_online_cpu(cpu) {
+//ust//                struct ltt_channel_buf_struct *ltt_buf;
+//ust//                struct rchan_buf *buf;
+//ust//
+//ust//                buf = rchan->buf[cpu];
+//ust//                ltt_buf = buf->chan_private;
+//ust//                buf->random_access = 1;
+//ust//                ltt_buf->switch_timer_interval =
+//ust//                        ltt_channel->switch_timer_interval;
+//ust//                init_timer(&ltt_buf->switch_timer);
+//ust//                ltt_buf->switch_timer.function = switch_buffer;
+//ust//                ltt_buf->switch_timer.expires = jiffies +
+//ust//                                        ltt_buf->switch_timer_interval;
+//ust//                ltt_buf->switch_timer.data = (unsigned long)ltt_buf;
+//ust//                add_timer_on(&ltt_buf->switch_timer, cpu);
+//ust//        }
+//ust// }
+//ust//
+//ust// /*
+//ust//  * Cannot use del_timer_sync with add_timer_on, so use an IPI to locally
+//ust//  * delete the timer.
+//ust//  */
+//ust// static void stop_switch_timer_ipi(void *info)
+//ust// {
+//ust//        struct ltt_channel_buf_struct *ltt_buf =
+//ust//                (struct ltt_channel_buf_struct *)info;
+//ust//
+//ust//        del_timer(&ltt_buf->switch_timer);
+//ust// }
+//ust//
+//ust// static void stop_switch_timer(struct ltt_channel_struct *ltt_channel)
+//ust// {
+//ust//        struct rchan *rchan = ltt_channel->trans_channel_data;
+//ust//        int cpu;
+//ust//
+//ust//        if (!ltt_channel->switch_timer_interval)
+//ust//                return;
+//ust//
+//ust//        // TODO : hotplug
+//ust//        for_each_online_cpu(cpu) {
+//ust//                struct ltt_channel_buf_struct *ltt_buf;
+//ust//                struct rchan_buf *buf;
+//ust//
+//ust//                buf = rchan->buf[cpu];
+//ust//                ltt_buf = buf->chan_private;
+//ust//                smp_call_function(stop_switch_timer_ipi, ltt_buf, 1);
+//ust//                buf->random_access = 0;
+//ust//        }
+//ust// }
+
+static void ust_buffers_print_written(struct ust_channel *chan,
+               long cons_off, unsigned int cpu)
+{
+       struct ust_buffer *buf = chan->buf[cpu];
+       long cons_idx, events_count;
+
+       cons_idx = SUBBUF_INDEX(cons_off, chan);
+       events_count = local_read(&buf->commit_count[cons_idx].events);
+
+       if (events_count)
+               printk(KERN_INFO
+                       "channel %s: %lu events written (cpu %u, index %lu)\n",
+                       chan->channel_name, events_count, cpu, cons_idx);
+}
+
 static void ltt_relay_print_subbuffer_errors(
                struct ust_channel *channel,
                long cons_off, int cpu)
 {
        struct ust_buffer *ltt_buf = channel->buf[cpu];
-       long cons_idx, commit_count, write_offset;
+       long cons_idx, commit_count, commit_count_sb, write_offset;
 
        cons_idx = SUBBUF_INDEX(cons_off, channel);
-       commit_count = local_read(&ltt_buf->commit_count[cons_idx]);
+       commit_count = local_read(&ltt_buf->commit_count[cons_idx].cc);
+       commit_count_sb = local_read(&ltt_buf->commit_count[cons_idx].cc_sb);
+
        /*
         * No need to order commit_count and write_offset reads because we
         * execute after trace is stopped when there are no readers left.
         */
        write_offset = local_read(&ltt_buf->offset);
        WARN( "LTT : unread channel %s offset is %ld "
-               "and cons_off : %ld\n",
-               channel->channel_name, write_offset, cons_off);
+               "and cons_off : %ld (cpu %d)\n",
+               channel->channel_name, write_offset, cons_off, cpu);
        /* Check each sub-buffer for non filled commit count */
        if (((commit_count - channel->subbuf_size) & channel->commit_count_mask)
            - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0) {
                ERR("LTT : %s : subbuffer %lu has non filled "
-                       "commit count %lu.\n",
-                       channel->channel_name, cons_idx, commit_count);
+                       "commit count [cc, cc_sb] [%lu,%lu].\n",
+                       channel->channel_name, cons_idx, commit_count, commit_count_sb);
        }
        ERR("LTT : %s : commit count : %lu, subbuf size %zd\n",
                        channel->channel_name, commit_count,
                        channel->subbuf_size);
 }
 
-static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
+static void ltt_relay_print_errors(struct ust_trace *trace,
                struct ust_channel *channel, int cpu)
 {
        struct ust_buffer *ltt_buf = channel->buf[cpu];
@@ -525,6 +604,9 @@ static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
        if (!channel)
                return;
 
+       for (cons_off = 0; cons_off < rchan->alloc_size;
+            cons_off = SUBBUF_ALIGN(cons_off, rchan))
+               ust_buffers_print_written(ltt_chan, cons_off, cpu);
        for (cons_off = atomic_long_read(&ltt_buf->consumed);
                        (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
                                      channel)
@@ -535,17 +617,17 @@ static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
 
 static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu)
 {
-       struct ltt_trace_struct *trace = channel->trace;
+       struct ust_trace *trace = channel->trace;
        struct ust_buffer *ltt_buf = channel->buf[cpu];
 
        if (local_read(&ltt_buf->events_lost))
-               ERR("channel %s: %ld events lost",
+               ERR("channel %s: %ld events lost (cpu %d)",
                        channel->channel_name,
-                       local_read(&ltt_buf->events_lost));
+                       local_read(&ltt_buf->events_lost), cpu);
        if (local_read(&ltt_buf->corrupted_subbuffers))
-               ERR("channel %s : %ld corrupted subbuffers",
+               ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
                        channel->channel_name,
-                       local_read(&ltt_buf->corrupted_subbuffers));
+                       local_read(&ltt_buf->corrupted_subbuffers), cpu);
 
        ltt_relay_print_errors(trace, channel, cpu);
 }
@@ -560,14 +642,14 @@ static void ltt_relay_release_channel(struct kref *kref)
 /*
  * Create ltt buffer.
  */
-//ust// static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
+//ust// static int ltt_relay_create_buffer(struct ust_trace *trace,
 //ust//                struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
 //ust//                unsigned int cpu, unsigned int n_subbufs)
 //ust// {
 //ust//        struct ltt_channel_buf_struct *ltt_buf =
 //ust//                percpu_ptr(ltt_chan->buf, cpu);
 //ust//        unsigned int j;
-//ust// 
+//ust//
 //ust//        ltt_buf->commit_count =
 //ust//                kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
 //ust//                        GFP_KERNEL, cpu_to_node(cpu));
@@ -584,19 +666,19 @@ static void ltt_relay_release_channel(struct kref *kref)
 //ust//        init_waitqueue_head(&ltt_buf->write_wait);
 //ust//        atomic_set(&ltt_buf->wakeup_readers, 0);
 //ust//        spin_lock_init(&ltt_buf->full_lock);
-//ust// 
+//ust//
 //ust//        ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
 //ust//        /* atomic_add made on local variable on data that belongs to
 //ust//         * various CPUs : ok because tracing not started (for this cpu). */
 //ust//        local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
-//ust// 
+//ust//
 //ust//        local_set(&ltt_buf->events_lost, 0);
 //ust//        local_set(&ltt_buf->corrupted_subbuffers, 0);
-//ust// 
+//ust//
 //ust//        return 0;
 //ust// }
 
-static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
+static int ust_buffers_init_buffer(struct ust_trace *trace,
                struct ust_channel *ltt_chan, struct ust_buffer *buf,
                unsigned int n_subbufs)
 {
@@ -605,7 +687,7 @@ static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
        int result;
 
        buf->commit_count =
-               zmalloc(sizeof(buf->commit_count) * n_subbufs);
+               zmalloc(sizeof(*buf->commit_count) * n_subbufs);
        if (!buf->commit_count)
                return -ENOMEM;
        kref_get(&trace->kref);
@@ -614,15 +696,17 @@ static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
        local_set(&buf->offset, ltt_subbuffer_header_size());
        atomic_long_set(&buf->consumed, 0);
        atomic_long_set(&buf->active_readers, 0);
-       for (j = 0; j < n_subbufs; j++)
-               local_set(&buf->commit_count[j], 0);
+       for (j = 0; j < n_subbufs; j++) {
+               local_set(&buf->commit_count[j].cc, 0);
+               local_set(&buf->commit_count[j].cc_sb, 0);
+       }
 //ust//        init_waitqueue_head(&buf->write_wait);
 //ust//        atomic_set(&buf->wakeup_readers, 0);
 //ust//        spin_lock_init(&buf->full_lock);
 
-       ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
+       ltt_buffer_begin(buf, trace->start_tsc, 0);
 
-       local_add(ltt_subbuffer_header_size(), &buf->commit_count[0]);
+       local_add(ltt_subbuffer_header_size(), &buf->commit_count[0].cc);
 
        local_set(&buf->events_lost, 0);
        local_set(&buf->corrupted_subbuffers, 0);
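
Several hunks in this file switch the per-subbuffer commit count from a single counter to a small structure with separate fields: the writer-side count (.cc), the count published to the consumer (.cc_sb, read by ust_buffers_get_subbuf() above), and an event counter (.events, printed by ust_buffers_print_written()). Its definition lives in the UST headers rather than in this diff; an assumed shape, for orientation only:

    /* assumed layout of the new per-subbuffer counters (illustrative;
     * the real type uses local_t fields and is declared in the headers
     * this commit also touches) */
    struct commit_counters {
            long cc;        /* bytes committed by writers in this sub-buffer */
            long cc_sb;     /* commit count exposed to the consumer */
            long events;    /* events written, used for statistics output */
    };
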
@@ -654,7 +738,7 @@ static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
 /* FIXME: use this function */
 static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu)
 {
-       struct ltt_trace_struct *trace = ltt_chan->trace;
+       struct ust_trace *trace = ltt_chan->trace;
        struct ust_buffer *ltt_buf = ltt_chan->buf[cpu];
 
        kref_put(&ltt_chan->trace->ltt_transport_kref,
@@ -726,7 +810,7 @@ static int ust_buffers_alloc_channel_buf_structs(struct ust_channel *chan)
 /*
  * Create channel.
  */
-static int ust_buffers_create_channel(const char *trace_name, struct ltt_trace_struct *trace,
+static int ust_buffers_create_channel(const char *trace_name, struct ust_trace *trace,
        const char *channel_name, struct ust_channel *ltt_chan,
        unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
 {
@@ -735,8 +819,6 @@ static int ust_buffers_create_channel(const char *trace_name, struct ltt_trace_s
        kref_init(&ltt_chan->kref);
 
        ltt_chan->trace = trace;
-       ltt_chan->buffer_begin = ltt_buffer_begin_callback;
-       ltt_chan->buffer_end = ltt_buffer_end_callback;
        ltt_chan->overwrite = overwrite;
        ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
        ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
@@ -847,137 +929,512 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
        kref_put(&channel->kref, ltt_relay_release_channel);
 }
 
-struct ltt_reserve_switch_offsets {
-       long begin, end, old;
-       long begin_switch, end_switch_current, end_switch_old;
-       long commit_count, reserve_commit_diff;
-       size_t before_hdr_pad, size;
-};
+//ust// /*
+//ust//  * Returns :
+//ust//  * 0 if ok
+//ust//  * !0 if execution must be aborted.
+//ust//  */
+//ust// static inline int ltt_relay_try_reserve(
+//ust//                struct ust_channel *channel, struct ust_buffer *buf,
+//ust//                struct ltt_reserve_switch_offsets *offsets, size_t data_size,
+//ust//                u64 *tsc, unsigned int *rflags, int largest_align)
+//ust// {
+//ust//        offsets->begin = local_read(&buf->offset);
+//ust//        offsets->old = offsets->begin;
+//ust//        offsets->begin_switch = 0;
+//ust//        offsets->end_switch_current = 0;
+//ust//        offsets->end_switch_old = 0;
+//ust//
+//ust//        *tsc = trace_clock_read64();
+//ust//        if (last_tsc_overflow(buf, *tsc))
+//ust//                *rflags = LTT_RFLAG_ID_SIZE_TSC;
+//ust//
+//ust//        if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
+//ust//                offsets->begin_switch = 1;              /* For offsets->begin */
+//ust//        } else {
+//ust//                offsets->size = ust_get_header_size(channel,
+//ust//                                        offsets->begin, data_size,
+//ust//                                        &offsets->before_hdr_pad, *rflags);
+//ust//                offsets->size += ltt_align(offsets->begin + offsets->size,
+//ust//                                           largest_align)
+//ust//                                 + data_size;
+//ust//                if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
+//ust//                                > buf->chan->subbuf_size) {
+//ust//                        offsets->end_switch_old = 1;    /* For offsets->old */
+//ust//                        offsets->begin_switch = 1;      /* For offsets->begin */
+//ust//                }
+//ust//        }
+//ust//        if (offsets->begin_switch) {
+//ust//                long subbuf_index;
+//ust//
+//ust//                if (offsets->end_switch_old)
+//ust//                        offsets->begin = SUBBUF_ALIGN(offsets->begin,
+//ust//                                                      buf->chan);
+//ust//                offsets->begin = offsets->begin + ltt_subbuffer_header_size();
+//ust//                /* Test new buffer integrity */
+//ust//                subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
+//ust//                offsets->reserve_commit_diff =
+//ust//                        (BUFFER_TRUNC(offsets->begin, buf->chan)
+//ust//                         >> channel->n_subbufs_order)
+//ust//                        - (local_read(&buf->commit_count[subbuf_index])
+//ust//                                & channel->commit_count_mask);
+//ust//                if (offsets->reserve_commit_diff == 0) {
+//ust//                        long consumed;
+//ust//
+//ust//                        consumed = atomic_long_read(&buf->consumed);
+//ust//
+//ust//                        /* Next buffer not corrupted. */
+//ust//                        if (!channel->overwrite &&
+//ust//                                (SUBBUF_TRUNC(offsets->begin, buf->chan)
+//ust//                                 - SUBBUF_TRUNC(consumed, buf->chan))
+//ust//                                >= channel->alloc_size) {
+//ust//
+//ust//                                long consumed_idx = SUBBUF_INDEX(consumed, buf->chan);
+//ust//                                long commit_count = local_read(&buf->commit_count[consumed_idx]);
+//ust//                                if(((commit_count - buf->chan->subbuf_size) & channel->commit_count_mask) - (BUFFER_TRUNC(consumed, buf->chan) >> channel->n_subbufs_order) != 0) {
+//ust//                                        WARN("Event dropped. Caused by non-committed event.");
+//ust//                                }
+//ust//                                else {
+//ust//                                        WARN("Event dropped. Caused by non-consumed buffer.");
+//ust//                                }
+//ust//                                /*
+//ust//                                 * We do not overwrite non consumed buffers
+//ust//                                 * and we are full : event is lost.
+//ust//                                 */
+//ust//                                local_inc(&buf->events_lost);
+//ust//                                return -1;
+//ust//                        } else {
+//ust//                                /*
+//ust//                                 * next buffer not corrupted, we are either in
+//ust//                                 * overwrite mode or the buffer is not full.
+//ust//                                 * It's safe to write in this new subbuffer.
+//ust//                                 */
+//ust//                        }
+//ust//                } else {
+//ust//                        /*
+//ust//                         * Next subbuffer corrupted. Force pushing reader even
+//ust//                         * in normal mode. It's safe to write in this new
+//ust//                         * subbuffer.
+//ust//                         */
+//ust//                }
+//ust//                offsets->size = ust_get_header_size(channel,
+//ust//                                        offsets->begin, data_size,
+//ust//                                        &offsets->before_hdr_pad, *rflags);
+//ust//                offsets->size += ltt_align(offsets->begin + offsets->size,
+//ust//                                           largest_align)
+//ust//                                 + data_size;
+//ust//                if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
+//ust//                                > buf->chan->subbuf_size) {
+//ust//                        /*
+//ust//                         * Event too big for subbuffers, report error, don't
+//ust//                         * complete the sub-buffer switch.
+//ust//                         */
+//ust//                        local_inc(&buf->events_lost);
+//ust//                        return -1;
+//ust//                } else {
+//ust//                        /*
+//ust//                         * We just made a successful buffer switch and the event
+//ust//                         * fits in the new subbuffer. Let's write.
+//ust//                         */
+//ust//                }
+//ust//        } else {
+//ust//                /*
+//ust//                 * Event fits in the current buffer and we are not on a switch
+//ust//                 * boundary. It's safe to write.
+//ust//                 */
+//ust//        }
+//ust//        offsets->end = offsets->begin + offsets->size;
+//ust//
+//ust//        if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
+//ust//                /*
+//ust//                 * The offset_end will fall at the very beginning of the next
+//ust//                 * subbuffer.
+//ust//                 */
+//ust//                offsets->end_switch_current = 1;        /* For offsets->begin */
+//ust//        }
+//ust//        return 0;
+//ust// }
+//ust//
+//ust// /*
+//ust//  * Returns :
+//ust//  * 0 if ok
+//ust//  * !0 if execution must be aborted.
+//ust//  */
+//ust// static inline int ltt_relay_try_switch(
+//ust//                enum force_switch_mode mode,
+//ust//                struct ust_channel *channel,
+//ust//                struct ust_buffer *buf,
+//ust//                struct ltt_reserve_switch_offsets *offsets,
+//ust//                u64 *tsc)
+//ust// {
+//ust//        long subbuf_index;
+//ust//
+//ust//        offsets->begin = local_read(&buf->offset);
+//ust//        offsets->old = offsets->begin;
+//ust//        offsets->begin_switch = 0;
+//ust//        offsets->end_switch_old = 0;
+//ust//
+//ust//        *tsc = trace_clock_read64();
+//ust//
+//ust//        if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
+//ust//                offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
+//ust//                offsets->end_switch_old = 1;
+//ust//        } else {
+//ust//                /* we do not have to switch : buffer is empty */
+//ust//                return -1;
+//ust//        }
+//ust//        if (mode == FORCE_ACTIVE)
+//ust//                offsets->begin += ltt_subbuffer_header_size();
+//ust//        /*
+//ust//         * Always begin_switch in FORCE_ACTIVE mode.
+//ust//         * Test new buffer integrity
+//ust//         */
+//ust//        subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
+//ust//        offsets->reserve_commit_diff =
+//ust//                (BUFFER_TRUNC(offsets->begin, buf->chan)
+//ust//                 >> channel->n_subbufs_order)
+//ust//                - (local_read(&buf->commit_count[subbuf_index])
+//ust//                        & channel->commit_count_mask);
+//ust//        if (offsets->reserve_commit_diff == 0) {
+//ust//                /* Next buffer not corrupted. */
+//ust//                if (mode == FORCE_ACTIVE
+//ust//                    && !channel->overwrite
+//ust//                    && offsets->begin - atomic_long_read(&buf->consumed)
+//ust//                       >= channel->alloc_size) {
+//ust//                        /*
+//ust//                         * We do not overwrite non consumed buffers and we are
+//ust//                         * full : ignore switch while tracing is active.
+//ust//                         */
+//ust//                        return -1;
+//ust//                }
+//ust//        } else {
+//ust//                /*
+//ust//                 * Next subbuffer corrupted. Force pushing reader even in normal
+//ust//                 * mode
+//ust//                 */
+//ust//        }
+//ust//        offsets->end = offsets->begin;
+//ust//        return 0;
+//ust// }
+//ust//
+//ust// static inline void ltt_reserve_push_reader(
+//ust//                struct ust_channel *channel,
+//ust//                struct ust_buffer *buf,
+//ust//                struct ltt_reserve_switch_offsets *offsets)
+//ust// {
+//ust//        long consumed_old, consumed_new;
+//ust//
+//ust//        do {
+//ust//                consumed_old = atomic_long_read(&buf->consumed);
+//ust//                /*
+//ust//                 * If buffer is in overwrite mode, push the reader consumed
+//ust//                 * count if the write position has reached it and we are not
+//ust//                 * at the first iteration (don't push the reader farther than
+//ust//                 * the writer). This operation can be done concurrently by many
+//ust//                 * writers in the same buffer, the writer being at the farthest
+//ust//                 * write position sub-buffer index in the buffer being the one
+//ust//                 * which will win this loop.
+//ust//                 * If the buffer is not in overwrite mode, pushing the reader
+//ust//                 * only happens if a sub-buffer is corrupted.
+//ust//                 */
+//ust//                if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
+//ust//                   - SUBBUF_TRUNC(consumed_old, buf->chan))
+//ust//                   >= channel->alloc_size)
+//ust//                        consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
+//ust//                else {
+//ust//                        consumed_new = consumed_old;
+//ust//                        break;
+//ust//                }
+//ust//        } while (atomic_long_cmpxchg(&buf->consumed, consumed_old,
+//ust//                        consumed_new) != consumed_old);
+//ust//
+//ust//        if (consumed_old != consumed_new) {
+//ust//                /*
+//ust//                 * Reader pushed : we are the winner of the push, we can
+//ust//                 * therefore reequilibrate reserve and commit. Atomic increment
+//ust//                 * of the commit count permits other writers to play around
+//ust//                 * with this variable before us. We keep track of
+//ust//                 * corrupted_subbuffers even in overwrite mode :
+//ust//                 * we never want to write over a non completely committed
+//ust//                 * sub-buffer : possible causes : the buffer size is too low
+//ust//                 * compared to the unordered data input, or there is a writer
+//ust//                 * that died between the reserve and the commit.
+//ust//                 */
+//ust//                if (offsets->reserve_commit_diff) {
+//ust//                        /*
+//ust//                         * We have to alter the sub-buffer commit count.
+//ust//                         * We do not deliver the previous subbuffer, given it
+//ust//                         * was either corrupted or not consumed (overwrite
+//ust//                         * mode).
+//ust//                         */
+//ust//                        local_add(offsets->reserve_commit_diff,
+//ust//                                  &buf->commit_count[
+//ust//                                        SUBBUF_INDEX(offsets->begin,
+//ust//                                                     buf->chan)]);
+//ust//                        if (!channel->overwrite
+//ust//                            || offsets->reserve_commit_diff
+//ust//                               != channel->subbuf_size) {
+//ust//                                /*
+//ust//                                 * The reserve commit diff was not subbuf_size :
+//ust//                                 * it means the subbuffer was partly written to
+//ust//                                 * and is therefore corrupted. If it is multiple
+//ust//                                 * of subbuffer size and we are in flight
+//ust//                                 * recorder mode, we are skipping over a whole
+//ust//                                 * subbuffer.
+//ust//                                 */
+//ust//                                local_inc(&buf->corrupted_subbuffers);
+//ust//                        }
+//ust//                }
+//ust//        }
+//ust// }
+//ust//
+//ust// /**
+//ust//  * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer.
+//ust//  * @trace: the trace structure to log to.
+//ust//  * @ltt_channel: channel structure
+//ust//  * @transport_data: data structure specific to ltt relay
+//ust//  * @data_size: size of the variable length data to log.
+//ust//  * @slot_size: pointer to total size of the slot (out)
+//ust//  * @buf_offset : pointer to reserved buffer offset (out)
+//ust//  * @tsc: pointer to the tsc at the slot reservation (out)
+//ust//  * @cpu: cpuid
+//ust//  *
+//ust//  * Return : -ENOSPC if not enough space, else returns 0.
+//ust//  * It will take care of sub-buffer switching.
+//ust//  */
+//ust// static notrace int ltt_relay_reserve_slot(struct ust_trace *trace,
+//ust//                struct ust_channel *channel, void **transport_data,
+//ust//                size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
+//ust//                unsigned int *rflags, int largest_align, int cpu)
+//ust// {
+//ust//        struct ust_buffer *buf = *transport_data = channel->buf[cpu];
+//ust//        struct ltt_reserve_switch_offsets offsets;
+//ust//
+//ust//        offsets.reserve_commit_diff = 0;
+//ust//        offsets.size = 0;
+//ust//
+//ust//        /*
+//ust//         * Perform retryable operations.
+//ust//         */
+//ust//        if (ltt_nesting > 4) {
+//ust//                local_inc(&buf->events_lost);
+//ust//                return -EPERM;
+//ust//        }
+//ust//        do {
+//ust//                if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags,
+//ust//                                largest_align))
+//ust//                        return -ENOSPC;
+//ust//        } while (local_cmpxchg(&buf->offset, offsets.old,
+//ust//                        offsets.end) != offsets.old);
+//ust//
+//ust//        /*
+//ust//         * Atomically update last_tsc. This update races against concurrent
+//ust//         * atomic updates, but the race will always cause supplementary full TSC
+//ust//         * events, never the opposite (missing a full TSC event when it would be
+//ust//         * needed).
+//ust//         */
+//ust//        save_last_tsc(buf, *tsc);
+//ust//
+//ust//        /*
+//ust//         * Push the reader if necessary
+//ust//         */
+//ust//        ltt_reserve_push_reader(channel, buf, &offsets);
+//ust//
+//ust//        /*
+//ust//         * Switch old subbuffer if needed.
+//ust//         */
+//ust//        if (offsets.end_switch_old)
+//ust//                ltt_reserve_switch_old_subbuf(channel, buf, &offsets, tsc);
+//ust//
+//ust//        /*
+//ust//         * Populate new subbuffer.
+//ust//         */
+//ust//        if (offsets.begin_switch)
+//ust//                ltt_reserve_switch_new_subbuf(channel, buf, &offsets, tsc);
+//ust//
+//ust//        if (offsets.end_switch_current)
+//ust//                ltt_reserve_end_switch_current(channel, buf, &offsets, tsc);
+//ust//
+//ust//        *slot_size = offsets.size;
+//ust//        *buf_offset = offsets.begin + offsets.before_hdr_pad;
+//ust//        return 0;
+//ust// }
+//ust//
+//ust// /*
+//ust//  * Force a sub-buffer switch for a per-cpu buffer. This operation is
+//ust//  * completely reentrant : can be called while tracing is active with
+//ust//  * absolutely no lock held.
+//ust//  *
+//ust//  * Note, however, that as a local_cmpxchg is used for some atomic
+//ust//  * operations, this function must be called from the CPU which owns the buffer
+//ust//  * for a ACTIVE flush.
+//ust//  */
+//ust// static notrace void ltt_force_switch(struct ust_buffer *buf,
+//ust//                enum force_switch_mode mode)
+//ust// {
+//ust//        struct ust_channel *channel = buf->chan;
+//ust//        struct ltt_reserve_switch_offsets offsets;
+//ust//        u64 tsc;
+//ust//
+//ust//        offsets.reserve_commit_diff = 0;
+//ust//        offsets.size = 0;
+//ust//
+//ust//        /*
+//ust//         * Perform retryable operations.
+//ust//         */
+//ust//        do {
+//ust//                if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc))
+//ust//                        return;
+//ust//        } while (local_cmpxchg(&buf->offset, offsets.old,
+//ust//                        offsets.end) != offsets.old);
+//ust//
+//ust//        /*
+//ust//         * Atomically update last_tsc. This update races against concurrent
+//ust//         * atomic updates, but the race will always cause supplementary full TSC
+//ust//         * events, never the opposite (missing a full TSC event when it would be
+//ust//         * needed).
+//ust//         */
+//ust//        save_last_tsc(buf, tsc);
+//ust//
+//ust//        /*
+//ust//         * Push the reader if necessary
+//ust//         */
+//ust//        if (mode == FORCE_ACTIVE)
+//ust//                ltt_reserve_push_reader(channel, buf, &offsets);
+//ust//
+//ust//        /*
+//ust//         * Switch old subbuffer if needed.
+//ust//         */
+//ust//        if (offsets.end_switch_old)
+//ust//                ltt_reserve_switch_old_subbuf(channel, buf, &offsets, &tsc);
+//ust//
+//ust//        /*
+//ust//         * Populate new subbuffer.
+//ust//         */
+//ust//        if (mode == FORCE_ACTIVE)
+//ust//                ltt_reserve_switch_new_subbuf(channel, buf, &offsets, &tsc);
+//ust// }
 
 /*
- * Returns :
- * 0 if ok
- * !0 if execution must be aborted.
+ * ltt_reserve_switch_old_subbuf: switch old subbuffer
+ *
+ * Concurrency safe because we are the last and only thread to alter this
+ * sub-buffer. As long as it is not delivered and read, no other thread can
+ * alter the offset, alter the reserve_count or call the
+ * client_buffer_end_callback on this sub-buffer.
+ *
+ * The only remaining threads could be the ones with pending commits. They will
+ * have to do the deliver themselves.  Not concurrency safe in overwrite mode.
+ * We detect corrupted subbuffers with commit and reserve counts. We keep a
+ * corrupted sub-buffers count and push the readers across these sub-buffers.
+ *
+ * Not concurrency safe if a writer is stalled in a subbuffer and another writer
+ * switches in, finding out it's corrupted.  The result will be that the old
+ * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
+ * will be declared corrupted too because of the commit count adjustment.
+ *
+ * Note : offset_old should never be 0 here.
  */
-static inline int ltt_relay_try_reserve(
-               struct ust_channel *channel, struct ust_buffer *buf,
-               struct ltt_reserve_switch_offsets *offsets, size_t data_size,
-               u64 *tsc, unsigned int *rflags, int largest_align)
+static void ltt_reserve_switch_old_subbuf(
+               struct ust_channel *chan, struct ust_buffer *buf,
+               struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
 {
-       offsets->begin = local_read(&buf->offset);
-       offsets->old = offsets->begin;
-       offsets->begin_switch = 0;
-       offsets->end_switch_current = 0;
-       offsets->end_switch_old = 0;
+       long oldidx = SUBBUF_INDEX(offsets->old - 1, chan);
+       long commit_count, padding_size;
 
-       *tsc = trace_clock_read64();
-       if (last_tsc_overflow(buf, *tsc))
-               *rflags = LTT_RFLAG_ID_SIZE_TSC;
+       padding_size = chan->subbuf_size
+                       - (SUBBUF_OFFSET(offsets->old - 1, chan) + 1);
+       ltt_buffer_end(buf, *tsc, offsets->old, oldidx);
 
-       if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
-               offsets->begin_switch = 1;              /* For offsets->begin */
-       } else {
-               offsets->size = ust_get_header_size(channel,
-                                       offsets->begin, data_size,
-                                       &offsets->before_hdr_pad, *rflags);
-               offsets->size += ltt_align(offsets->begin + offsets->size,
-                                          largest_align)
-                                + data_size;
-               if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
-                               > buf->chan->subbuf_size) {
-                       offsets->end_switch_old = 1;    /* For offsets->old */
-                       offsets->begin_switch = 1;      /* For offsets->begin */
-               }
-       }
-       if (offsets->begin_switch) {
-               long subbuf_index;
+       /*
+        * Must write slot data before incrementing commit count.
+        * This compiler barrier is upgraded into a smp_wmb() by the IPI
+        * sent by get_subbuf() when it does its smp_rmb().
+        */
+       barrier();
+       local_add(padding_size,
+                 &buf->commit_count[oldidx].cc);
+       commit_count = local_read(&buf->commit_count[oldidx].cc);
+       ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
+       ltt_write_commit_counter(buf, oldidx,
+               offsets->old, commit_count, padding_size);
+}
 
-               if (offsets->end_switch_old)
-                       offsets->begin = SUBBUF_ALIGN(offsets->begin,
-                                                     buf->chan);
-               offsets->begin = offsets->begin + ltt_subbuffer_header_size();
-               /* Test new buffer integrity */
-               subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
-               offsets->reserve_commit_diff =
-                       (BUFFER_TRUNC(offsets->begin, buf->chan)
-                        >> channel->n_subbufs_order)
-                       - (local_read(&buf->commit_count[subbuf_index])
-                               & channel->commit_count_mask);
-               if (offsets->reserve_commit_diff == 0) {
-                       long consumed;
+/*
+ * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
+ *
+ * This code can be executed unordered : writers may already have written to the
+ * sub-buffer before this code gets executed, caution.  The commit makes sure
+ * that this code is executed before the deliver of this sub-buffer.
+ */
+static void ltt_reserve_switch_new_subbuf(
+               struct ust_channel *chan, struct ust_buffer *buf,
+               struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
+{
+       long beginidx = SUBBUF_INDEX(offsets->begin, chan);
+       long commit_count;
 
-                       consumed = atomic_long_read(&buf->consumed);
+       ltt_buffer_begin(buf, *tsc, beginidx);
 
-                       /* Next buffer not corrupted. */
-                       if (!channel->overwrite &&
-                               (SUBBUF_TRUNC(offsets->begin, buf->chan)
-                                - SUBBUF_TRUNC(consumed, buf->chan))
-                               >= channel->alloc_size) {
-
-                               long consumed_idx = SUBBUF_INDEX(consumed, buf->chan);
-                               long commit_count = local_read(&buf->commit_count[consumed_idx]);
-                               if(((commit_count - buf->chan->subbuf_size) & channel->commit_count_mask) - (BUFFER_TRUNC(consumed, buf->chan) >> channel->n_subbufs_order) != 0) {
-                                       WARN("Event dropped. Caused by non-committed event.");
-                               }
-                               else {
-                                       WARN("Event dropped. Caused by non-consumed buffer.");
-                               }
-                               /*
-                                * We do not overwrite non consumed buffers
-                                * and we are full : event is lost.
-                                */
-                               local_inc(&buf->events_lost);
-                               return -1;
-                       } else {
-                               /*
-                                * next buffer not corrupted, we are either in
-                                * overwrite mode or the buffer is not full.
-                                * It's safe to write in this new subbuffer.
-                                */
-                       }
-               } else {
-                       /*
-                        * Next subbuffer corrupted. Force pushing reader even
-                        * in normal mode. It's safe to write in this new
-                        * subbuffer.
-                        */
-               }
-               offsets->size = ust_get_header_size(channel,
-                                       offsets->begin, data_size,
-                                       &offsets->before_hdr_pad, *rflags);
-               offsets->size += ltt_align(offsets->begin + offsets->size,
-                                          largest_align)
-                                + data_size;
-               if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
-                               > buf->chan->subbuf_size) {
-                       /*
-                        * Event too big for subbuffers, report error, don't
-                        * complete the sub-buffer switch.
-                        */
-                       local_inc(&buf->events_lost);
-                       return -1;
-               } else {
-                       /*
-                        * We just made a successful buffer switch and the event
-                        * fits in the new subbuffer. Let's write.
-                        */
-               }
-       } else {
-               /*
-                * Event fits in the current buffer and we are not on a switch
-                * boundary. It's safe to write.
-                */
-       }
-       offsets->end = offsets->begin + offsets->size;
+       /*
+        * Must write slot data before incrementing commit count.
+        * This compiler barrier is upgraded into a smp_wmb() by the IPI
+        * sent by get_subbuf() when it does its smp_rmb().
+        */
+       barrier();
+       local_add(ltt_subbuffer_header_size(),
+                 &buf->commit_count[beginidx].cc);
+       commit_count = local_read(&buf->commit_count[beginidx].cc);
+       /* Check if the written buffer has to be delivered */
+       ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx);
+       ltt_write_commit_counter(buf, beginidx,
+               offsets->begin, commit_count, ltt_subbuffer_header_size());
+}
 
-       if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
-               /*
-                * The offset_end will fall at the very beginning of the next
-                * subbuffer.
-                */
-               offsets->end_switch_current = 1;        /* For offsets->begin */
-       }
-       return 0;
+/*
+ * ltt_reserve_end_switch_current: finish switching current subbuffer
+ *
+ * Concurrency safe because we are the last and only thread to alter this
+ * sub-buffer. As long as it is not delivered and read, no other thread can
+ * alter the offset, alter the reserve_count or call the
+ * client_buffer_end_callback on this sub-buffer.
+ *
+ * The only remaining threads could be the ones with pending commits. They will
+ * have to do the deliver themselves.  Not concurrency safe in overwrite mode.
+ * We detect corrupted subbuffers with commit and reserve counts. We keep a
+ * corrupted sub-buffers count and push the readers across these sub-buffers.
+ *
+ * Not concurrency safe if a writer is stalled in a subbuffer and another writer
+ * switches in, finding out it's corrupted.  The result will be that the old
+ * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
+ * will be declared corrupted too because of the commit count adjustment.
+ */
+static void ltt_reserve_end_switch_current(
+               struct ust_channel *chan,
+               struct ust_buffer *buf,
+               struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
+{
+       long endidx = SUBBUF_INDEX(offsets->end - 1, chan);
+       long commit_count, padding_size;
+
+       padding_size = chan->subbuf_size
+                       - (SUBBUF_OFFSET(offsets->end - 1, chan) + 1);
+
+       ltt_buffer_end(buf, *tsc, offsets->end, endidx);
+
+       /*
+        * Must write slot data before incrementing commit count.
+        * This compiler barrier is upgraded into a smp_wmb() by the IPI
+        * sent by get_subbuf() when it does its smp_rmb().
+        */
+       barrier();
+       local_add(padding_size,
+                 &buf->commit_count[endidx].cc);
+       commit_count = local_read(&buf->commit_count[endidx].cc);
+       ltt_check_deliver(chan, buf,
+               offsets->end - 1, commit_count, endidx);
+       ltt_write_commit_counter(buf, endidx,
+               offsets->end, commit_count, padding_size);
 }
 
 /*
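
Both switch helpers above compute padding_size = subbuf_size - (SUBBUF_OFFSET(offset - 1, chan) + 1), the unused tail of the sub-buffer, and add it to the commit count so that a switched-out sub-buffer still adds up to a full subbuf_size and can be delivered; the "- 1 ... + 1" makes an offset sitting exactly on a sub-buffer boundary count as a completely full sub-buffer (padding 0) instead of an empty one. A numeric sketch, assuming the usual power-of-two mask behind SUBBUF_OFFSET():

    #include <stdio.h>

    int main(void)
    {
            unsigned long subbuf_size = 4096;       /* power of two */
            unsigned long offset = 4096 + 100;      /* writer stopped 100 bytes into sub-buffer 1 */

            /* SUBBUF_OFFSET(x, chan) == x & (subbuf_size - 1) for power-of-two sizes */
            unsigned long used    = ((offset - 1) & (subbuf_size - 1)) + 1;
            unsigned long padding = subbuf_size - used;

            printf("used = %lu, padding = %lu\n", used, padding);  /* used=100, padding=3996 */

            offset = 2 * subbuf_size;               /* exactly on a boundary: full sub-buffer */
            used = ((offset - 1) & (subbuf_size - 1)) + 1;
            printf("used = %lu, padding = %lu\n", used, subbuf_size - used);  /* 4096, 0 */
            return 0;
    }
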
@@ -985,14 +1442,15 @@ static inline int ltt_relay_try_reserve(
  * 0 if ok
  * !0 if execution must be aborted.
  */
-static inline int ltt_relay_try_switch(
+static int ltt_relay_try_switch_slow(
                enum force_switch_mode mode,
-               struct ust_channel *channel,
+               struct ust_channel *chan,
                struct ust_buffer *buf,
                struct ltt_reserve_switch_offsets *offsets,
                u64 *tsc)
 {
        long subbuf_index;
+       long reserve_commit_diff;
 
        offsets->begin = local_read(&buf->offset);
        offsets->old = offsets->begin;
@@ -1015,17 +1473,17 @@ static inline int ltt_relay_try_switch(
         * Test new buffer integrity
         */
        subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
-       offsets->reserve_commit_diff =
+       reserve_commit_diff =
                (BUFFER_TRUNC(offsets->begin, buf->chan)
-                >> channel->n_subbufs_order)
-               - (local_read(&buf->commit_count[subbuf_index])
-                       & channel->commit_count_mask);
-       if (offsets->reserve_commit_diff == 0) {
+                >> chan->n_subbufs_order)
+               - (local_read(&buf->commit_count[subbuf_index].cc_sb)
+                       & chan->commit_count_mask);
+       if (reserve_commit_diff == 0) {
                /* Next buffer not corrupted. */
                if (mode == FORCE_ACTIVE
-                   && !channel->overwrite
+                   && !chan->overwrite
                    && offsets->begin - atomic_long_read(&buf->consumed)
-                      >= channel->alloc_size) {
+                      >= chan->alloc_size) {
                        /*
                         * We do not overwrite non consumed buffers and we are
                         * full : ignore switch while tracing is active.
@@ -1042,224 +1500,31 @@ static inline int ltt_relay_try_switch(
        return 0;
 }
 
-static inline void ltt_reserve_push_reader(
-               struct ust_channel *channel,
-               struct ust_buffer *buf,
-               struct ltt_reserve_switch_offsets *offsets)
-{
-       long consumed_old, consumed_new;
-
-       do {
-               consumed_old = atomic_long_read(&buf->consumed);
-               /*
-                * If buffer is in overwrite mode, push the reader consumed
-                * count if the write position has reached it and we are not
-                * at the first iteration (don't push the reader farther than
-                * the writer). This operation can be done concurrently by many
-                * writers in the same buffer, the writer being at the farthest
-                * write position sub-buffer index in the buffer being the one
-                * which will win this loop.
-                * If the buffer is not in overwrite mode, pushing the reader
-                * only happens if a sub-buffer is corrupted.
-                */
-               if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
-                  - SUBBUF_TRUNC(consumed_old, buf->chan))
-                  >= channel->alloc_size)
-                       consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
-               else {
-                       consumed_new = consumed_old;
-                       break;
-               }
-       } while (atomic_long_cmpxchg(&buf->consumed, consumed_old,
-                       consumed_new) != consumed_old);
-
-       if (consumed_old != consumed_new) {
-               /*
-                * Reader pushed : we are the winner of the push, we can
-                * therefore reequilibrate reserve and commit. Atomic increment
-                * of the commit count permits other writers to play around
-                * with this variable before us. We keep track of
-                * corrupted_subbuffers even in overwrite mode :
-                * we never want to write over a non completely committed
-                * sub-buffer : possible causes : the buffer size is too low
-                * compared to the unordered data input, or there is a writer
-                * that died between the reserve and the commit.
-                */
-               if (offsets->reserve_commit_diff) {
-                       /*
-                        * We have to alter the sub-buffer commit count.
-                        * We do not deliver the previous subbuffer, given it
-                        * was either corrupted or not consumed (overwrite
-                        * mode).
-                        */
-                       local_add(offsets->reserve_commit_diff,
-                                 &buf->commit_count[
-                                       SUBBUF_INDEX(offsets->begin,
-                                                    buf->chan)]);
-                       if (!channel->overwrite
-                           || offsets->reserve_commit_diff
-                              != channel->subbuf_size) {
-                               /*
-                                * The reserve commit diff was not subbuf_size :
-                                * it means the subbuffer was partly written to
-                                * and is therefore corrupted. If it is multiple
-                                * of subbuffer size and we are in flight
-                                * recorder mode, we are skipping over a whole
-                                * subbuffer.
-                                */
-                               local_inc(&buf->corrupted_subbuffers);
-                       }
-               }
-       }
-}
-
-
-/*
- * ltt_reserve_switch_old_subbuf: switch old subbuffer
- *
- * Concurrency safe because we are the last and only thread to alter this
- * sub-buffer. As long as it is not delivered and read, no other thread can
- * alter the offset, alter the reserve_count or call the
- * client_buffer_end_callback on this sub-buffer.
- *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves.  Not concurrency safe in overwrite mode.
- * We detect corrupted subbuffers with commit and reserve counts. We keep a
- * corrupted sub-buffers count and push the readers across these sub-buffers.
- *
- * Not concurrency safe if a writer is stalled in a subbuffer and another writer
- * switches in, finding out it's corrupted.  The result will be than the old
- * (uncommited) subbuffer will be declared corrupted, and that the new subbuffer
- * will be declared corrupted too because of the commit count adjustment.
- *
- * Note : offset_old should never be 0 here.
- */
-static inline void ltt_reserve_switch_old_subbuf(
-               struct ust_channel *channel,
-               struct ust_buffer *buf,
-               struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
-{
-       long oldidx = SUBBUF_INDEX(offsets->old - 1, channel);
-
-       channel->buffer_end(buf, *tsc, offsets->old, oldidx);
-       /* Must write buffer end before incrementing commit count */
-       smp_wmb();
-       offsets->commit_count =
-               local_add_return(channel->subbuf_size
-                                - (SUBBUF_OFFSET(offsets->old - 1, channel)
-                                + 1),
-                                &buf->commit_count[oldidx]);
-       if ((BUFFER_TRUNC(offsets->old - 1, channel)
-                       >> channel->n_subbufs_order)
-                       - ((offsets->commit_count - channel->subbuf_size)
-                               & channel->commit_count_mask) == 0)
-               ltt_deliver(buf, oldidx, offsets->commit_count);
-}
-
 /*
- * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
- *
- * This code can be executed unordered : writers may already have written to the
- * sub-buffer before this code gets executed, caution.  The commit makes sure
- * that this code is executed before the deliver of this sub-buffer.
- */
-static /*inline*/ void ltt_reserve_switch_new_subbuf(
-               struct ust_channel *channel,
-               struct ust_buffer *buf,
-               struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
-{
-       long beginidx = SUBBUF_INDEX(offsets->begin, channel);
-
-       channel->buffer_begin(buf, *tsc, beginidx);
-       /* Must write buffer end before incrementing commit count */
-       smp_wmb();
-       offsets->commit_count = local_add_return(ltt_subbuffer_header_size(),
-                       &buf->commit_count[beginidx]);
-       /* Check if the written buffer has to be delivered */
-       if ((BUFFER_TRUNC(offsets->begin, channel)
-                       >> channel->n_subbufs_order)
-                       - ((offsets->commit_count - channel->subbuf_size)
-                               & channel->commit_count_mask) == 0)
-               ltt_deliver(buf, beginidx, offsets->commit_count);
-}
-
-
-/*
- * ltt_reserve_end_switch_current: finish switching current subbuffer
- *
- * Concurrency safe because we are the last and only thread to alter this
- * sub-buffer. As long as it is not delivered and read, no other thread can
- * alter the offset, alter the reserve_count or call the
- * client_buffer_end_callback on this sub-buffer.
- *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves.  Not concurrency safe in overwrite mode.
- * We detect corrupted subbuffers with commit and reserve counts. We keep a
- * corrupted sub-buffers count and push the readers across these sub-buffers.
- *
- * Not concurrency safe if a writer is stalled in a subbuffer and another writer
- * switches in, finding out it's corrupted.  The result will be than the old
- * (uncommited) subbuffer will be declared corrupted, and that the new subbuffer
- * will be declared corrupted too because of the commit count adjustment.
- */
-static inline void ltt_reserve_end_switch_current(
-               struct ust_channel *channel,
-               struct ust_buffer *buf,
-               struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
-{
-       long endidx = SUBBUF_INDEX(offsets->end - 1, channel);
-
-       channel->buffer_end(buf, *tsc, offsets->end, endidx);
-       /* Must write buffer begin before incrementing commit count */
-       smp_wmb();
-       offsets->commit_count =
-               local_add_return(channel->subbuf_size
-                                - (SUBBUF_OFFSET(offsets->end - 1, channel)
-                                + 1),
-                                &buf->commit_count[endidx]);
-       if ((BUFFER_TRUNC(offsets->end - 1, channel)
-                       >> channel->n_subbufs_order)
-                       - ((offsets->commit_count - channel->subbuf_size)
-                               & channel->commit_count_mask) == 0)
-               ltt_deliver(buf, endidx, offsets->commit_count);
-}
-
-/**
- * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer.
- * @trace: the trace structure to log to.
- * @ltt_channel: channel structure
- * @transport_data: data structure specific to ltt relay
- * @data_size: size of the variable length data to log.
- * @slot_size: pointer to total size of the slot (out)
- * @buf_offset : pointer to reserved buffer offset (out)
- * @tsc: pointer to the tsc at the slot reservation (out)
- * @cpu: cpuid
+ * Force a sub-buffer switch for a per-cpu buffer. This operation is
+ * completely reentrant : can be called while tracing is active with
+ * absolutely no lock held.
  *
- * Return : -ENOSPC if not enough space, else returns 0.
- * It will take care of sub-buffer switching.
+ * Note, however, that as a local_cmpxchg is used for some atomic
+ * operations, this function must be called from the CPU which owns the buffer
+ * for an ACTIVE flush.
  */
-static notrace int ltt_relay_reserve_slot(struct ltt_trace_struct *trace,
-               struct ust_channel *channel, void **transport_data,
-               size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
-               unsigned int *rflags, int largest_align, int cpu)
+void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
+               enum force_switch_mode mode)
 {
-       struct ust_buffer *buf = *transport_data = channel->buf[cpu];
+       struct ust_channel *chan = buf->chan;
        struct ltt_reserve_switch_offsets offsets;
+       u64 tsc;
 
-       offsets.reserve_commit_diff = 0;
        offsets.size = 0;
 
        /*
         * Perform retryable operations.
         */
-       if (ltt_nesting > 4) {
-               local_inc(&buf->events_lost);
-               return -EPERM;
-       }
        do {
-               if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags,
-                               largest_align))
-                       return -ENOSPC;
+               if (ltt_relay_try_switch_slow(mode, chan, buf,
+                               &offsets, &tsc))
+                       return;
        } while (local_cmpxchg(&buf->offset, offsets.old,
                        offsets.end) != offsets.old);
 
@@ -1269,60 +1534,182 @@ static notrace int ltt_relay_reserve_slot(struct ltt_trace_struct *trace,
         * events, never the opposite (missing a full TSC event when it would be
         * needed).
         */
-       save_last_tsc(buf, *tsc);
+       save_last_tsc(buf, tsc);
 
        /*
         * Push the reader if necessary
         */
-       ltt_reserve_push_reader(channel, buf, &offsets);
+       if (mode == FORCE_ACTIVE) {
+               ltt_reserve_push_reader(chan, buf, offsets.end - 1);
+//ust//                ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));
+       }
 
        /*
         * Switch old subbuffer if needed.
         */
-       if (offsets.end_switch_old)
-               ltt_reserve_switch_old_subbuf(channel, buf, &offsets, tsc);
+       if (offsets.end_switch_old) {
+//ust//                ltt_clear_noref_flag(rchan, buf, SUBBUF_INDEX(offsets.old - 1, rchan));
+               ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc);
+       }
 
        /*
         * Populate new subbuffer.
         */
-       if (offsets.begin_switch)
-               ltt_reserve_switch_new_subbuf(channel, buf, &offsets, tsc);
+       if (mode == FORCE_ACTIVE)
+               ltt_reserve_switch_new_subbuf(chan, buf, &offsets, &tsc);
+}
 
-       if (offsets.end_switch_current)
-               ltt_reserve_end_switch_current(channel, buf, &offsets, tsc);
+/*
+ * Returns :
+ * 0 if ok
+ * !0 if execution must be aborted.
+ */
+static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffer *buf,
+               struct ltt_reserve_switch_offsets *offsets, size_t data_size,
+               u64 *tsc, unsigned int *rflags, int largest_align)
+{
+       long reserve_commit_diff;
 
-       *slot_size = offsets.size;
-       *buf_offset = offsets.begin + offsets.before_hdr_pad;
+       offsets->begin = local_read(&buf->offset);
+       offsets->old = offsets->begin;
+       offsets->begin_switch = 0;
+       offsets->end_switch_current = 0;
+       offsets->end_switch_old = 0;
+
+       *tsc = trace_clock_read64();
+       if (last_tsc_overflow(buf, *tsc))
+               *rflags = LTT_RFLAG_ID_SIZE_TSC;
+
+       if (unlikely(SUBBUF_OFFSET(offsets->begin, buf->chan) == 0)) {
+               offsets->begin_switch = 1;              /* For offsets->begin */
+       } else {
+               offsets->size = ust_get_header_size(chan,
+                                       offsets->begin, data_size,
+                                       &offsets->before_hdr_pad, *rflags);
+               offsets->size += ltt_align(offsets->begin + offsets->size,
+                                          largest_align)
+                                + data_size;
+               if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) +
+                            offsets->size) > buf->chan->subbuf_size)) {
+                       offsets->end_switch_old = 1;    /* For offsets->old */
+                       offsets->begin_switch = 1;      /* For offsets->begin */
+               }
+       }
+       if (unlikely(offsets->begin_switch)) {
+               long subbuf_index;
+
+               /*
+                * We are typically not filling the previous buffer completely.
+                */
+               if (likely(offsets->end_switch_old))
+                       offsets->begin = SUBBUF_ALIGN(offsets->begin,
+                                                     buf->chan);
+               offsets->begin = offsets->begin + ltt_subbuffer_header_size();
+               /* Test new buffer integrity */
+               subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
+               reserve_commit_diff =
+                 (BUFFER_TRUNC(offsets->begin, buf->chan)
+                  >> chan->n_subbufs_order)
+                 - (local_read(&buf->commit_count[subbuf_index].cc_sb)
+                               & chan->commit_count_mask);
+               if (likely(reserve_commit_diff == 0)) {
+                       /* Next buffer not corrupted. */
+                       if (unlikely(!chan->overwrite &&
+                               (SUBBUF_TRUNC(offsets->begin, buf->chan)
+                                - SUBBUF_TRUNC(atomic_long_read(
+                                                       &buf->consumed),
+                                               buf->chan))
+                               >= chan->alloc_size)) {
+                               /*
+                                * We do not overwrite non consumed buffers
+                                * and we are full : event is lost.
+                                */
+                               local_inc(&buf->events_lost);
+                               return -1;
+                       } else {
+                               /*
+                                * next buffer not corrupted, we are either in
+                                * overwrite mode or the buffer is not full.
+                                * It's safe to write in this new subbuffer.
+                                */
+                       }
+               } else {
+                       /*
+                        * Next subbuffer corrupted. Drop event in normal and
+                        * overwrite mode. Caused by either a writer OOPS or
+                        * too many nested writes over a reserve/commit pair.
+                        */
+                       local_inc(&buf->events_lost);
+                       return -1;
+               }
+               offsets->size = ust_get_header_size(chan,
+                                       offsets->begin, data_size,
+                                       &offsets->before_hdr_pad, *rflags);
+               offsets->size += ltt_align(offsets->begin + offsets->size,
+                                          largest_align)
+                                + data_size;
+               if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan)
+                            + offsets->size) > buf->chan->subbuf_size)) {
+                       /*
+                        * Event too big for subbuffers, report error, don't
+                        * complete the sub-buffer switch.
+                        */
+                       local_inc(&buf->events_lost);
+                       return -1;
+               } else {
+                       /*
+                        * We just made a successful buffer switch and the event
+                        * fits in the new subbuffer. Let's write.
+                        */
+               }
+       } else {
+               /*
+                * Event fits in the current buffer and we are not on a switch
+                * boundary. It's safe to write.
+                */
+       }
+       offsets->end = offsets->begin + offsets->size;
+
+       if (unlikely((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0)) {
+               /*
+                * The offset_end will fall at the very beginning of the next
+                * subbuffer.
+                */
+               offsets->end_switch_current = 1;        /* For offsets->begin */
+       }
        return 0;
 }
 
-/*
- * Force a sub-buffer switch for a per-cpu buffer. This operation is
- * completely reentrant : can be called while tracing is active with
- * absolutely no lock held.
+/**
+ * ltt_relay_reserve_slot_lockless_slow - Atomic slot reservation in a buffer.
+ * @trace: the trace structure to log to.
+ * @ltt_channel: channel structure
+ * @transport_data: data structure specific to ltt relay
+ * @data_size: size of the variable length data to log.
+ * @slot_size: pointer to total size of the slot (out)
+ * @buf_offset : pointer to reserved buffer offset (out)
+ * @tsc: pointer to the tsc at the slot reservation (out)
+ * @cpu: cpuid
  *
- * Note, however, that as a local_cmpxchg is used for some atomic
- * operations, this function must be called from the CPU which owns the buffer
- * for a ACTIVE flush.
+ * Return : -ENOSPC if not enough space, else returns 0.
+ * It will take care of sub-buffer switching.
  */
-static notrace void ltt_force_switch(struct ust_buffer *buf,
-               enum force_switch_mode mode)
+int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
+               struct ust_channel *chan, void **transport_data,
+               size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
+               unsigned int *rflags, int largest_align, int cpu)
 {
-       struct ust_channel *channel = buf->chan;
+       struct ust_buffer *buf = chan->buf[cpu];
        struct ltt_reserve_switch_offsets offsets;
-       u64 tsc;
 
-       offsets.reserve_commit_diff = 0;
        offsets.size = 0;
 
-       /*
-        * Perform retryable operations.
-        */
        do {
-               if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc))
-                       return;
-       } while (local_cmpxchg(&buf->offset, offsets.old,
-                       offsets.end) != offsets.old);
+               if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets,
+                               data_size, tsc, rflags, largest_align)))
+                       return -ENOSPC;
+       } while (unlikely(local_cmpxchg(&buf->offset, offsets.old,
+                       offsets.end) != offsets.old));
 
        /*
         * Atomically update last_tsc. This update races against concurrent
@@ -1330,25 +1717,38 @@ static notrace void ltt_force_switch(struct ust_buffer *buf,
         * events, never the opposite (missing a full TSC event when it would be
         * needed).
         */
-       save_last_tsc(buf, tsc);
+       save_last_tsc(buf, *tsc);
 
        /*
         * Push the reader if necessary
         */
-       if (mode == FORCE_ACTIVE)
-               ltt_reserve_push_reader(channel, buf, &offsets);
+       ltt_reserve_push_reader(chan, buf, offsets.end - 1);
+
+       /*
+        * Clear noref flag for this subbuffer.
+        */
+//ust//        ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));
 
        /*
         * Switch old subbuffer if needed.
         */
-       if (offsets.end_switch_old)
-               ltt_reserve_switch_old_subbuf(channel, buf, &offsets, &tsc);
+       if (unlikely(offsets.end_switch_old)) {
+//ust//                ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.old - 1, chan));
+               ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc);
+       }
 
        /*
         * Populate new subbuffer.
         */
-       if (mode == FORCE_ACTIVE)
-               ltt_reserve_switch_new_subbuf(channel, buf, &offsets, &tsc);
+       if (unlikely(offsets.begin_switch))
+               ltt_reserve_switch_new_subbuf(chan, buf, &offsets, tsc);
+
+       if (unlikely(offsets.end_switch_current))
+               ltt_reserve_end_switch_current(chan, buf, &offsets, tsc);
+
+       *slot_size = offsets.size;
+       *buf_offset = offsets.begin + offsets.before_hdr_pad;
+       return 0;
 }
 
 static struct ltt_transport ust_relay_transport = {
@@ -1358,83 +1758,9 @@ static struct ltt_transport ust_relay_transport = {
                .finish_channel = ltt_relay_finish_channel,
                .remove_channel = ltt_relay_remove_channel,
                .wakeup_channel = ltt_relay_async_wakeup_chan,
-//             .commit_slot = ltt_relay_commit_slot,
-               .reserve_slot = ltt_relay_reserve_slot,
        },
 };
 
-/*
- * for flight recording. must be called after relay_commit.
- * This function decrements de subbuffer's lost_size each time the commit count
- * reaches back the reserve offset (module subbuffer size). It is useful for
- * crash dump.
- */
-static /* inline */ void ltt_write_commit_counter(struct ust_buffer *buf,
-               struct ust_buffer *ltt_buf,
-               long idx, long buf_offset, long commit_count, size_t data_size)
-{
-       long offset;
-       long commit_seq_old;
-
-       offset = buf_offset + data_size;
-
-       /*
-        * SUBBUF_OFFSET includes commit_count_mask. We can simply
-        * compare the offsets within the subbuffer without caring about
-        * buffer full/empty mismatch because offset is never zero here
-        * (subbuffer header and event headers have non-zero length).
-        */
-       if (unlikely(SUBBUF_OFFSET(offset - commit_count, buf->chan)))
-               return;
-
-       commit_seq_old = local_read(&ltt_buf->commit_seq[idx]);
-       while (commit_seq_old < commit_count)
-               commit_seq_old = local_cmpxchg(&ltt_buf->commit_seq[idx],
-                                        commit_seq_old, commit_count);
-}
-
-/*
- * Atomic unordered slot commit. Increments the commit count in the
- * specified sub-buffer, and delivers it if necessary.
- *
- * Parameters:
- *
- * @ltt_channel : channel structure
- * @transport_data: transport-specific data
- * @buf_offset : offset following the event header.
- * @data_size : size of the event data.
- * @slot_size : size of the reserved slot.
- */
-/* FIXME: make this function static inline in the .h! */
-/*static*/ /* inline */ notrace void ltt_commit_slot(
-               struct ust_channel *channel,
-               void **transport_data, long buf_offset,
-               size_t data_size, size_t slot_size)
-{
-       struct ust_buffer *buf = *transport_data;
-       long offset_end = buf_offset;
-       long endidx = SUBBUF_INDEX(offset_end - 1, channel);
-       long commit_count;
-
-       /* Must write slot data before incrementing commit count */
-       smp_wmb();
-       commit_count = local_add_return(slot_size,
-               &buf->commit_count[endidx]);
-       /* Check if all commits have been done */
-       if ((BUFFER_TRUNC(offset_end - 1, channel)
-                       >> channel->n_subbufs_order)
-                       - ((commit_count - channel->subbuf_size)
-                          & channel->commit_count_mask) == 0)
-               ltt_deliver(buf, endidx, commit_count);
-       /*
-        * Update lost_size for each commit. It's needed only for extracting
-        * ltt buffers from vmcore, after crash.
-        */
-       ltt_write_commit_counter(buf, buf, endidx,
-                                buf_offset, commit_count, data_size);
-}
-
-
 static char initialized = 0;
 
 void __attribute__((constructor)) init_ustrelay_transport(void)
@@ -1445,7 +1771,75 @@ void __attribute__((constructor)) init_ustrelay_transport(void)
        }
 }
 
-static void __attribute__((destructor)) ltt_relay_exit(void)
+static void __attribute__((destructor)) ust_buffers_exit(void)
 {
        ltt_transport_unregister(&ust_relay_transport);
 }
+
+size_t ltt_write_event_header_slow(struct ust_trace *trace,
+               struct ust_channel *channel,
+               struct ust_buffer *buf, long buf_offset,
+               u16 eID, u32 event_size,
+               u64 tsc, unsigned int rflags)
+{
+       struct ltt_event_header header;
+       u16 small_size;
+
+       switch (rflags) {
+       case LTT_RFLAG_ID_SIZE_TSC:
+               header.id_time = 29 << LTT_TSC_BITS;
+               break;
+       case LTT_RFLAG_ID_SIZE:
+               header.id_time = 30 << LTT_TSC_BITS;
+               break;
+       case LTT_RFLAG_ID:
+               header.id_time = 31 << LTT_TSC_BITS;
+               break;
+       }
+
+       header.id_time |= (u32)tsc & LTT_TSC_MASK;
+       ust_buffers_write(buf, buf_offset, &header, sizeof(header));
+       buf_offset += sizeof(header);
+
+       switch (rflags) {
+       case LTT_RFLAG_ID_SIZE_TSC:
+               small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
+               ust_buffers_write(buf, buf_offset,
+                       &eID, sizeof(u16));
+               buf_offset += sizeof(u16);
+               ust_buffers_write(buf, buf_offset,
+                       &small_size, sizeof(u16));
+               buf_offset += sizeof(u16);
+               if (small_size == LTT_MAX_SMALL_SIZE) {
+                       ust_buffers_write(buf, buf_offset,
+                               &event_size, sizeof(u32));
+                       buf_offset += sizeof(u32);
+               }
+               buf_offset += ltt_align(buf_offset, sizeof(u64));
+               ust_buffers_write(buf, buf_offset,
+                       &tsc, sizeof(u64));
+               buf_offset += sizeof(u64);
+               break;
+       case LTT_RFLAG_ID_SIZE:
+               small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
+               ust_buffers_write(buf, buf_offset,
+                       &eID, sizeof(u16));
+               buf_offset += sizeof(u16);
+               ust_buffers_write(buf, buf_offset,
+                       &small_size, sizeof(u16));
+               buf_offset += sizeof(u16);
+               if (small_size == LTT_MAX_SMALL_SIZE) {
+                       ust_buffers_write(buf, buf_offset,
+                               &event_size, sizeof(u32));
+                       buf_offset += sizeof(u32);
+               }
+               break;
+       case LTT_RFLAG_ID:
+               ust_buffers_write(buf, buf_offset,
+                       &eID, sizeof(u16));
+               buf_offset += sizeof(u16);
+               break;
+       }
+
+       return buf_offset;
+}
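
The escape values 29/30/31 written above share the header word with the truncated timestamp. A minimal sketch of that packing, assuming an illustrative LTT_TSC_BITS of 27 and a locally defined mask (the real values live in the tracer headers):

#include <stdio.h>
#include <stdint.h>

#define LTT_TSC_BITS 27                         /* assumed width for the example */
#define LTT_TSC_MASK ((1U << LTT_TSC_BITS) - 1) /* local stand-in for the real mask */

int main(void)
{
	uint64_t tsc = 0x123456789ULL;
	/* 29 in the id field means: extended id, size and full TSC follow the header */
	uint32_t id_time = 29U << LTT_TSC_BITS;

	id_time |= (uint32_t)tsc & LTT_TSC_MASK;    /* truncated timestamp in the low bits */
	printf("id_time = 0x%08x\n", id_time);
	return 0;
}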
index c6b13d076ecbc26d445efec8c5cbd96775cc732b..bff3ed54ba2f2eb4c06192c283f6aa3f6e68bd13 100644 (file)
 #include <kcompat/kref.h>
 #include <assert.h>
 #include "channels.h"
-#include "buffers.h"
+#include "tracerconst.h"
+#include "tracercore.h"
+#include "header-inline.h"
+#include <usterr.h>
 
-/* Return the size of the minimum number of pages that can contain x. */
-#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)
+/***** SHOULD BE REMOVED ***** */
 
 /*
  * BUFFER_TRUNC zeroes the subbuffer offset and the subbuffer number parts of
  */
 #define UST_CHANNEL_VERSION            8
 
+/**************************************/
+
+struct commit_counters {
+       local_t cc;
+       local_t cc_sb;                  /* Incremented _once_ at sb switch */
+};
+
 struct ust_buffer {
        /* First 32 bytes cache-hot cacheline */
        local_t offset;                 /* Current offset in the buffer */
-       local_t *commit_count;          /* Commit count per sub-buffer */
+       struct commit_counters *commit_count;   /* Commit count per sub-buffer */
        atomic_long_t consumed;         /*
                                         * Current offset in the buffer
                                         * standard atomic access (shared)
@@ -64,7 +73,12 @@ struct ust_buffer {
        /* the reading end of the pipe */
        int data_ready_fd_read;
 
+       unsigned int finalized;
+//ust//        struct timer_list switch_timer; /* timer for periodical switch */
+       unsigned long switch_timer_interval; /* 0 = unset */
+
        struct ust_channel *chan;
+
        struct kref kref;
        void *buf_data;
        size_t buf_size;
@@ -75,19 +89,22 @@ struct ust_buffer {
        local_t commit_seq[0] ____cacheline_aligned;
 } ____cacheline_aligned;
 
-extern void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
-       const void *src, size_t len, ssize_t cpy);
-
 /*
- * Return the address where a given offset is located.
- * Should be used to get the current subbuffer header pointer. Given we know
- * it's never on a page boundary, it's safe to write directly to this address,
- * as long as the write is never bigger than a page size.
+ * A switch is done during tracing or as a final flush after tracing (so it
+ * won't write in the new sub-buffer).
+ * FIXME: make this message clearer
  */
-extern void *ust_buffers_offset_address(struct ust_buffer *buf,
-       size_t offset);
+enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
+
+extern int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
+               struct ust_channel *ltt_channel, void **transport_data,
+               size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
+               unsigned int *rflags, int largest_align, int cpu);
+
+extern void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
+               enum force_switch_mode mode);
+
 
-/* FIXME: lttng has a version for systems with inefficient unaligned access */
 static __inline__ void ust_buffers_do_copy(void *dest, const void *src, size_t len)
 {
        union {
@@ -99,23 +116,402 @@ static __inline__ void ust_buffers_do_copy(void *dest, const void *src, size_t l
        } u = { .src = src };
 
        switch (len) {
-       case 0: break;
-       case 1: *(u8 *)dest = *u.src8;
+       case 0: break;
+       case 1: *(u8 *)dest = *u.src8;
                break;
-       case 2: *(u16 *)dest = *u.src16;
+       case 2: *(u16 *)dest = *u.src16;
                break;
-       case 4: *(u32 *)dest = *u.src32;
+       case 4: *(u32 *)dest = *u.src32;
                break;
-       case 8: *(u64 *)dest = *u.src64;
+       case 8: *(u64 *)dest = *u.src64;
                break;
        default:
                memcpy(dest, src, len);
        }
 }
 
-/* FIXME: there is both a static inline and a '_' non static inline version ?? */
+static __inline__ void *ust_buffers_offset_address(struct ust_buffer *buf, size_t offset)
+{
+       return ((char *)buf->buf_data)+offset;
+}
+
+/*
+ * Last TSC comparison functions. Check if the current TSC overflows
+ * LTT_TSC_BITS bits from the last TSC read. Reads and writes last_tsc
+ * atomically.
+ */
+
+/* FIXME: does this test work properly? */
+#if (BITS_PER_LONG == 32)
+static __inline__ void save_last_tsc(struct ust_buffer *ltt_buf,
+                                       u64 tsc)
+{
+       ltt_buf->last_tsc = (unsigned long)(tsc >> LTT_TSC_BITS);
+}
+
+static __inline__ int last_tsc_overflow(struct ust_buffer *ltt_buf,
+                                       u64 tsc)
+{
+       unsigned long tsc_shifted = (unsigned long)(tsc >> LTT_TSC_BITS);
+
+       if (unlikely((tsc_shifted - ltt_buf->last_tsc)))
+               return 1;
+       else
+               return 0;
+}
+#else
+static __inline__ void save_last_tsc(struct ust_buffer *ltt_buf,
+                                       u64 tsc)
+{
+       ltt_buf->last_tsc = (unsigned long)tsc;
+}
+
+static __inline__ int last_tsc_overflow(struct ust_buffer *ltt_buf,
+                                       u64 tsc)
+{
+       if (unlikely((tsc - ltt_buf->last_tsc) >> LTT_TSC_BITS))
+               return 1;
+       else
+               return 0;
+}
+#endif
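
What the 64-bit branch above decides, in isolation: a full timestamp is only recorded when more than 2^LTT_TSC_BITS ticks separate two events on the same buffer. A self-contained sketch with an assumed LTT_TSC_BITS of 27:

#include <stdio.h>
#include <stdint.h>

#define LTT_TSC_BITS 27  /* assumed width for the example */

static int tsc_overflowed(uint64_t last_tsc, uint64_t tsc)
{
	/* Non-zero when the delta no longer fits in the compact header field. */
	return ((tsc - last_tsc) >> LTT_TSC_BITS) != 0;
}

int main(void)
{
	uint64_t last = 1000000;
	printf("small delta -> %d\n", tsc_overflowed(last, last + 5000));         /* 0: compact header */
	printf("large delta -> %d\n", tsc_overflowed(last, last + (1ULL << 30))); /* 1: full TSC written */
	return 0;
}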
+
+static __inline__ void ltt_reserve_push_reader(
+               struct ust_channel *rchan,
+               struct ust_buffer *buf,
+               long offset)
+{
+       long consumed_old, consumed_new;
+
+       do {
+               consumed_old = atomic_long_read(&buf->consumed);
+               /*
+                * If buffer is in overwrite mode, push the reader consumed
+                * count if the write position has reached it and we are not
+                * at the first iteration (don't push the reader farther than
+                * the writer). This operation can be done concurrently by many
+                * writers in the same buffer, the writer being at the farthest
+                * write position sub-buffer index in the buffer being the one
+                * which will win this loop.
+                * If the buffer is not in overwrite mode, pushing the reader
+                * only happens if a sub-buffer is corrupted.
+                */
+               if (unlikely((SUBBUF_TRUNC(offset, buf->chan)
+                  - SUBBUF_TRUNC(consumed_old, buf->chan))
+                  >= rchan->alloc_size))
+                       consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
+               else
+                       return;
+       } while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
+                       consumed_new) != consumed_old));
+}
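
The same push logic, reduced to a standalone compare-and-swap loop on a plain variable (the GCC __sync builtin stands in for atomic_long_cmpxchg, and the buffer geometry is made up for the example):

#include <stdio.h>

#define SUBBUF_SIZE 4096UL
#define ALLOC_SIZE  (4 * SUBBUF_SIZE)
#define TRUNC(o)    ((o) & ~(SUBBUF_SIZE - 1))

static unsigned long consumed;  /* reader position, normally an atomic_long_t */

static void push_reader(unsigned long write_offset)
{
	unsigned long old_pos, new_pos;

	do {
		old_pos = consumed;
		/* Only push once the writer has lapped the reader by a whole
		 * buffer; otherwise the reader is left where it is. */
		if (TRUNC(write_offset) - TRUNC(old_pos) < ALLOC_SIZE)
			return;
		new_pos = TRUNC(old_pos) + SUBBUF_SIZE;  /* SUBBUF_ALIGN */
	} while (__sync_val_compare_and_swap(&consumed, old_pos, new_pos) != old_pos);
}

int main(void)
{
	consumed = 0;
	push_reader(ALLOC_SIZE + 10);                   /* writer one full buffer ahead */
	printf("consumed pushed to %lu\n", consumed);   /* 4096 */
	return 0;
}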
+
+static __inline__ void ltt_vmcore_check_deliver(
+               struct ust_buffer *buf,
+               long commit_count, long idx)
+{
+       local_set(&buf->commit_seq[idx], commit_count);
+}
+
+static __inline__ void ltt_check_deliver(struct ust_channel *chan,
+               struct ust_buffer *buf,
+               long offset, long commit_count, long idx)
+{
+       long old_commit_count = commit_count - chan->subbuf_size;
+
+       /* Check if all commits have been done */
+       if (unlikely((BUFFER_TRUNC(offset, chan)
+                       >> chan->n_subbufs_order)
+                       - (old_commit_count
+                          & chan->commit_count_mask) == 0)) {
+               /*
+                * If we succeeded in updating the cc_sb, we are delivering
+                * the subbuffer. Deals with concurrent updates of the "cc"
+                * value without adding a add_return atomic operation to the
+                * value without adding an add_return atomic operation to the
+                */
+               if (likely(local_cmpxchg(&buf->commit_count[idx].cc_sb,
+                                        old_commit_count, commit_count)
+                          == old_commit_count)) {
+                       int result;
+
+                       /*
+                        * Set noref flag for this subbuffer.
+                        */
+//ust//                        ltt_set_noref_flag(rchan, buf, idx);
+                       ltt_vmcore_check_deliver(buf, commit_count, idx);
+
+                       /* wakeup consumer */
+                       result = write(buf->data_ready_fd_write, "1", 1);
+                       if(result == -1) {
+                               PERROR("write (in ltt_relay_buffer_flush)");
+                               ERR("this should never happen!");
+                       }
+               }
+       }
+}
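
A worked instance of the "all commits have been done" test above, with a made-up geometry of 4 sub-buffers of 4096 bytes: the sub-buffer is delivered when its cumulative commit count, minus one sub-buffer worth, lines up with the number of passes the writer has made over the buffer.

#include <stdio.h>

int main(void)
{
	unsigned long subbuf_size = 4096, n_subbufs_order = 2;       /* 4 sub-buffers */
	unsigned long alloc_size = subbuf_size << n_subbufs_order;   /* 16384 */
	unsigned long commit_count_mask = ~0UL >> n_subbufs_order;   /* illustrative mask */

	unsigned long offset = 1 * alloc_size;           /* writer on its 2nd pass over sub-buffer 0 */
	unsigned long commit_count = 2 * subbuf_size;    /* sub-buffer 0 fully committed twice */
	unsigned long old_commit_count = commit_count - subbuf_size;

	/* BUFFER_TRUNC(offset) >> n_subbufs_order: bytes per sub-buffer per completed pass */
	unsigned long expected = (offset & ~(alloc_size - 1)) >> n_subbufs_order;
	int deliver = (expected - (old_commit_count & commit_count_mask)) == 0;

	printf("deliver = %d\n", deliver);  /* 1: previous fill of this sub-buffer is complete */
	return 0;
}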
+
+static __inline__ int ltt_poll_deliver(struct ust_channel *chan, struct ust_buffer *buf)
+{
+       long consumed_old, consumed_idx, commit_count, write_offset;
+
+       consumed_old = atomic_long_read(&buf->consumed);
+       consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
+       commit_count = local_read(&buf->commit_count[consumed_idx].cc_sb);
+       /*
+        * No memory barrier here, since we are only interested
+        * in a statistically correct polling result. The next poll will
+        * get the data if we are racing. The mb() that ensures correct
+        * memory order is in get_subbuf.
+        */
+       write_offset = local_read(&buf->offset);
+
+       /*
+        * Check that the subbuffer we are trying to consume has been
+        * already fully committed.
+        */
+
+       if (((commit_count - chan->subbuf_size)
+            & chan->commit_count_mask)
+           - (BUFFER_TRUNC(consumed_old, buf->chan)
+              >> chan->n_subbufs_order)
+           != 0)
+               return 0;
+
+       /*
+        * Check that we are not about to read the same subbuffer in
+        * which the writer head is.
+        */
+       if ((SUBBUF_TRUNC(write_offset, buf->chan)
+          - SUBBUF_TRUNC(consumed_old, buf->chan))
+          == 0)
+               return 0;
+
+       return 1;
+
+}
+
+/*
+ * returns 0 if reserve ok, or 1 if the slow path must be taken.
+ */
+static __inline__ int ltt_relay_try_reserve(
+               struct ust_channel *chan,
+               struct ust_buffer *buf,
+               size_t data_size,
+               u64 *tsc, unsigned int *rflags, int largest_align,
+               long *o_begin, long *o_end, long *o_old,
+               size_t *before_hdr_pad, size_t *size)
+{
+       *o_begin = local_read(&buf->offset);
+       *o_old = *o_begin;
+
+       *tsc = trace_clock_read64();
+
+//ust// #ifdef CONFIG_LTT_VMCORE
+//ust//        prefetch(&buf->commit_count[SUBBUF_INDEX(*o_begin, rchan)]);
+//ust//        prefetch(&buf->commit_seq[SUBBUF_INDEX(*o_begin, rchan)]);
+//ust// #else
+//ust//        prefetchw(&buf->commit_count[SUBBUF_INDEX(*o_begin, rchan)]);
+//ust// #endif
+       if (last_tsc_overflow(buf, *tsc))
+               *rflags = LTT_RFLAG_ID_SIZE_TSC;
+
+       if (unlikely(SUBBUF_OFFSET(*o_begin, buf->chan) == 0))
+               return 1;
+
+       *size = ust_get_header_size(chan,
+                               *o_begin, data_size,
+                               before_hdr_pad, *rflags);
+       *size += ltt_align(*o_begin + *size, largest_align) + data_size;
+       if (unlikely((SUBBUF_OFFSET(*o_begin, buf->chan) + *size)
+                    > buf->chan->subbuf_size))
+               return 1;
+
+       /*
+        * Event fits in the current buffer and we are not on a switch
+        * boundary. It's safe to write.
+        */
+       *o_end = *o_begin + *size;
+
+       if (unlikely((SUBBUF_OFFSET(*o_end, buf->chan)) == 0))
+               /*
+                * The offset_end will fall at the very beginning of the next
+                * subbuffer.
+                */
+               return 1;
+
+       return 0;
+}
+
+static __inline__ int ltt_reserve_slot(struct ust_trace *trace,
+               struct ust_channel *chan, void **transport_data,
+               size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
+               unsigned int *rflags, int largest_align, int cpu)
+{
+       struct ust_buffer *buf = chan->buf[cpu];
+       long o_begin, o_end, o_old;
+       size_t before_hdr_pad;
+
+       /*
+        * Perform retryable operations.
+        */
+       /* FIXME: make this really per cpu? */
+       if (unlikely(__get_cpu_var(ltt_nesting) > 4)) {
+               local_inc(&buf->events_lost);
+               return -EPERM;
+       }
+
+       if (unlikely(ltt_relay_try_reserve(chan, buf,
+                       data_size, tsc, rflags,
+                       largest_align, &o_begin, &o_end, &o_old,
+                       &before_hdr_pad, slot_size)))
+               goto slow_path;
+
+       if (unlikely(local_cmpxchg(&buf->offset, o_old, o_end) != o_old))
+               goto slow_path;
+
+       /*
+        * Atomically update last_tsc. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary full TSC
+        * events, never the opposite (missing a full TSC event when it would be
+        * needed).
+        */
+       save_last_tsc(buf, *tsc);
+
+       /*
+        * Push the reader if necessary
+        */
+       ltt_reserve_push_reader(chan, buf, o_end - 1);
+
+       /*
+        * Clear noref flag for this subbuffer.
+        */
+//ust//        ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(o_end - 1, chan));
+
+       *buf_offset = o_begin + before_hdr_pad;
+       return 0;
+slow_path:
+       return ltt_reserve_slot_lockless_slow(trace, chan,
+               transport_data, data_size, slot_size, buf_offset, tsc,
+               rflags, largest_align, cpu);
+}
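
The shape of that fast path, boiled down to a standalone sketch: one compare-and-swap on the write offset, with every irregular case (start of a sub-buffer, overflow, exact boundary, lost race) punted to a slow path. Names, sizes and the missing slow path are all illustrative.

#include <stdio.h>

#define SUBBUF_SIZE 4096UL

static unsigned long write_offset;  /* normally a local_t updated with local_cmpxchg */

static int reserve_fast(unsigned long size, unsigned long *slot_begin)
{
	unsigned long begin = write_offset;
	unsigned long end = begin + size;
	unsigned long in_subbuf = begin & (SUBBUF_SIZE - 1);

	if (in_subbuf == 0)                     /* start of a sub-buffer: header needed */
		return -1;
	if (in_subbuf + size > SUBBUF_SIZE)     /* slot would spill into the next sub-buffer */
		return -1;
	if ((end & (SUBBUF_SIZE - 1)) == 0)     /* slot ends exactly on a boundary */
		return -1;
	if (__sync_val_compare_and_swap(&write_offset, begin, end) != begin)
		return -1;                      /* lost the race against another writer */

	*slot_begin = begin;
	return 0;                               /* -1 means: take the lockless slow path */
}

int main(void)
{
	unsigned long begin;
	write_offset = 100;
	if (reserve_fast(64, &begin) == 0)
		printf("reserved 64 bytes at offset %lu\n", begin);  /* 100 */
	return 0;
}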
+
+/*
+ * Force a sub-buffer switch for a per-cpu buffer. This operation is
+ * completely reentrant : can be called while tracing is active with
+ * absolutely no lock held.
+ *
+ * Note, however, that as a local_cmpxchg is used for some atomic
+ * operations, this function must be called from the CPU which owns the buffer
+ * for an ACTIVE flush.
+ */
+static __inline__ void ltt_force_switch(struct ust_buffer *buf,
+               enum force_switch_mode mode)
+{
+       return ltt_force_switch_lockless_slow(buf, mode);
+}
+
+/*
+ * For flight recording. Must be called after relay_commit.
+ * This function decrements the subbuffer's lost_size each time the commit count
+ * reaches back the reserve offset (modulo subbuffer size). It is useful for
+ * crash dump.
+ */
+#ifdef CONFIG_LTT_VMCORE
+static __inline__ void ltt_write_commit_counter(struct rchan_buf *buf,
+               struct ltt_channel_buf_struct *ltt_buf,
+               long idx, long buf_offset, long commit_count, size_t data_size)
+{
+       long offset;
+       long commit_seq_old;
+
+       offset = buf_offset + data_size;
+
+       /*
+        * SUBBUF_OFFSET includes commit_count_mask. We can simply
+        * compare the offsets within the subbuffer without caring about
+        * buffer full/empty mismatch because offset is never zero here
+        * (subbuffer header and event headers have non-zero length).
+        */
+       if (unlikely(SUBBUF_OFFSET(offset - commit_count, buf->chan)))
+               return;
+
+       commit_seq_old = local_read(&ltt_buf->commit_seq[idx]);
+       while (commit_seq_old < commit_count)
+               commit_seq_old = local_cmpxchg(&ltt_buf->commit_seq[idx],
+                                        commit_seq_old, commit_count);
+}
+#else
+static __inline__ void ltt_write_commit_counter(struct ust_buffer *buf,
+               long idx, long buf_offset, long commit_count, size_t data_size)
+{
+}
+#endif
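
The commit_seq update in the CONFIG_LTT_VMCORE variant above is a classic "advance but never regress" loop. The same pattern on a plain variable, with a GCC builtin standing in for local_cmpxchg:

#include <stdio.h>

static long commit_seq;  /* per-sub-buffer sequence, normally a local_t */

static void commit_seq_update(long commit_count)
{
	long old = commit_seq;

	while (old < commit_count) {
		long prev = __sync_val_compare_and_swap(&commit_seq, old, commit_count);
		if (prev == old)
			break;   /* we installed commit_count */
		old = prev;      /* another committer moved it; re-check against the new value */
	}
}

int main(void)
{
	commit_seq_update(4096);
	commit_seq_update(2048);                     /* stale value: must not move the counter back */
	printf("commit_seq = %ld\n", commit_seq);    /* 4096 */
	return 0;
}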
+
+/*
+ * Atomic unordered slot commit. Increments the commit count in the
+ * specified sub-buffer, and delivers it if necessary.
+ *
+ * Parameters:
+ *
+ * @ltt_channel : channel structure
+ * @transport_data: transport-specific data
+ * @buf_offset : offset following the event header.
+ * @data_size : size of the event data.
+ * @slot_size : size of the reserved slot.
+ */
+static __inline__ void ltt_commit_slot(
+               struct ust_channel *chan,
+               struct ust_buffer *buf, long buf_offset,
+               size_t data_size, size_t slot_size)
+{
+       long offset_end = buf_offset;
+       long endidx = SUBBUF_INDEX(offset_end - 1, chan);
+       long commit_count;
+
+#ifdef LTT_NO_IPI_BARRIER
+       smp_wmb();
+#else
+       /*
+        * Must write slot data before incrementing commit count.
+        * This compiler barrier is upgraded into a smp_mb() by the IPI
+        * sent by get_subbuf().
+        */
+       barrier();
+#endif
+       local_add(slot_size, &buf->commit_count[endidx].cc);
+       /*
+        * commit count read can race with concurrent OOO commit count updates.
+        * This is only needed for ltt_check_deliver (for non-polling delivery
+        * only) and for ltt_write_commit_counter. The race can only cause the
+        * counter to be read with the same value more than once, which could
+        * cause :
+        * - Multiple delivery for the same sub-buffer (which is handled
+        *   gracefully by the reader code) if the value is for a full
+        *   sub-buffer. It's important that we can never miss a sub-buffer
+        *   delivery. Re-reading the value after the local_add ensures this.
+        * - Reading a commit_count with a higher value than what was actually
+        *   added to it for the ltt_write_commit_counter call (again caused by
+        *   a concurrent committer). It does not matter, because this function
+        *   is interested in the fact that the commit count reaches back the
+        *   reserve offset for a specific sub-buffer, which is completely
+        *   independent of the order.
+        */
+       commit_count = local_read(&buf->commit_count[endidx].cc);
+
+       ltt_check_deliver(chan, buf, offset_end - 1, commit_count, endidx);
+       /*
+        * Update lost_size for each commit. It's needed only for extracting
+        * ltt buffers from vmcore, after crash.
+        */
+       ltt_write_commit_counter(buf, endidx, buf_offset, commit_count, data_size);
+}
+
+void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
+        const void *src, size_t len, ssize_t cpy);
+
 static __inline__ int ust_buffers_write(struct ust_buffer *buf, size_t offset,
-       const void *src, size_t len)
+        const void *src, size_t len)
 {
        size_t cpy;
        size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);
@@ -124,24 +520,13 @@ static __inline__ int ust_buffers_write(struct ust_buffer *buf, size_t offset,
 
        cpy = min_t(size_t, len, buf->buf_size - buf_offset);
        ust_buffers_do_copy(buf->buf_data + buf_offset, src, cpy);
-       
+
        if (unlikely(len != cpy))
                _ust_buffers_write(buf, buf_offset, src, len, cpy);
        return len;
 }
 
-int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_t n_subbufs);
-extern void ust_buffers_channel_close(struct ust_channel *chan);
-
-extern int ust_buffers_do_get_subbuf(struct ust_buffer *buf, long *pconsumed_old);
-
-extern int ust_buffers_do_put_subbuf(struct ust_buffer *buf, u32 uconsumed_old);
-
-extern void init_ustrelay_transport(void);
-
-/*static*/ /* inline */ notrace void ltt_commit_slot(
-               struct ust_channel *channel,
-               void **transport_data, long buf_offset,
-               size_t data_size, size_t slot_size);
+int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed);
+int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old);
 
 #endif /* _UST_BUFFERS_H */
index 6f57f13de6de2b7c58800b5bda5d284f7f0f31ea..0d2715d17166fbabfa4768b9f2591214d0546239 100644 (file)
 #define EVENTS_PER_CHANNEL     65536
 #define MAX_CPUS               32
 
-struct ltt_trace_struct;
+struct ust_trace;
 
 struct ust_buffer;
 
 struct ust_channel {
        /* First 32 bytes cache-hot cacheline */
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        int *buf_struct_shmids;
        struct ust_buffer **buf;
        int overwrite:1;
@@ -50,18 +50,6 @@ struct ust_channel {
                                                 */
        /* End of first 32 bytes cacheline */
 
-       /*
-        * buffer_begin - called on buffer-switch to a new sub-buffer
-        * @buf: the channel buffer containing the new sub-buffer
-        */
-       void (*buffer_begin) (struct ust_buffer *buf,
-                       u64 tsc, unsigned int subbuf_idx);
-       /*
-        * buffer_end - called on buffer-switch to a new sub-buffer
-        * @buf: the channel buffer containing the previous sub-buffer
-        */
-       void (*buffer_end) (struct ust_buffer *buf,
-                       u64 tsc, unsigned int offset, unsigned int subbuf_idx);
        struct kref kref;       /* Channel transport reference count */
        size_t subbuf_size;
        int subbuf_size_order;
diff --git a/libust/header-inline.h b/libust/header-inline.h
new file mode 100644 (file)
index 0000000..1f1c6df
--- /dev/null
@@ -0,0 +1,63 @@
+#ifndef UST_HEADER_INLINE_H
+#define UST_HEADER_INLINE_H
+
+#include "tracercore.h"
+
+/*
+ * ust_get_header_size
+ *
+ * Calculate alignment offset to 32-bits. This is the alignment offset of the
+ * event header.
+ *
+ * Important note :
+ * The event header must be 32-bits. The total offset calculated here :
+ *
+ * Alignment of header struct on 32 bits (min arch size, header size)
+ * + sizeof(header struct)  (32-bits)
+ * + (opt) u16 (ext. event id)
+ * + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
+ * + (opt) u32 (ext. event size)
+ * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
+ *
+ * The payload must itself determine its own alignment from the biggest type it
+ * contains.
+ */
+static __inline__ unsigned char ust_get_header_size(
+               struct ust_channel *channel,
+               size_t offset,
+               size_t data_size,
+               size_t *before_hdr_pad,
+               unsigned int rflags)
+{
+       size_t orig_offset = offset;
+       size_t padding;
+
+       padding = ltt_align(offset, sizeof(struct ltt_event_header));
+       offset += padding;
+       offset += sizeof(struct ltt_event_header);
+
+       if(unlikely(rflags)) {
+               switch (rflags) {
+               case LTT_RFLAG_ID_SIZE_TSC:
+                       offset += sizeof(u16) + sizeof(u16);
+                       if (data_size >= 0xFFFFU)
+                               offset += sizeof(u32);
+                       offset += ltt_align(offset, sizeof(u64));
+                       offset += sizeof(u64);
+                       break;
+               case LTT_RFLAG_ID_SIZE:
+                       offset += sizeof(u16) + sizeof(u16);
+                       if (data_size >= 0xFFFFU)
+                               offset += sizeof(u32);
+                       break;
+               case LTT_RFLAG_ID:
+                       offset += sizeof(u16);
+                       break;
+               }
+       }
+
+       *before_hdr_pad = padding;
+       return offset - orig_offset;
+}
+
+#endif /* UST_HEADER_INLINE_H */
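
A numeric walk-through of the computation above for the LTT_RFLAG_ID_SIZE_TSC case, assuming a 4-byte event header (the header is 32 bits, per the note above), a starting offset of 6 and a payload smaller than 0xFFFF:

#include <stdio.h>

/* Same role as ltt_align(): bytes needed to reach the next multiple of alignment. */
static size_t align_delta(size_t offset, size_t alignment)
{
	return (-offset) & (alignment - 1);
}

int main(void)
{
	size_t offset = 6, orig = offset, before_hdr_pad;

	before_hdr_pad = align_delta(offset, 4);   /* align header on 32 bits: 2 */
	offset += before_hdr_pad + 4;              /* the event header itself */
	offset += 2 + 2;                           /* extended event id + event size (u16 each) */
	offset += align_delta(offset, 8);          /* align the full TSC on 64 bits */
	offset += 8;                               /* full TSC */

	printf("before_hdr_pad=%zu header_size=%zu\n", before_hdr_pad, offset - orig); /* 2, 18 */
	return 0;
}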
index 8e8fb71ee52999f8c6b542eb30c2001f3d8662ab..b1e457e7fbb9c05f12e6829bf4380afee3f9174e 100644 (file)
@@ -1316,7 +1316,7 @@ struct notifier_block marker_module_nb = {
 
 #endif /* CONFIG_MODULES */
 
-void ltt_dump_marker_state(struct ltt_trace_struct *trace)
+void ltt_dump_marker_state(struct ust_trace *trace)
 {
        struct marker_entry *entry;
        struct ltt_probe_private_data call_data;
index 9cb6bcaed6cd52794a01ebbc80635c197fb4b6d1..7f9ce5ad44f2a4f80cd77c68ca593fe0096120c8 100644 (file)
@@ -256,8 +256,9 @@ parse_end:
  * Field width and precision are *not* supported.
  * %n not supported.
  */
-static inline const char *parse_c_type(const char *fmt,
-               char *c_size, enum ltt_type *c_type)
+static inline
+const char *parse_c_type(const char *fmt, char *c_size, enum ltt_type *c_type,
+                        char *outfmt)
 {
        int qualifier;          /* 'h', 'l', or 'L' for integer fields */
                                /* 'z' support added 23/7/1999 S.H.    */
@@ -289,6 +290,13 @@ repeat:
                }
        }
 
+       if (outfmt) {
+               if (qualifier != -1)
+                       *outfmt++ = (char)qualifier;
+               *outfmt++ = *fmt;
+               *outfmt = 0;
+       }
+
        switch (*fmt) {
        case 'c':
                *c_type = LTT_TYPE_UNSIGNED_INT;
@@ -382,16 +390,13 @@ static inline size_t serialize_trace_data(struct ust_buffer *buf,
        case LTT_TYPE_UNSIGNED_INT:
                switch (c_size) {
                case 1:
-                       tmp.v_ulong = (unsigned long)(uint8_t)
-                                       va_arg(*args, unsigned int);
+                       tmp.v_ulong = (unsigned long)(uint8_t)va_arg(*args, unsigned int);
                        break;
                case 2:
-                       tmp.v_ulong = (unsigned long)(uint16_t)
-                                       va_arg(*args, unsigned int);
+                       tmp.v_ulong = (unsigned long)(uint16_t)va_arg(*args, unsigned int);
                        break;
                case 4:
-                       tmp.v_ulong = (unsigned long)(uint32_t)
-                                       va_arg(*args, unsigned int);
+                       tmp.v_ulong = (unsigned long)(uint32_t)va_arg(*args, unsigned int);
                        break;
                case 8:
                        tmp.v_uint64 = va_arg(*args, uint64_t);
@@ -505,92 +510,6 @@ copydone:
        return buf_offset;
 }
 
-static notrace void skip_space(const char **ps)
-{
-       while(**ps == ' ')
-               (*ps)++;
-}
-
-static notrace void copy_token(char **out, const char **in)
-{
-       while(**in != ' ' && **in != '\0') {
-               **out = **in;
-               (*out)++;
-               (*in)++;
-       }
-}
-
-/* serialize_to_text
- *
- * Given a format string and a va_list of arguments, convert them to a
- * human-readable string.
- *
- * @outbuf: the buffer to output the string to
- * @bufsize: the max size that can be used in outbuf
- * @fmt: the marker format string
- * @ap: a va_list that contains the arguments corresponding to fmt
- *
- * Return value: the number of chars that have been put in outbuf, excluding
- * the final \0, or, if the buffer was too small, the number of chars that
- * would have been written in outbuf if it had been large enough.
- *
- * outbuf may be NULL. The return value may then be used be allocate an
- * appropriate outbuf.
- *
- */
-
-notrace
-int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap)
-{
-       int fmt_len = strlen(fmt);
-       char *new_fmt = alloca(fmt_len + 1);
-       const char *orig_fmt_p = fmt;
-       char *new_fmt_p = new_fmt;
-       char false_buf;
-       int result;
-       enum { none, cfmt, tracefmt, argname } prev_token = none;
-
-       while(*orig_fmt_p != '\0') {
-               if(*orig_fmt_p == '%') {
-                       prev_token = cfmt;
-                       copy_token(&new_fmt_p, &orig_fmt_p);
-               }
-               else if(*orig_fmt_p == '#') {
-                       prev_token = tracefmt;
-                       do {
-                               orig_fmt_p++;
-                       } while(*orig_fmt_p != ' ' && *orig_fmt_p != '\0');
-               }
-               else if(*orig_fmt_p == ' ') {
-                       if(prev_token == argname) {
-                               *new_fmt_p = '=';
-                               new_fmt_p++;
-                       }
-                       else if(prev_token == cfmt) {
-                               *new_fmt_p = ' ';
-                               new_fmt_p++;
-                       }
-
-                       skip_space(&orig_fmt_p);
-               }
-               else {
-                       prev_token = argname;
-                       copy_token(&new_fmt_p, &orig_fmt_p);
-               }
-       }
-
-       *new_fmt_p = '\0';
-
-       if(outbuf == NULL) {
-               /* use this false_buffer for compatibility with pre-C99 */
-               outbuf = &false_buf;
-               bufsize = 1;
-       }
-       result = vsnprintf(outbuf, bufsize, new_fmt, ap);
-
-       return result;
-}
-
 notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
                        struct ltt_serialize_closure *closure,
                        void *serialize_private, int *largest_align,
@@ -683,7 +602,7 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
        size_t data_size, slot_size;
        unsigned int chan_index;
        struct ust_channel *channel;
-       struct ltt_trace_struct *trace, *dest_trace = NULL;
+       struct ust_trace *trace, *dest_trace = NULL;
        struct ust_buffer *buf;
        void *transport_data;
        u64 tsc;
@@ -782,15 +701,14 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
                buf = channel->buf[cpu];
                /* Out-of-order write : header and data */
                buf_offset = ltt_write_event_header(trace,
-                                       buf, buf_offset,
+                                       channel, buf, buf_offset,
                                        eID, data_size, tsc, rflags);
                ltt_write_event_data(buf, buf_offset, &closure,
                                        serialize_private,
                                        largest_align, fmt, &args_copy);
                va_end(args_copy);
                /* Out-of-order commit */
-               ltt_commit_slot(channel, &transport_data, buf_offset,
-                               data_size, slot_size);
+               ltt_commit_slot(channel, buf, buf_offset, data_size, slot_size);
                DBG("just commited event at offset %ld and size %zd", buf_offset, slot_size);
        }
 //ust//        __get_cpu_var(ltt_nesting)--;
@@ -809,6 +727,88 @@ notrace void ltt_trace(const struct marker *mdata, void *probe_data,
        va_end(args);
 }
 
-//ust// MODULE_LICENSE("GPL");
-//ust// MODULE_AUTHOR("Mathieu Desnoyers");
-//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Serializer");
+static notrace void skip_space(const char **ps)
+{
+       while(**ps == ' ')
+               (*ps)++;
+}
+
+static notrace void copy_token(char **out, const char **in)
+{
+       while(**in != ' ' && **in != '\0') {
+               **out = **in;
+               (*out)++;
+               (*in)++;
+       }
+}
+
+/* serialize_to_text
+ *
+ * Given a format string and a va_list of arguments, convert them to a
+ * human-readable string.
+ *
+ * @outbuf: the buffer to output the string to
+ * @bufsize: the max size that can be used in outbuf
+ * @fmt: the marker format string
+ * @ap: a va_list that contains the arguments corresponding to fmt
+ *
+ * Return value: the number of chars that have been put in outbuf, excluding
+ * the final \0, or, if the buffer was too small, the number of chars that
+ * would have been written in outbuf if it had been large enough.
+ *
+ * outbuf may be NULL. The return value may then be used to allocate an
+ * appropriate outbuf.
+ *
+ */
+
+notrace
+int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap)
+{
+       int fmt_len = strlen(fmt);
+       char *new_fmt = alloca(fmt_len + 1);
+       const char *orig_fmt_p = fmt;
+       char *new_fmt_p = new_fmt;
+       char false_buf;
+       int result;
+       enum { none, cfmt, tracefmt, argname } prev_token = none;
+
+       while(*orig_fmt_p != '\0') {
+               if(*orig_fmt_p == '%') {
+                       prev_token = cfmt;
+                       copy_token(&new_fmt_p, &orig_fmt_p);
+               }
+               else if(*orig_fmt_p == '#') {
+                       prev_token = tracefmt;
+                       do {
+                               orig_fmt_p++;
+                       } while(*orig_fmt_p != ' ' && *orig_fmt_p != '\0');
+               }
+               else if(*orig_fmt_p == ' ') {
+                       if(prev_token == argname) {
+                               *new_fmt_p = '=';
+                               new_fmt_p++;
+                       }
+                       else if(prev_token == cfmt) {
+                               *new_fmt_p = ' ';
+                               new_fmt_p++;
+                       }
+
+                       skip_space(&orig_fmt_p);
+               }
+               else {
+                       prev_token = argname;
+                       copy_token(&new_fmt_p, &orig_fmt_p);
+               }
+       }
+
+       *new_fmt_p = '\0';
+
+       if(outbuf == NULL) {
+               /* pass a 1-byte dummy buffer; pre-C99 vsnprintf() may not accept NULL */
+               outbuf = &false_buf;
+               bufsize = 1;
+       }
+       result = vsnprintf(outbuf, bufsize, new_fmt, ap);
+
+       return result;
+}
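
As an aside for readers of this hunk (not part of the patch): the comment above notes that a NULL outbuf can be used as a sizing pass. A minimal sketch of that two-pass use, assuming only the prototype shown here and standard C; the helper name marker_to_text is hypothetical.

#include <stdarg.h>
#include <stdlib.h>

int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap);

/* Hypothetical helper: sizing pass with outbuf == NULL, then a second pass
 * into a buffer that is exactly large enough. */
static char *marker_to_text(const char *fmt, ...)
{
	va_list ap, ap2;
	int len;
	char *out = NULL;

	va_start(ap, fmt);
	va_copy(ap2, ap);
	len = serialize_to_text(NULL, 0, fmt, ap); /* needed length, excluding '\0' */
	if (len >= 0)
		out = malloc(len + 1);
	if (out)
		serialize_to_text(out, len + 1, fmt, ap2);
	va_end(ap2);
	va_end(ap);
	return out;
}

With a marker format such as "dividend %d divisor %d", the rewritten format becomes "dividend=%d divisor=%d", so the text output reads as name=value pairs.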
diff --git a/libust/tracectl.c b/libust/tracectl.c
index 27e5bf07851c4597e0f147c00c2c65afb80d0929..0a8fc8e80f2e2d2e7bdbc3ead5df30a97d40a61f 100644 (file)
@@ -34,7 +34,7 @@
 #include "tracer.h"
 #include "usterr.h"
 #include "ustcomm.h"
-#include "buffers.h" /* FIXME: remove */
+#include "buffers.h"
 #include "marker-control.h"
 
 //#define USE_CLONE
@@ -88,7 +88,7 @@ struct blocked_consumer {
        struct ustcomm_server server;
        struct ustcomm_source src;
 
-       /* args to ust_buffers_do_get_subbuf */
+       /* args to ust_buffers_get_subbuf */
        struct ust_buffer *buf;
 
        struct list_head list;
@@ -166,7 +166,7 @@ void notif_cb(void)
 static void inform_consumer_daemon(const char *trace_name)
 {
        int i,j;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        pid_t pid = getpid();
        int result;
 
@@ -256,13 +256,13 @@ void process_blocked_consumers(void)
                                continue;
                        }
 
-                       result = ust_buffers_do_get_subbuf(bc->buf, &consumed_old);
+                       result = ust_buffers_get_subbuf(bc->buf, &consumed_old);
                        if(result == -EAGAIN) {
                                WARN("missed buffer?");
                                continue;
                        }
                        else if(result < 0) {
-                               DBG("ust_buffers_do_get_subbuf: error: %s", strerror(-result));
+                               DBG("ust_buffers_get_subbuf: error: %s", strerror(-result));
                        }
                        asprintf(&reply, "%s %ld", "OK", consumed_old);
                        result = ustcomm_send_reply(&bc->server, reply, &bc->src);
@@ -298,7 +298,7 @@ void seperate_channel_cpu(const char *channel_and_cpu, char **channel, int *cpu)
 static int do_cmd_get_shmid(const char *recvbuf, struct ustcomm_source *src)
 {
        int retval = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        char trace_name[] = "auto";
        int i;
        char *channel_and_cpu;
@@ -374,7 +374,7 @@ static int do_cmd_get_shmid(const char *recvbuf, struct ustcomm_source *src)
 static int do_cmd_get_n_subbufs(const char *recvbuf, struct ustcomm_source *src)
 {
        int retval = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        char trace_name[] = "auto";
        int i;
        char *channel_and_cpu;
@@ -443,7 +443,7 @@ static int do_cmd_get_n_subbufs(const char *recvbuf, struct ustcomm_source *src)
 static int do_cmd_get_subbuf_size(const char *recvbuf, struct ustcomm_source *src)
 {
        int retval = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        char trace_name[] = "auto";
        int i;
        char *channel_and_cpu;
@@ -512,7 +512,7 @@ static int do_cmd_get_subbuf_size(const char *recvbuf, struct ustcomm_source *src)
 static int do_cmd_get_subbuffer(const char *recvbuf, struct ustcomm_source *src)
 {
        int retval = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        char trace_name[] = "auto";
        int i;
        char *channel_and_cpu;
@@ -583,7 +583,7 @@ static int do_cmd_get_subbuffer(const char *recvbuf, struct ustcomm_source *src)
 static int do_cmd_put_subbuffer(const char *recvbuf, struct ustcomm_source *src)
 {
        int retval = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        char trace_name[] = "auto";
        int i;
        char *channel_and_cpu;
@@ -644,13 +644,13 @@ static int do_cmd_put_subbuffer(const char *recvbuf, struct ustcomm_source *src)
 
                        found = 1;
 
-                       result = ust_buffers_do_put_subbuf(buf, consumed_old);
+                       result = ust_buffers_put_subbuf(buf, consumed_old);
                        if(result < 0) {
-                               WARN("ust_buffers_do_put_subbuf: error (subbuf=%s)", channel_and_cpu);
+                               WARN("ust_buffers_put_subbuf: error (subbuf=%s)", channel_and_cpu);
                                asprintf(&reply, "%s", "ERROR");
                        }
                        else {
-                               DBG("ust_buffers_do_put_subbuf: success (subbuf=%s)", channel_and_cpu);
+                               DBG("ust_buffers_put_subbuf: success (subbuf=%s)", channel_and_cpu);
                                asprintf(&reply, "%s", "OK");
                        }
 
@@ -900,7 +900,7 @@ void *listener_main(void *p)
                        free(reply);
                }
 //             else if(nth_token_is(recvbuf, "get_notifications", 0) == 1) {
-//                     struct ltt_trace_struct *trace;
+//                     struct ust_trace *trace;
 //                     char trace_name[] = "auto";
 //                     int i;
 //                     char *channel_name;
@@ -1114,9 +1114,6 @@ static void __attribute__((constructor)) init()
                /* Ensure marker control is initialized */
                init_marker_control();
 
-               /* Ensure relay is initialized */
-               init_ustrelay_transport();
-
                /* Ensure markers are initialized */
                init_markers();
 
@@ -1209,7 +1206,7 @@ static void destroy_traces(void)
 static int trace_recording(void)
 {
        int retval = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
 
        ltt_lock_traces();
 
diff --git a/libust/tracer.c b/libust/tracer.c
index 5e4e1c0919e207f8eed2d54fcdcf861d6ba8c4e8..d5ec43e8356c2e527bc5642cc301fe1edb273770 100644 (file)
 
 /* Default callbacks for modules */
 notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
-               struct ltt_trace_struct *trace)
+               struct ust_trace *trace)
 {
        return 0;
 }
 
-int ltt_statedump_default(struct ltt_trace_struct *trace)
+int ltt_statedump_default(struct ust_trace *trace)
 {
        return 0;
 }
@@ -58,13 +58,13 @@ int ltt_statedump_default(struct ltt_trace_struct *trace)
 /* Callbacks for registered modules */
 
 int (*ltt_filter_control_functor)
-       (enum ltt_filter_control_msg msg, struct ltt_trace_struct *trace) =
+       (enum ltt_filter_control_msg msg, struct ust_trace *trace) =
                                        ltt_filter_control_default;
 struct module *ltt_filter_control_owner;
 
 /* These function pointers are protected by a trace activation check */
 struct module *ltt_run_filter_owner;
-int (*ltt_statedump_functor)(struct ltt_trace_struct *trace) =
+int (*ltt_statedump_functor)(struct ust_trace *trace) =
                                        ltt_statedump_default;
 struct module *ltt_statedump_owner;
 
@@ -140,7 +140,7 @@ static enum ltt_channels get_channel_type_from_name(const char *name)
 //ust//                }
 //ust//                ltt_filter_control_functor =
 //ust//                        (int (*)(enum ltt_filter_control_msg,
-//ust//                        struct ltt_trace_struct *))function;
+//ust//                        struct ust_trace *))function;
 //ust//                ltt_filter_control_owner = owner;
 //ust//                break;
 //ust//        case LTT_FUNCTION_STATEDUMP:
@@ -149,7 +149,7 @@ static enum ltt_channels get_channel_type_from_name(const char *name)
 //ust//                        goto end;
 //ust//                }
 //ust//                ltt_statedump_functor =
-//ust//                        (int (*)(struct ltt_trace_struct *))function;
+//ust//                        (int (*)(struct ust_trace *))function;
 //ust//                ltt_statedump_owner = owner;
 //ust//                break;
 //ust//        }
@@ -248,26 +248,7 @@ static inline int is_channel_overwrite(enum ltt_channels chan,
        }
 }
 
-/**
- * ltt_write_trace_header - Write trace header
- * @trace: Trace information
- * @header: Memory address where the information must be written to
- */
-void notrace ltt_write_trace_header(struct ltt_trace_struct *trace,
-               struct ltt_subbuffer_header *header)
-{
-       header->magic_number = LTT_TRACER_MAGIC_NUMBER;
-       header->major_version = LTT_TRACER_VERSION_MAJOR;
-       header->minor_version = LTT_TRACER_VERSION_MINOR;
-       header->arch_size = sizeof(void *);
-       header->alignment = ltt_get_alignment();
-       header->start_time_sec = trace->start_time.tv_sec;
-       header->start_time_usec = trace->start_time.tv_usec;
-       header->start_freq = trace->start_freq;
-       header->freq_scale = trace->freq_scale;
-}
-
-static void trace_async_wakeup(struct ltt_trace_struct *trace)
+static void trace_async_wakeup(struct ust_trace *trace)
 {
        int i;
        struct ust_channel *chan;
@@ -283,7 +264,7 @@ static void trace_async_wakeup(struct ltt_trace_struct *trace)
 //ust// /* Timer to send async wakeups to the readers */
 //ust// static void async_wakeup(unsigned long data)
 //ust// {
-//ust//        struct ltt_trace_struct *trace;
+//ust//        struct ust_trace *trace;
 //ust// 
 //ust//        /*
 //ust//         * PREEMPT_RT does not allow spinlocks to be taken within preempt
@@ -315,9 +296,9 @@ static void trace_async_wakeup(struct ltt_trace_struct *trace)
  *
  * Returns a pointer to the trace structure, NULL if not found.
  */
-struct ltt_trace_struct *_ltt_trace_find(const char *trace_name)
+struct ust_trace *_ltt_trace_find(const char *trace_name)
 {
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
 
        list_for_each_entry(trace, &ltt_traces.head, list)
                if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
@@ -331,9 +312,9 @@ struct ltt_trace_struct *_ltt_trace_find(const char *trace_name)
  *
  * Returns a pointer to the trace structure, NULL if not found.
  */
-struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name)
+struct ust_trace *_ltt_trace_find_setup(const char *trace_name)
 {
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
 
        list_for_each_entry(trace, &ltt_traces.setup_head, list)
                if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
@@ -348,8 +329,8 @@ struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name)
  */
 void ltt_release_transport(struct kref *kref)
 {
-//ust//        struct ltt_trace_struct *trace = container_of(kref,
-//ust//                        struct ltt_trace_struct, ltt_transport_kref);
+//ust//        struct ust_trace *trace = container_of(kref,
+//ust//                        struct ust_trace, ltt_transport_kref);
 //ust//        trace->ops->remove_dirs(trace);
 }
 
@@ -359,8 +340,8 @@ void ltt_release_transport(struct kref *kref)
  */
 void ltt_release_trace(struct kref *kref)
 {
-       struct ltt_trace_struct *trace = container_of(kref,
-                       struct ltt_trace_struct, kref);
+       struct ust_trace *trace = container_of(kref,
+                       struct ust_trace, kref);
        ltt_channels_trace_free(trace->channels);
        kfree(trace);
 }
@@ -368,6 +349,10 @@ void ltt_release_trace(struct kref *kref)
 static inline void prepare_chan_size_num(unsigned int *subbuf_size,
                                         unsigned int *n_subbufs)
 {
+       /* Make sure the subbuffer size is larger than a page */
+       *subbuf_size = max_t(unsigned int, *subbuf_size, PAGE_SIZE);
+
+       /* round to next power of 2 */
        *subbuf_size = 1 << get_count_order(*subbuf_size);
        *n_subbufs = 1 << get_count_order(*n_subbufs);
 
@@ -379,7 +364,7 @@ static inline void prepare_chan_size_num(unsigned int *subbuf_size,
 int _ltt_trace_setup(const char *trace_name)
 {
        int err = 0;
-       struct ltt_trace_struct *new_trace = NULL;
+       struct ust_trace *new_trace = NULL;
        int metadata_index;
        unsigned int chan;
        enum ltt_channels chantype;
@@ -396,7 +381,7 @@ int _ltt_trace_setup(const char *trace_name)
                goto traces_error;
        }
 
-       new_trace = kzalloc(sizeof(struct ltt_trace_struct), GFP_KERNEL);
+       new_trace = kzalloc(sizeof(struct ust_trace), GFP_KERNEL);
        if (!new_trace) {
                ERR("Unable to allocate memory for trace %s", trace_name);
                err = -ENOMEM;
@@ -454,7 +439,7 @@ int ltt_trace_setup(const char *trace_name)
 }
 
 /* must be called from within a traces lock. */
-static void _ltt_trace_free(struct ltt_trace_struct *trace)
+static void _ltt_trace_free(struct ust_trace *trace)
 {
        list_del(&trace->list);
        kfree(trace);
@@ -463,7 +448,7 @@ static void _ltt_trace_free(struct ltt_trace_struct *trace)
 int ltt_trace_set_type(const char *trace_name, const char *trace_type)
 {
        int err = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        struct ltt_transport *tran_iter, *transport = NULL;
 
        ltt_lock_traces();
@@ -498,7 +483,7 @@ int ltt_trace_set_channel_subbufsize(const char *trace_name,
                const char *channel_name, unsigned int size)
 {
        int err = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        int index;
 
        ltt_lock_traces();
@@ -527,7 +512,7 @@ int ltt_trace_set_channel_subbufcount(const char *trace_name,
                const char *channel_name, unsigned int cnt)
 {
        int err = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        int index;
 
        ltt_lock_traces();
@@ -556,7 +541,7 @@ int ltt_trace_set_channel_enable(const char *trace_name,
                const char *channel_name, unsigned int enable)
 {
        int err = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        int index;
 
        ltt_lock_traces();
@@ -596,7 +581,7 @@ int ltt_trace_set_channel_overwrite(const char *trace_name,
                const char *channel_name, unsigned int overwrite)
 {
        int err = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        int index;
 
        ltt_lock_traces();
@@ -637,7 +622,7 @@ traces_error:
 int ltt_trace_alloc(const char *trace_name)
 {
        int err = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
        unsigned int subbuf_size, subbuf_cnt;
 //ust//        unsigned long flags;
        int chan;
@@ -760,7 +745,7 @@ traces_error:
 //ust// }
 
 /* Must be called while sure that trace is in the list. */
-static int _ltt_trace_destroy(struct ltt_trace_struct *trace)
+static int _ltt_trace_destroy(struct ust_trace *trace)
 {
        int err = -EPERM;
 
@@ -794,7 +779,7 @@ traces_error:
 }
 
 /* Sleepable part of the destroy */
-static void __ltt_trace_destroy(struct ltt_trace_struct        *trace)
+static void __ltt_trace_destroy(struct ust_trace *trace)
 {
        int i;
        struct ust_channel *chan;
@@ -840,7 +825,7 @@ static void __ltt_trace_destroy(struct ltt_trace_struct     *trace)
 int ltt_trace_destroy(const char *trace_name)
 {
        int err = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
 
        ltt_lock_traces();
 
@@ -874,7 +859,7 @@ error:
 }
 
 /* must be called from within a traces lock. */
-static int _ltt_trace_start(struct ltt_trace_struct *trace)
+static int _ltt_trace_start(struct ust_trace *trace)
 {
        int err = 0;
 
@@ -903,7 +888,7 @@ traces_error:
 int ltt_trace_start(const char *trace_name)
 {
        int err = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
 
        ltt_lock_traces();
 
@@ -941,7 +926,7 @@ no_trace:
 }
 
 /* must be called from within traces lock */
-static int _ltt_trace_stop(struct ltt_trace_struct *trace)
+static int _ltt_trace_stop(struct ust_trace *trace)
 {
        int err = -EPERM;
 
@@ -968,7 +953,7 @@ traces_error:
 int ltt_trace_stop(const char *trace_name)
 {
        int err = 0;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
 
        ltt_lock_traces();
        trace = _ltt_trace_find(trace_name);
@@ -985,7 +970,7 @@ int ltt_trace_stop(const char *trace_name)
 int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
 {
        int err;
-       struct ltt_trace_struct *trace;
+       struct ust_trace *trace;
 
        DBG("ltt_filter_control : trace %s", trace_name);
        ltt_lock_traces();
diff --git a/libust/tracer.h b/libust/tracer.h
index b1fbf107c0ae7beba094729ee5ba4144f04345fc..e4ff21e4044da00aa9e347a1eea0d9cab2ba41e3 100644 (file)
 
 #include <sys/types.h>
 #include <stdarg.h>
-//#include "list.h"
 #include <ust/kernelcompat.h>
-#include "buffers.h"
 #include "channels.h"
 #include "tracercore.h"
+#include "tracerconst.h"
 #include <ust/marker.h>
 #include <ust/probe.h>
 
@@ -68,7 +67,7 @@ extern size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
                        int *largest_align, const char *fmt, va_list *args);
 
 struct ltt_probe_private_data {
-       struct ltt_trace_struct *trace; /*
+       struct ust_trace *trace;        /*
                                         * Target trace, for metadata
                                         * or statedump.
                                         */
@@ -128,34 +127,25 @@ struct user_dbg_data {
 
 struct ltt_trace_ops {
        /* First 32 bytes cache-hot cacheline */
-       int (*reserve_slot) (struct ltt_trace_struct *trace,
-                               struct ust_channel *channel,
-                               void **transport_data, size_t data_size,
-                               size_t *slot_size, long *buf_offset, u64 *tsc,
-                               unsigned int *rflags,
-                               int largest_align, int cpu);
-//ust//        void (*commit_slot) (struct ltt_channel_struct *channel,
-//ust//                                void **transport_data, long buf_offset,
-//ust//                                size_t slot_size);
        void (*wakeup_channel) (struct ust_channel *channel);
-       int (*user_blocking) (struct ltt_trace_struct *trace,
+       int (*user_blocking) (struct ust_trace *trace,
                                unsigned int index, size_t data_size,
                                struct user_dbg_data *dbg);
        /* End of first 32 bytes cacheline */
-       int (*create_dirs) (struct ltt_trace_struct *new_trace);
-       void (*remove_dirs) (struct ltt_trace_struct *new_trace);
+       int (*create_dirs) (struct ust_trace *new_trace);
+       void (*remove_dirs) (struct ust_trace *new_trace);
        int (*create_channel) (const char *trace_name,
-                               struct ltt_trace_struct *trace,
+                               struct ust_trace *trace,
                                const char *channel_name,
                                struct ust_channel *channel,
                                unsigned int subbuf_size,
                                unsigned int n_subbufs, int overwrite);
        void (*finish_channel) (struct ust_channel *channel);
        void (*remove_channel) (struct ust_channel *channel);
-       void (*user_errors) (struct ltt_trace_struct *trace,
+       void (*user_errors) (struct ust_trace *trace,
                                unsigned int index, size_t data_size,
                                struct user_dbg_data *dbg, unsigned int cpu);
-} ____cacheline_aligned;
+};
 
 struct ltt_transport {
        char *name;
@@ -170,7 +160,7 @@ enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };
 #define CHANNEL_FLAG_OVERWRITE (1U<<1)
 
 /* Per-trace information - each trace/flight recorder represented by one */
-struct ltt_trace_struct {
+struct ust_trace {
        /* First 32 bytes cache-hot cacheline */
        struct list_head list;
        struct ltt_trace_ops *ops;
@@ -193,40 +183,6 @@ struct ltt_trace_struct {
        char trace_name[NAME_MAX];
 } ____cacheline_aligned;
 
-/* Hardcoded event headers
- *
- * event header for a trace with active heartbeat : 27 bits timestamps
- *
- * headers are 32-bits aligned. In order to insure such alignment, a dynamic per
- * trace alignment value must be done.
- *
- * Remember that the C compiler does align each member on the boundary
- * equivalent to their own size.
- *
- * As relay subbuffers are aligned on pages, we are sure that they are 4 and 8
- * bytes aligned, so the buffer header and trace header are aligned.
- *
- * Event headers are aligned depending on the trace alignment option.
- *
- * Note using C structure bitfields for cross-endianness and portability
- * concerns.
- */
-
-#define LTT_RESERVED_EVENTS    3
-#define LTT_EVENT_BITS         5
-#define LTT_FREE_EVENTS                ((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
-#define LTT_TSC_BITS           27
-#define LTT_TSC_MASK           ((1 << LTT_TSC_BITS) - 1)
-
-struct ltt_event_header {
-       u32 id_time;            /* 5 bits event id (MSB); 27 bits time (LSB) */
-};
-
-/* Reservation flags */
-#define        LTT_RFLAG_ID                    (1 << 0)
-#define        LTT_RFLAG_ID_SIZE               (1 << 1)
-#define        LTT_RFLAG_ID_SIZE_TSC           (1 << 2)
-
 /*
  * We use asm/timex.h : cpu_khz/HZ variable in here : we might have to deal
  * specifically with CPU frequency scaling someday, so using an interpolation
@@ -284,60 +240,12 @@ static __inline__ size_t ltt_subbuffer_header_size(void)
        return offsetof(struct ltt_subbuffer_header, header_end);
 }
 
-/*
- * ust_get_header_size
- *
- * Calculate alignment offset to 32-bits. This is the alignment offset of the
- * event header.
- *
- * Important note :
- * The event header must be 32-bits. The total offset calculated here :
- *
- * Alignment of header struct on 32 bits (min arch size, header size)
- * + sizeof(header struct)  (32-bits)
- * + (opt) u16 (ext. event id)
- * + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
- * + (opt) u32 (ext. event size)
- * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
- *
- * The payload must itself determine its own alignment from the biggest type it
- * contains.
- * */
-static __inline__ unsigned char ust_get_header_size(
-               struct ust_channel *channel,
-               size_t offset,
-               size_t data_size,
-               size_t *before_hdr_pad,
-               unsigned int rflags)
-{
-       size_t orig_offset = offset;
-       size_t padding;
-
-       padding = ltt_align(offset, sizeof(struct ltt_event_header));
-       offset += padding;
-       offset += sizeof(struct ltt_event_header);
-
-       switch (rflags) {
-       case LTT_RFLAG_ID_SIZE_TSC:
-               offset += sizeof(u16) + sizeof(u16);
-               if (data_size >= 0xFFFFU)
-                       offset += sizeof(u32);
-               offset += ltt_align(offset, sizeof(u64));
-               offset += sizeof(u64);
-               break;
-       case LTT_RFLAG_ID_SIZE:
-               offset += sizeof(u16) + sizeof(u16);
-               if (data_size >= 0xFFFFU)
-                       offset += sizeof(u32);
-               break;
-       case LTT_RFLAG_ID:
-               offset += sizeof(u16);
-               break;
-       }
-
-       *before_hdr_pad = padding;
-       return offset - orig_offset;
-}
+extern size_t ltt_write_event_header_slow(struct ust_trace *trace,
+               struct ust_channel *channel,
+               struct ust_buffer *buf, long buf_offset,
+               u16 eID, u32 event_size,
+               u64 tsc, unsigned int rflags);
+
 
 /*
  * ltt_write_event_header
@@ -355,139 +263,25 @@ static __inline__ unsigned char ust_get_header_size(
  *
  * returns : offset where the event data must be written.
  */
-static __inline__ size_t ltt_write_event_header(struct ltt_trace_struct *trace,
+static __inline__ size_t ltt_write_event_header(struct ust_trace *trace,
+               struct ust_channel *channel,
                struct ust_buffer *buf, long buf_offset,
-               u16 eID, size_t event_size,
+               u16 eID, u32 event_size,
                u64 tsc, unsigned int rflags)
 {
        struct ltt_event_header header;
-       size_t small_size;
-
-       switch (rflags) {
-       case LTT_RFLAG_ID_SIZE_TSC:
-               header.id_time = 29 << LTT_TSC_BITS;
-               break;
-       case LTT_RFLAG_ID_SIZE:
-               header.id_time = 30 << LTT_TSC_BITS;
-               break;
-       case LTT_RFLAG_ID:
-               header.id_time = 31 << LTT_TSC_BITS;
-               break;
-       default:
-               header.id_time = eID << LTT_TSC_BITS;
-               break;
-       }
-       header.id_time |= (u32)tsc & LTT_TSC_MASK;
-       ust_buffers_write(buf, buf_offset, &header, sizeof(header));
-       buf_offset += sizeof(header);
-
-       switch (rflags) {
-       case LTT_RFLAG_ID_SIZE_TSC:
-               small_size = min_t(size_t, event_size, 0xFFFFU);
-               ust_buffers_write(buf, buf_offset,
-                       (u16[]){ (u16)eID }, sizeof(u16));
-               buf_offset += sizeof(u16);
-               ust_buffers_write(buf, buf_offset,
-                       (u16[]){ (u16)small_size }, sizeof(u16));
-               buf_offset += sizeof(u16);
-               if (small_size == 0xFFFFU) {
-                       ust_buffers_write(buf, buf_offset,
-                               (u32[]){ (u32)event_size }, sizeof(u32));
-                       buf_offset += sizeof(u32);
-               }
-               buf_offset += ltt_align(buf_offset, sizeof(u64));
-               ust_buffers_write(buf, buf_offset,
-                       (u64[]){ (u64)tsc }, sizeof(u64));
-               buf_offset += sizeof(u64);
-               break;
-       case LTT_RFLAG_ID_SIZE:
-               small_size = min_t(size_t, event_size, 0xFFFFU);
-               ust_buffers_write(buf, buf_offset,
-                       (u16[]){ (u16)eID }, sizeof(u16));
-               buf_offset += sizeof(u16);
-               ust_buffers_write(buf, buf_offset,
-                       (u16[]){ (u16)small_size }, sizeof(u16));
-               buf_offset += sizeof(u16);
-               if (small_size == 0xFFFFU) {
-                       ust_buffers_write(buf, buf_offset,
-                               (u32[]){ (u32)event_size }, sizeof(u32));
-                       buf_offset += sizeof(u32);
-               }
-               break;
-       case LTT_RFLAG_ID:
-               ust_buffers_write(buf, buf_offset,
-                       (u16[]){ (u16)eID }, sizeof(u16));
-               buf_offset += sizeof(u16);
-               break;
-       default:
-               break;
-       }
-
-       return buf_offset;
-}
 
-/* Lockless LTTng */
+       if (unlikely(rflags))
+               goto slow_path;
 
-/*
- * ltt_reserve_slot
- *
- * Atomic slot reservation in a LTTng buffer. It will take care of
- * sub-buffer switching.
- *
- * Parameters:
- *
- * @trace : the trace structure to log to.
- * @channel : the chanel to reserve space into.
- * @transport_data : specific transport data.
- * @data_size : size of the variable length data to log.
- * @slot_size : pointer to total size of the slot (out)
- * @buf_offset : pointer to reserve offset (out)
- * @tsc : pointer to the tsc at the slot reservation (out)
- * @rflags : reservation flags (header specificity)
- * @cpu : cpu id
- *
- * Return : -ENOSPC if not enough space, else 0.
- */
-static __inline__ int ltt_reserve_slot(
-               struct ltt_trace_struct *trace,
-               struct ust_channel *channel,
-               void **transport_data,
-               size_t data_size,
-               size_t *slot_size,
-               long *buf_offset,
-               u64 *tsc,
-               unsigned int *rflags,
-               int largest_align, int cpu)
-{
-       return trace->ops->reserve_slot(trace, channel, transport_data,
-                       data_size, slot_size, buf_offset, tsc, rflags,
-                       largest_align, cpu);
-}
+       header.id_time = eID << LTT_TSC_BITS;
+       header.id_time |= (u32)tsc & LTT_TSC_MASK;
+       ust_buffers_write(buf, buf_offset, &header, sizeof(header));
+       buf_offset += sizeof(header);
+
+       return buf_offset;
 
-///*
-// * ltt_commit_slot
-// *
-// * Atomic unordered slot commit. Increments the commit count in the
-// * specified sub-buffer, and delivers it if necessary.
-// *
-// * Parameters:
-// *
-// * @channel : the chanel to reserve space into.
-// * @transport_data : specific transport data.
-// * @buf_offset : offset of beginning of reserved slot
-// * @slot_size : size of the reserved slot.
-// */
-//static inline void ltt_commit_slot(
-//             struct ltt_channel_struct *channel,
-//             void **transport_data,
-//             long buf_offset,
-//             size_t slot_size)
-//{
-//     struct ltt_trace_struct *trace = channel->trace;
-//
-//     trace->ops->commit_slot(channel, transport_data, buf_offset, slot_size);
-//}
+slow_path:
+       return ltt_write_event_header_slow(trace, channel, buf, buf_offset,
+                               eID, event_size, tsc, rflags);
+}
 
 /*
  * Control channels :
@@ -521,6 +315,26 @@ static __inline__ int ltt_reserve_slot(
 #define LTT_TRACER_VERSION_MAJOR       2
 #define LTT_TRACER_VERSION_MINOR       3
 
+/**
+ * ltt_write_trace_header - Write trace header
+ * @trace: Trace information
+ * @header: Memory address where the information must be written to
+ */
+static __inline__ void ltt_write_trace_header(struct ust_trace *trace,
+               struct ltt_subbuffer_header *header)
+{
+       header->magic_number = LTT_TRACER_MAGIC_NUMBER;
+       header->major_version = LTT_TRACER_VERSION_MAJOR;
+       header->minor_version = LTT_TRACER_VERSION_MINOR;
+       header->arch_size = sizeof(void *);
+       header->alignment = ltt_get_alignment();
+       header->start_time_sec = trace->start_time.tv_sec;
+       header->start_time_usec = trace->start_time.tv_usec;
+       header->start_freq = trace->start_freq;
+       header->freq_scale = trace->freq_scale;
+}
+
+
 /*
  * Size reserved for high priority events (interrupts, NMI, BH) at the end of a
  * nearly full buffer. User space won't use this last amount of space when in
@@ -556,7 +370,7 @@ union ltt_control_args {
 
 extern int _ltt_trace_setup(const char *trace_name);
 extern int ltt_trace_setup(const char *trace_name);
-extern struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name);
+extern struct ust_trace *_ltt_trace_find_setup(const char *trace_name);
 extern int ltt_trace_set_type(const char *trace_name, const char *trace_type);
 extern int ltt_trace_set_channel_subbufsize(const char *trace_name,
                const char *channel_name, unsigned int size);
@@ -581,7 +395,7 @@ extern int ltt_filter_control(enum ltt_filter_control_msg msg,
 
 extern struct dentry *get_filter_root(void);
 
-extern void ltt_write_trace_header(struct ltt_trace_struct *trace,
+extern void ltt_write_trace_header(struct ust_trace *trace,
                struct ltt_subbuffer_header *header);
 extern void ltt_buffer_destroy(struct ust_channel *ltt_chan);
 
@@ -592,11 +406,11 @@ extern void ltt_core_unregister(void);
 extern void ltt_release_trace(struct kref *kref);
 extern void ltt_release_transport(struct kref *kref);
 
-extern void ltt_dump_marker_state(struct ltt_trace_struct *trace);
+extern void ltt_dump_marker_state(struct ust_trace *trace);
 
 extern void ltt_lock_traces(void);
 extern void ltt_unlock_traces(void);
 
-extern struct ltt_trace_struct *_ltt_trace_find(const char *trace_name);
+extern struct ust_trace *_ltt_trace_find(const char *trace_name);
 
 #endif /* _LTT_TRACER_H */
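
To connect the pieces above (not part of the patch): the probe path in serialize.c now calls the inline ltt_write_event_header(), which handles the common rflags == 0 case itself and defers extended headers to ltt_write_event_header_slow(). A condensed sketch of that write sequence, with reservation and error handling elided; the helper name is illustrative and the types/prototypes are assumed from the patched tracer.h and buffers.h.

static void write_one_event(struct ust_trace *trace, struct ust_channel *channel,
			    struct ust_buffer *buf, long buf_offset,
			    u16 eID, u32 data_size, size_t slot_size,
			    u64 tsc, unsigned int rflags)
{
	/* Compact 32-bit header when rflags == 0; otherwise the slow path
	 * emits the extended id/size/tsc fields. */
	buf_offset = ltt_write_event_header(trace, channel, buf, buf_offset,
					    eID, data_size, tsc, rflags);

	/* ... serialize the payload at buf_offset (ltt_write_event_data) ... */

	/* Out-of-order commit, now keyed on the buffer rather than
	 * transport_data. */
	ltt_commit_slot(channel, buf, buf_offset, data_size, slot_size);
}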
diff --git a/libust/tracerconst.h b/libust/tracerconst.h
new file mode 100644 (file)
index 0000000..b23e743
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef TRACERCONST_H
+#define TRACERCONST_H
+
+/* Hardcoded event headers
+ *
+ * event header for a trace with active heartbeat: 27-bit timestamps
+ *
+ * headers are 32-bit aligned. To ensure such alignment, a dynamic per-trace
+ * alignment value must be used.
+ *
+ * Remember that the C compiler does align each member on the boundary
+ * equivalent to their own size.
+ *
+ * As relay subbuffers are aligned on pages, we are sure that they are 4 and 8
+ * bytes aligned, so the buffer header and trace header are aligned.
+ *
+ * Event headers are aligned depending on the trace alignment option.
+ *
+ * Note: C structure bitfields are not used here, for cross-endianness and
+ * portability reasons.
+ */
+
+#define LTT_RESERVED_EVENTS    3
+#define LTT_EVENT_BITS         5
+#define LTT_FREE_EVENTS                ((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
+#define LTT_TSC_BITS           27
+#define LTT_TSC_MASK           ((1 << LTT_TSC_BITS) - 1)
+
+struct ltt_event_header {
+       u32 id_time;            /* 5 bits event id (MSB); 27 bits time (LSB) */
+};
+
+/* Reservation flags */
+#define        LTT_RFLAG_ID                    (1 << 0)
+#define        LTT_RFLAG_ID_SIZE               (1 << 1)
+#define        LTT_RFLAG_ID_SIZE_TSC           (1 << 2)
+
+#define LTT_MAX_SMALL_SIZE              0xFFFFU
+
+#endif /* TRACERCONST_H */
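
A small sketch (not part of the patch) of the packing these constants describe, mirroring the fast path of ltt_write_event_header() in tracer.h: 5 bits of event ID in the most significant bits, 27 bits of truncated timestamp in the rest. LTT_TSC_BITS and LTT_TSC_MASK are assumed to come from tracerconst.h above.

#include <stdint.h>

static inline uint32_t pack_id_time(uint16_t eID, uint64_t tsc)
{
	return ((uint32_t)eID << LTT_TSC_BITS) | ((uint32_t)tsc & LTT_TSC_MASK);
}

static inline uint16_t unpack_id(uint32_t id_time)
{
	return id_time >> LTT_TSC_BITS;		/* 5-bit event id */
}

static inline uint32_t unpack_tsc(uint32_t id_time)
{
	return id_time & LTT_TSC_MASK;		/* 27-bit truncated timestamp */
}

Event IDs 29, 30 and 31 are reserved to flag the extended header formats (the LTT_RFLAG_* cases), which is why LTT_FREE_EVENTS is (1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS.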
diff --git a/libust/tracercore.h b/libust/tracercore.h
index 5a088db65e4e2c5c4e81ec91ecb640da7897aed5..3113383cfd69184e52cd06f1a81f3b47566f8472 100644 (file)
@@ -18,8 +18,8 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
  */
 
-#ifndef LTT_CORE_H
-#define LTT_CORE_H
+#ifndef UST_TRACERCORE_H
+#define UST_TRACERCORE_H
 
 #include <ust/kernelcompat.h>
 //ust// #include <linux/percpu.h>
@@ -41,11 +41,6 @@ struct ltt_traces {
 
 extern struct ltt_traces ltt_traces;
 
-/*
- * get dentry of ltt's root dir
- */
-struct dentry *get_ltt_root(void);
-
 /* Keep track of trap nesting inside LTT */
 //ust// DECLARE_PER_CPU(unsigned int, ltt_nesting);
 extern unsigned int ltt_nesting;
@@ -93,4 +88,4 @@ static inline int ltt_get_alignment(void)
 }
 #endif /* defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT) */
 
-#endif /* LTT_CORE_H */
+#endif /* UST_TRACERCORE_H */
diff --git a/ustd/lowlevel.c b/ustd/lowlevel.c
index 48bd580ee71d59d93a8be604561b148081e7119f..6295a8bc5a4391d6cd7b38b45c07dad66c1950c7 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <assert.h>
 
+#include "buffers.h"
 #include "tracer.h"
 #include "ustd.h"
 #include "usterr.h"