Cleanup: ring buffer: remove lib_ring_buffer_switch_new_end()
author Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Sun, 30 Jun 2013 19:36:52 +0000 (15:36 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Sun, 30 Jun 2013 19:36:52 +0000 (15:36 -0400)
lib_ring_buffer_switch_new_end() is a leftover from the days when an
event that exactly filled the current sub-buffer would automatically
trigger a sub-buffer switch into the next sub-buffer.

Even before the ring buffer code was moved into lttng-modules, this
behavior had already been changed: an event that exactly fills a
sub-buffer only fills the current sub-buffer, and does not need to
switch into the next one just to populate its sub-buffer header. The
change was made so that the periodic timer-driven switch, which has the
same semantics as an event exactly filling a sub-buffer, would not
create large numbers of empty sub-buffers.
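
For illustration, here is a minimal sketch of the boundary test that
used to request the extra switch. This is not the library API:
subbuf_offset_sketch() and subbuf_size are simplified stand-ins, and a
power-of-two sub-buffer size is assumed.

#include <stddef.h>

/* Simplified stand-in for subbuf_offset(): offset within the current
 * sub-buffer, assuming subbuf_size is a power of two. */
static inline size_t subbuf_offset_sketch(size_t offset, size_t subbuf_size)
{
	return offset & (subbuf_size - 1);
}

/* A reserved slot whose end offset lands at offset 0 of the next
 * sub-buffer exactly fills the current one. The old code also switched
 * into the next sub-buffer in that case; the current code lets the
 * event's own commit deliver the (full) sub-buffer instead. */
static inline int ends_on_subbuf_boundary(size_t offset_end, size_t subbuf_size)
{
	return subbuf_offset_sketch(offset_end, subbuf_size) == 0;
}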

However, when that change was made, lib_ring_buffer_switch_new_end()
was not removed, even though it clearly should have been. Its job is now
performed by the event "commit".

lib_ring_buffer_switch_new_end() therefore has no effect: it is only
reached when the reserved slot ends exactly on a sub-buffer boundary, so
data_size equals the sub-buffer size and padding_size is always 0.
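
A short worked example of that arithmetic (illustrative only, assuming
a power-of-two sub-buffer size; the real code uses subbuf_offset() and
chan->backend.subbuf_size):

#include <assert.h>
#include <stddef.h>

int main(void)
{
	const size_t subbuf_size = 4096;	/* assumed power of two */
	const size_t end = 3 * subbuf_size;	/* slot ends exactly on a boundary */
	/* Mirrors subbuf_offset(offsets->end - 1, chan) + 1 from the removed function. */
	size_t data_size = ((end - 1) & (subbuf_size - 1)) + 1;
	size_t padding_size = subbuf_size - data_size;

	assert(data_size == subbuf_size);
	assert(padding_size == 0);	/* the removed padding commit was a no-op */
	return 0;
}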

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
libringbuffer/frontend_api.h
libringbuffer/ring_buffer_frontend.c

diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index a2a9af39aea777998c5d96cdde5f632f500c8cf7..b5406b0ef9d3678d44f706b23d66117b5659fd9e 100644
@@ -126,14 +126,6 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
         * boundary. It's safe to write.
         */
        *o_end = *o_begin + ctx->slot_size;
-
-       if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
-               /*
-                * The offset_end will fall at the very beginning of the next
-                * subbuffer.
-                */
-               return 1;
-
        return 0;
 }
 
diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index c177f337da01aa4f457fbea66add969a837d691f..84c6726e97a2d66ec612bc083aaa4ca36ab6225a 100644
 struct switch_offsets {
        unsigned long begin, end, old;
        size_t pre_header_padding, size;
-       unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
-                    switch_old_end:1;
+       unsigned int switch_new_start:1, switch_old_start:1, switch_old_end:1;
 };
 
 DEFINE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
@@ -1415,42 +1414,6 @@ void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
                                             handle);
 }
 
-/*
- * lib_ring_buffer_switch_new_end: finish switching current subbuffer
- *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves.
- */
-static
-void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
-                                   struct channel *chan,
-                                   struct switch_offsets *offsets,
-                                   uint64_t tsc,
-                                   struct lttng_ust_shm_handle *handle)
-{
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long endidx = subbuf_index(offsets->end - 1, chan);
-       unsigned long commit_count, padding_size, data_size;
-
-       data_size = subbuf_offset(offsets->end - 1, chan) + 1;
-       padding_size = chan->backend.subbuf_size - data_size;
-       subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
-                               handle);
-
-       /*
-        * Order all writes to buffer before the commit count update that will
-        * determine that the subbuffer is full.
-        */
-       cmm_smp_wmb();
-       v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
-       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
-       lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
-                                 commit_count, endidx, handle);
-       lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-                                            offsets->end, commit_count,
-                                            padding_size, handle);
-}
-
 /*
  * Returns :
  * 0 if ok
@@ -1603,7 +1566,6 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
        offsets->begin = v_read(config, &buf->offset);
        offsets->old = offsets->begin;
        offsets->switch_new_start = 0;
-       offsets->switch_new_end = 0;
        offsets->switch_old_end = 0;
        offsets->pre_header_padding = 0;
 
@@ -1735,14 +1697,6 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                 */
        }
        offsets->end = offsets->begin + offsets->size;
-
-       if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
-               /*
-                * The offset_end will fall at the very beginning of the next
-                * subbuffer.
-                */
-               offsets->switch_new_end = 1;    /* For offsets->begin */
-       }
        return 0;
 }
 
@@ -1816,9 +1770,6 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
        if (caa_unlikely(offsets.switch_new_start))
                lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
 
-       if (caa_unlikely(offsets.switch_new_end))
-               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
-
        ctx->slot_size = offsets.size;
        ctx->pre_offset = offsets.begin;
        ctx->buf_offset = offsets.begin + offsets.pre_header_padding;