X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;ds=sidebyside;f=libringbuffer%2Fring_buffer_frontend.c;h=8c8126025878ed72929bf6ecd8ebedf9b2806148;hb=3962118d1f08fd33ad6adfad10452962c3662bd9;hp=84c6726e97a2d66ec612bc083aaa4ca36ab6225a;hpb=019fbcb65ec7eec2c86019ad1afce8fa47dc1269;p=lttng-ust.git

diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index 84c6726e..8c812602 100644
--- a/libringbuffer/ring_buffer_frontend.c
+++ b/libringbuffer/ring_buffer_frontend.c
@@ -107,7 +107,8 @@ struct switch_offsets {
 	unsigned long begin, end, old;
 	size_t pre_header_padding, size;
-	unsigned int switch_new_start:1, switch_old_start:1, switch_old_end:1;
+	unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
+		     switch_old_end:1;
 };
 
 DEFINE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
 
@@ -1414,6 +1415,30 @@ void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
 			     handle);
 }
 
+/*
+ * lib_ring_buffer_switch_new_end: finish switching current subbuffer
+ *
+ * Calls subbuffer_set_data_size() to set the data size of the current
+ * sub-buffer. We do not need to perform check_deliver nor commit here,
+ * since this task will be done by the "commit" of the event for which
+ * we are currently doing the space reservation.
+ */
+static
+void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
+				    struct channel *chan,
+				    struct switch_offsets *offsets,
+				    uint64_t tsc,
+				    struct lttng_ust_shm_handle *handle)
+{
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+	unsigned long endidx, data_size;
+
+	endidx = subbuf_index(offsets->end - 1, chan);
+	data_size = subbuf_offset(offsets->end - 1, chan) + 1;
+	subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
+				handle);
+}
+
 /*
  * Returns :
  * 0 if ok
@@ -1566,6 +1591,7 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 	offsets->begin = v_read(config, &buf->offset);
 	offsets->old = offsets->begin;
 	offsets->switch_new_start = 0;
+	offsets->switch_new_end = 0;
 	offsets->switch_old_end = 0;
 	offsets->pre_header_padding = 0;
 
@@ -1697,6 +1723,14 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 		 */
 	}
 	offsets->end = offsets->begin + offsets->size;
+
+	if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+		/*
+		 * The offset_end will fall at the very beginning of the next
+		 * subbuffer.
+		 */
+		offsets->switch_new_end = 1;	/* For offsets->begin */
+	}
 	return 0;
 }
 
@@ -1770,6 +1804,9 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
 	if (caa_unlikely(offsets.switch_new_start))
 		lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc,
 						 handle);
+
+	if (caa_unlikely(offsets.switch_new_end))
+		lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
+
 	ctx->slot_size = offsets.size;
 	ctx->pre_offset = offsets.begin;
 	ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
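
Note on the offset arithmetic: the `end - 1` in lib_ring_buffer_switch_new_end() and the `subbuf_offset(offsets->end, chan) == 0` trigger in try_reserve_slow can be checked in isolation. The sketch below is not part of the patch; it re-implements subbuf_offset()/subbuf_index() as standalone stand-ins, assuming the power-of-two sub-buffer geometry that the ring buffer's mask-based helpers rely on, with SUBBUF_SIZE and NUM_SUBBUF as hypothetical substitutes for the chan->backend fields.

#include <stdio.h>

#define SUBBUF_SIZE 4096UL		/* bytes per sub-buffer (power of two, assumed) */
#define NUM_SUBBUF  4UL			/* sub-buffers per buffer (power of two, assumed) */
#define BUF_SIZE    (SUBBUF_SIZE * NUM_SUBBUF)

/* Stand-in: offset within its sub-buffer. */
static unsigned long subbuf_offset(unsigned long offset)
{
	return offset & (SUBBUF_SIZE - 1);
}

/* Stand-in: index of the sub-buffer containing @offset. */
static unsigned long subbuf_index(unsigned long offset)
{
	return (offset & (BUF_SIZE - 1)) / SUBBUF_SIZE;
}

int main(void)
{
	/* A reservation whose end lands exactly on a sub-buffer boundary. */
	unsigned long end = 2 * SUBBUF_SIZE;

	/* try_reserve_slow's trigger condition for switch_new_end. */
	if (subbuf_offset(end) == 0)
		printf("end %lu is on a boundary: switch_new_end = 1\n", end);

	/*
	 * switch_new_end uses end - 1 so that both values refer to the
	 * sub-buffer just filled, not the next one.
	 */
	printf("endidx    = %lu (filled sub-buffer)\n", subbuf_index(end - 1));
	printf("data_size = %lu (== SUBBUF_SIZE)\n", subbuf_offset(end - 1) + 1);
	return 0;
}

With end == 2 * SUBBUF_SIZE, subbuf_index(end) would name sub-buffer 2 and subbuf_offset(end) would be 0; using end - 1 instead yields index 1 and a data size equal to the full sub-buffer, which is why the new function records the size of the sub-buffer that was just filled.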