X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=libringbuffer%2Ffrontend_api.h;h=4207574f55e59f36c2aab06a922fcadf4543d6d0;hb=3b8bedd809f1a5b56da5fc101a90b44263b0f473;hp=fd601ce5ca93589636d6932cc435a3a1c8f1677b;hpb=5198080d2234eb06e95d245d0a9747810f496475;p=lttng-ust.git

diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index fd601ce5..4207574f 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -18,12 +18,11 @@
 #include "frontend.h"
 
 /**
- * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
+ * lib_ring_buffer_nesting_inc - Ring buffer recursive use protection.
  *
- * Keeps a ring buffer nesting count as supplementary safety net to
- * ensure tracer client code will never trigger an endless recursion.
- * Returns the processor ID on success, -EPERM on failure (nesting count
- * too high).
+ * The ring buffer nesting count is a safety net to ensure tracer
+ * client code will never trigger an endless recursion.
+ * Returns 0 on success, -EPERM on failure (nesting count too high).
  *
  * asm volatile and "memory" clobber prevent the compiler from moving
  * instructions out of the ring buffer nesting count. This is required to ensure
@@ -32,27 +31,22 @@
  * section.
  */
 static inline
-int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+int lib_ring_buffer_nesting_inc(const struct lttng_ust_lib_ring_buffer_config *config)
 {
-	int cpu, nesting;
+	int nesting;
 
-	cpu = lttng_ust_get_cpu();
 	nesting = ++URCU_TLS(lib_ring_buffer_nesting);
 	cmm_barrier();
-
 	if (caa_unlikely(nesting > 4)) {
 		WARN_ON_ONCE(1);
 		URCU_TLS(lib_ring_buffer_nesting)--;
 		return -EPERM;
-	} else
-		return cpu;
+	}
+	return 0;
 }
 
-/**
- * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
- */
 static inline
-void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+void lib_ring_buffer_nesting_dec(const struct lttng_ust_lib_ring_buffer_config *config)
 {
 	cmm_barrier();
 	URCU_TLS(lib_ring_buffer_nesting)--;	/* TLS */
@@ -96,7 +90,7 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
 	ctx->slot_size = record_header_size(config, chan, *o_begin,
 					    before_hdr_pad, ctx, client_ctx);
 	ctx->slot_size +=
-		lib_ring_buffer_align(*o_begin + ctx->slot_size,
+		lttng_ust_lib_ring_buffer_align(*o_begin + ctx->slot_size,
 				      ctx->largest_align) + ctx->data_size;
 	if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
 		     > chan->backend.subbuf_size))
@@ -140,7 +134,7 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 		    void *client_ctx)
 {
 	struct lttng_ust_lib_ring_buffer_channel *chan = ctx->chan;
-	struct lttng_ust_shm_handle *handle = ctx->handle;
+	struct lttng_ust_shm_handle *handle = ctx->chan->handle;
 	struct lttng_ust_lib_ring_buffer *buf;
 	unsigned long o_begin, o_end, o_old;
 	size_t before_hdr_pad = 0;
@@ -148,10 +142,12 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
 		return -EAGAIN;
 
-	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
-	else
+	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+		ctx->reserve_cpu = lttng_ust_get_cpu();
+		buf = shmp(handle, chan->backend.buf[ctx->reserve_cpu].shmp);
+	} else {
 		buf = shmp(handle, chan->backend.buf[0].shmp);
+	}
 	if (caa_unlikely(!buf))
 		return -EIO;
 	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
@@ -232,7 +228,7 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
 			    const struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
 	struct lttng_ust_lib_ring_buffer_channel *chan = ctx->chan;
-	struct lttng_ust_shm_handle *handle = ctx->handle;
+	struct lttng_ust_shm_handle *handle = ctx->chan->handle;
 	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
 	unsigned long offset_end = ctx->buf_offset;
 	unsigned long endidx = subbuf_index(offset_end - 1, chan);
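Note on the calling-convention change above: lib_ring_buffer_get_cpu()/lib_ring_buffer_put_cpu() used to hand the caller a CPU number to place in ctx->cpu, whereas lib_ring_buffer_nesting_inc()/lib_ring_buffer_nesting_dec() only guard against runaway recursion, and lib_ring_buffer_reserve() now reads the CPU itself into ctx->reserve_cpu. A minimal sketch of the resulting client-side sequence follows; the wrapper function, its arguments, and the NULL client_ctx are hypothetical, while the lib_ring_buffer_* calls are from the patched frontend_api.h (lib_ring_buffer_write() comes from the companion backend.h).

/*
 * Hypothetical probe-side sketch (not upstream code): write one record
 * under the new nesting protection.  Assumes ctx was initialized by the
 * client for the target channel and payload size.
 */
static void client_write_record(const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_ctx *ctx,
				const void *payload, size_t len)
{
	/* Recursion guard only; no CPU id is returned anymore. */
	if (lib_ring_buffer_nesting_inc(config) < 0)
		return;		/* nested too deep: drop the record */

	/*
	 * reserve picks the CPU itself and records it in
	 * ctx->reserve_cpu (per-CPU allocation only).
	 */
	if (lib_ring_buffer_reserve(config, ctx, NULL))
		goto end;

	lib_ring_buffer_write(config, ctx, payload, len);
	lib_ring_buffer_commit(config, ctx);
end:
	lib_ring_buffer_nesting_dec(config);
}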