From: Mathieu Desnoyers
Date: Fri, 26 Mar 2021 17:52:25 +0000 (-0400)
Subject: ring buffer context: cpu number becomes an output of reserve
X-Git-Tag: v2.13.0-rc1~202
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=7489fcb466935b3eeb36e99afbbb87188278cb91;p=lttng-ust.git

ring buffer context: cpu number becomes an output of reserve

In order to facilitate eventual integration of a ring buffer scheme
based on Restartable Sequences (sys_rseq), change the ownership of the
ring buffer context "cpu" field so it is now populated by the ring
buffer reserve operation.

This means an rseq-based reserve could retry on the new current CPU
after an rseq-cmpxchg fails to reserve because a migration occurred.

Signed-off-by: Mathieu Desnoyers
Change-Id: If0c0689446975085b5e22b14aef6a15f12f8ff9f
---
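Editor's note: the sketch below (not part of the patch) illustrates the
caller-visible contract after this change, built only from declarations
the diff touches. The sketch_reserve() helper and its event_reserve
function pointer are hypothetical stand-ins for a client's
chan->ops->event_reserve as called from ustctl.c below; everything else
comes from the patched lttng/ringbuffer-context.h.

#include <stddef.h>
#include <stdint.h>
#include <lttng/ringbuffer-context.h>

static
int sketch_reserve(struct lttng_ust_lib_ring_buffer_channel *chan,
		struct lttng_ust_shm_handle *handle,
		int (*event_reserve)(struct lttng_ust_lib_ring_buffer_ctx *ctx,
			uint32_t event_id),
		size_t len)
{
	struct lttng_ust_lib_ring_buffer_ctx ctx;
	int ret;

	/* No "cpu" argument anymore: callers used to pass -1 or the current CPU here. */
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, len, sizeof(char), handle);
	/* ctx.reserve_cpu is initialized to -1 by lib_ring_buffer_ctx_init(). */
	ret = event_reserve(&ctx, 0);
	if (ret < 0)
		return ret;
	/*
	 * On a per-CPU channel, ctx.reserve_cpu now holds the CPU picked by
	 * the reserve operation; on a global channel it stays -1. Making the
	 * CPU an output is what lets an rseq-based reserve retry on whatever
	 * CPU the thread migrated to.
	 */
	return 0;
}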
diff --git a/include/lttng/ringbuffer-context.h b/include/lttng/ringbuffer-context.h
index e42c3f52..53670d93 100644
--- a/include/lttng/ringbuffer-context.h
+++ b/include/lttng/ringbuffer-context.h
@@ -51,9 +51,9 @@ struct lttng_ust_lib_ring_buffer_ctx {
 					 * alignment of the largest element
 					 * in the payload
 					 */
-	int cpu;			/* processor id */
 	/* output from lib_ring_buffer_reserve() */
+	int reserve_cpu;		/* processor id updated by the reserve */
 	struct lttng_ust_lib_ring_buffer *buf;	/*
 					 * buffer corresponding to processor id
 					 * for this channel
 					 */
@@ -81,25 +81,24 @@ struct lttng_ust_lib_ring_buffer_ctx {
  * @priv: client private data
  * @data_size: size of record data payload
  * @largest_align: largest alignment within data payload types
- * @cpu: processor id
  */
 static inline lttng_ust_notrace
 void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
 			      struct lttng_ust_lib_ring_buffer_channel *chan,
 			      void *priv, size_t data_size, int largest_align,
-			      int cpu, struct lttng_ust_shm_handle *handle);
+			      struct lttng_ust_shm_handle *handle);
 static inline
 void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
 			      struct lttng_ust_lib_ring_buffer_channel *chan,
 			      void *priv, size_t data_size, int largest_align,
-			      int cpu, struct lttng_ust_shm_handle *handle)
+			      struct lttng_ust_shm_handle *handle)
 {
 	ctx->struct_size = sizeof(struct lttng_ust_lib_ring_buffer_ctx);
 	ctx->chan = chan;
 	ctx->priv = priv;
 	ctx->data_size = data_size;
+	ctx->reserve_cpu = -1;
 	ctx->largest_align = largest_align;
-	ctx->cpu = cpu;
 	ctx->rflags = 0;
 	ctx->handle = handle;
 	ctx->ip = 0;
diff --git a/include/lttng/ust-tracepoint-event.h b/include/lttng/ust-tracepoint-event.h
index d12afa0b..971a83c8 100644
--- a/include/lttng/ust-tracepoint-event.h
+++ b/include/lttng/ust-tracepoint-event.h
@@ -853,7 +853,7 @@ void __event_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args)) \
 	__lttng_ctx.struct_size = sizeof(struct lttng_ust_stack_ctx); \
 	__lttng_ctx.event_recorder = __event_recorder; \
 	lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_ctx, __event_len, \
-				 __event_align, -1, __chan->handle); \
+				 __event_align, __chan->handle); \
 	__ctx.ip = _TP_IP_PARAM(TP_IP_PARAM); \
 	__ret = __chan->ops->event_reserve(&__ctx, __event_recorder->id); \
 	if (__ret < 0) \
diff --git a/liblttng-ust-ctl/ustctl.c b/liblttng-ust-ctl/ustctl.c
index 89b566bd..394e1417 100644
--- a/liblttng-ust-ctl/ustctl.c
+++ b/liblttng-ust-ctl/ustctl.c
@@ -1354,7 +1354,7 @@ int ustctl_write_metadata_to_channel(
 			lttng_chan_buf->ops->priv->packet_avail_size(lttng_chan_buf->chan, lttng_chan_buf->handle),
 			len - pos);
 		lib_ring_buffer_ctx_init(&ctx, lttng_chan_buf->chan, NULL, reserve_len,
-					 sizeof(char), -1, lttng_chan_buf->handle);
+					 sizeof(char), lttng_chan_buf->handle);
 		/*
 		 * We don't care about metadata buffer's records lost
 		 * count, because we always retry here. Report error if
@@ -1401,7 +1401,7 @@ ssize_t ustctl_write_one_packet_to_channel(
 			lttng_chan_buf->ops->priv->packet_avail_size(lttng_chan_buf->chan, lttng_chan_buf->handle),
 			len);
 	lib_ring_buffer_ctx_init(&ctx, lttng_chan_buf->chan, NULL, reserve_len,
-				 sizeof(char), -1, lttng_chan_buf->handle);
+				 sizeof(char), lttng_chan_buf->handle);
 	ret = lttng_chan_buf->ops->event_reserve(&ctx, 0);
 	if (ret != 0) {
 		DBG("LTTng: event reservation failed");
diff --git a/liblttng-ust/lttng-ring-buffer-client.h b/liblttng-ust/lttng-ring-buffer-client.h
index 8e14acf5..4e2b86a2 100644
--- a/liblttng-ust/lttng-ring-buffer-client.h
+++ b/liblttng-ust/lttng-ring-buffer-client.h
@@ -699,7 +699,7 @@ int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
 	struct lttng_ust_stack_ctx *lttng_ctx = ctx->priv;
 	struct lttng_ust_event_recorder *event_recorder = lttng_ctx->event_recorder;
 	struct lttng_client_ctx client_ctx;
-	int ret, cpu;
+	int ret;
 
 	client_ctx.chan_ctx = lttng_ust_rcu_dereference(lttng_chan->priv->ctx);
 	client_ctx.event_ctx = lttng_ust_rcu_dereference(event_recorder->priv->ctx);
@@ -709,10 +709,8 @@ int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
 	ctx_get_struct_size(client_ctx.event_ctx, &client_ctx.event_context_len,
 			APP_CTX_ENABLED);
 
-	cpu = lib_ring_buffer_get_cpu(&client_config);
-	if (cpu < 0)
+	if (lib_ring_buffer_nesting_inc(&client_config) < 0)
 		return -EPERM;
-	ctx->cpu = cpu;
 
 	switch (lttng_chan->priv->header_type) {
 	case 1:	/* compact */
@@ -738,7 +736,7 @@ int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
 		lttng_write_event_header(&client_config, ctx, &client_ctx, event_id);
 	return 0;
 put:
-	lib_ring_buffer_put_cpu(&client_config);
+	lib_ring_buffer_nesting_dec(&client_config);
 	return ret;
 }
 
@@ -746,7 +744,7 @@ static
 void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
 	lib_ring_buffer_commit(&client_config, ctx);
-	lib_ring_buffer_put_cpu(&client_config);
+	lib_ring_buffer_nesting_dec(&client_config);
 }
 
 static
diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index fd601ce5..96f79554 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -18,12 +18,11 @@
 #include "frontend.h"
 
 /**
- * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
+ * lib_ring_buffer_nesting_inc - Ring buffer recursive use protection.
  *
- * Keeps a ring buffer nesting count as supplementary safety net to
- * ensure tracer client code will never trigger an endless recursion.
- * Returns the processor ID on success, -EPERM on failure (nesting count
- * too high).
+ * The ring buffer nesting count is a safety net to ensure tracer
+ * client code will never trigger an endless recursion.
+ * Returns 0 on success, -EPERM on failure (nesting count too high).
  *
  * asm volatile and "memory" clobber prevent the compiler from moving
  * instructions out of the ring buffer nesting count. This is required to ensure
 * that probe side-effects which can cause recursion (e.g. unforeseen traps,
 * divisions by 0, ...) are triggered within the incremented nesting count
 * section.
 */
 static inline
-int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+int lib_ring_buffer_nesting_inc(const struct lttng_ust_lib_ring_buffer_config *config)
 {
-	int cpu, nesting;
+	int nesting;
 
-	cpu = lttng_ust_get_cpu();
 	nesting = ++URCU_TLS(lib_ring_buffer_nesting);
 	cmm_barrier();
-
 	if (caa_unlikely(nesting > 4)) {
 		WARN_ON_ONCE(1);
 		URCU_TLS(lib_ring_buffer_nesting)--;
 		return -EPERM;
-	} else
-		return cpu;
+	}
+	return 0;
 }
 
-/**
- * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
- */
 static inline
-void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+void lib_ring_buffer_nesting_dec(const struct lttng_ust_lib_ring_buffer_config *config)
 {
 	cmm_barrier();
 	URCU_TLS(lib_ring_buffer_nesting)--;	/* TLS */
@@ -148,10 +142,12 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
 		return -EAGAIN;
 
-	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
-	else
+	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+		ctx->reserve_cpu = lttng_ust_get_cpu();
+		buf = shmp(handle, chan->backend.buf[ctx->reserve_cpu].shmp);
+	} else {
 		buf = shmp(handle, chan->backend.buf[0].shmp);
+	}
 	if (caa_unlikely(!buf))
 		return -EIO;
 	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index 7b276ad9..83da122b 100644
--- a/libringbuffer/ring_buffer_frontend.c
+++ b/libringbuffer/ring_buffer_frontend.c
@@ -2321,7 +2321,7 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
 	int ret;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
+		buf = shmp(handle, chan->backend.buf[ctx->reserve_cpu].shmp);
 	else
 		buf = shmp(handle, chan->backend.buf[0].shmp);
 	if (!buf)
diff --git a/libringbuffer/ringbuffer-config.h b/libringbuffer/ringbuffer-config.h
index 0d33ace2..badf7566 100644
--- a/libringbuffer/ringbuffer-config.h
+++ b/libringbuffer/ringbuffer-config.h
@@ -96,9 +96,7 @@ struct lttng_ust_lib_ring_buffer_client_cb {
  * alloc/sync pairs:
  *
  * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
- *   Per-cpu buffers with per-cpu synchronization. Tracing must be performed
- *   with preemption disabled (lib_ring_buffer_get_cpu() and
- *   lib_ring_buffer_put_cpu()).
+ *   Per-cpu buffers with per-cpu synchronization.
  *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
 *   Per-cpu buffer with global synchronization. Tracing can be performed with
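Editor's note: a minimal sketch (not part of the patch) of how the
renamed helpers are meant to be paired, mirroring lttng_event_reserve()
and lttng_event_commit() above. sketch_guarded_record() and its record
callback are hypothetical; only lib_ring_buffer_nesting_inc()/_dec()
come from this patch, and the include assumes the file is built inside
libringbuffer, where the private frontend_api.h header is visible.

#include <errno.h>

#include "frontend_api.h"

static
int sketch_guarded_record(const struct lttng_ust_lib_ring_buffer_config *config,
		int (*record)(void *data), void *data)
{
	int ret;

	/*
	 * Recursion guard only: unlike lib_ring_buffer_get_cpu(), no CPU
	 * number is returned; the reserve path now looks the CPU up itself.
	 */
	if (lib_ring_buffer_nesting_inc(config) < 0)
		return -EPERM;	/* more than 4 nested probes: drop the event */
	ret = record(data);	/* reserve + write + commit happen in here */
	lib_ring_buffer_nesting_dec(config);
	return ret;
}

The design consequence: because the CPU number is read inside
lib_ring_buffer_reserve() and reported back through ctx->reserve_cpu, a
future rseq-based fast path can simply re-run the reserve after a failed
rseq-cmpxchg and pick up the buffer of the CPU the thread migrated to.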