* alignment of the largest element
* in the payload
*/
- int cpu; /* processor id */
/* output from lib_ring_buffer_reserve() */
+ int reserve_cpu; /* processor id updated by the reserve */
struct lttng_ust_lib_ring_buffer *buf; /*
* buffer corresponding to processor id
* for this channel
* @priv: client private data
* @data_size: size of record data payload
* @largest_align: largest alignment within data payload types
- * @cpu: processor id
*/
static inline lttng_ust_notrace
void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct lttng_ust_lib_ring_buffer_channel *chan,
void *priv, size_t data_size, int largest_align,
- int cpu, struct lttng_ust_shm_handle *handle);
+ struct lttng_ust_shm_handle *handle);
static inline
void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct lttng_ust_lib_ring_buffer_channel *chan,
void *priv, size_t data_size, int largest_align,
- int cpu, struct lttng_ust_shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
ctx->struct_size = sizeof(struct lttng_ust_lib_ring_buffer_ctx);
ctx->chan = chan;
ctx->priv = priv;
ctx->data_size = data_size;
+ ctx->reserve_cpu = -1;
ctx->largest_align = largest_align;
- ctx->cpu = cpu;
ctx->rflags = 0;
ctx->handle = handle;
ctx->ip = 0;
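
For illustration, a minimal caller-side sketch with the updated signature; `chan`, `handle` and `reserve_len` are placeholders standing in for whatever the client already has at hand, not names taken from this patch:

    struct lttng_ust_lib_ring_buffer_ctx ctx;

    /* No cpu argument anymore: reserve_cpu starts out at -1 ... */
    lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
                             sizeof(char), handle);
    /* ... and is only filled in later by the reserve step (per-cpu
     * buffers), so the caller computes nothing cpu-related here. */
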
__lttng_ctx.struct_size = sizeof(struct lttng_ust_stack_ctx); \
__lttng_ctx.event_recorder = __event_recorder; \
lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_ctx, __event_len, \
- __event_align, -1, __chan->handle); \
+ __event_align, __chan->handle); \
__ctx.ip = _TP_IP_PARAM(TP_IP_PARAM); \
__ret = __chan->ops->event_reserve(&__ctx, __event_recorder->id); \
if (__ret < 0) \
lttng_chan_buf->ops->priv->packet_avail_size(lttng_chan_buf->chan, lttng_chan_buf->handle),
len - pos);
lib_ring_buffer_ctx_init(&ctx, lttng_chan_buf->chan, NULL, reserve_len,
- sizeof(char), -1, lttng_chan_buf->handle);
+ sizeof(char), lttng_chan_buf->handle);
/*
* We don't care about metadata buffer's records lost
* count, because we always retry here. Report error if
lttng_chan_buf->ops->priv->packet_avail_size(lttng_chan_buf->chan, lttng_chan_buf->handle),
len);
lib_ring_buffer_ctx_init(&ctx, lttng_chan_buf->chan, NULL, reserve_len,
- sizeof(char), -1, lttng_chan_buf->handle);
+ sizeof(char), lttng_chan_buf->handle);
ret = lttng_chan_buf->ops->event_reserve(&ctx, 0);
if (ret != 0) {
DBG("LTTng: event reservation failed");
struct lttng_ust_stack_ctx *lttng_ctx = ctx->priv;
struct lttng_ust_event_recorder *event_recorder = lttng_ctx->event_recorder;
struct lttng_client_ctx client_ctx;
- int ret, cpu;
+ int ret;
client_ctx.chan_ctx = lttng_ust_rcu_dereference(lttng_chan->priv->ctx);
client_ctx.event_ctx = lttng_ust_rcu_dereference(event_recorder->priv->ctx);
ctx_get_struct_size(client_ctx.event_ctx, &client_ctx.event_context_len,
APP_CTX_ENABLED);
- cpu = lib_ring_buffer_get_cpu(&client_config);
- if (cpu < 0)
+ if (lib_ring_buffer_nesting_inc(&client_config) < 0)
return -EPERM;
- ctx->cpu = cpu;
switch (lttng_chan->priv->header_type) {
case 1: /* compact */
lttng_write_event_header(&client_config, ctx, &client_ctx, event_id);
return 0;
put:
- lib_ring_buffer_put_cpu(&client_config);
+ lib_ring_buffer_nesting_dec(&client_config);
return ret;
}
void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
lib_ring_buffer_commit(&client_config, ctx);
- lib_ring_buffer_put_cpu(&client_config);
+ lib_ring_buffer_nesting_dec(&client_config);
}
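
Taken together, a hedged sketch of the client-side sequence after this change (names as used in this file; the reserve internals are elided):

    /* lttng_event_reserve() */
    if (lib_ring_buffer_nesting_inc(&client_config) < 0)
        return -EPERM;
    /* ... the reserve path picks the current cpu and records it in
     * ctx->reserve_cpu; on failure we goto put, which undoes the
     * nesting count ... */

    /* lttng_event_commit() */
    lib_ring_buffer_commit(&client_config, ctx);
    lib_ring_buffer_nesting_dec(&client_config);

The cpu lookup therefore happens exactly once, inside the reserve path, instead of being passed down by every caller.
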
static
#include "frontend.h"
/**
- * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
+ * lib_ring_buffer_nesting_inc - Ring buffer recursive use protection.
*
- * Keeps a ring buffer nesting count as supplementary safety net to
- * ensure tracer client code will never trigger an endless recursion.
- * Returns the processor ID on success, -EPERM on failure (nesting count
- * too high).
+ * The ring buffer nesting count is a safety net to ensure tracer
+ * client code will never trigger an endless recursion.
+ * Returns 0 on success, -EPERM on failure (nesting count too high).
*
* asm volatile and "memory" clobber prevent the compiler from moving
* instructions out of the ring buffer nesting count. This is required to ensure
* section.
*/
static inline
-int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+int lib_ring_buffer_nesting_inc(const struct lttng_ust_lib_ring_buffer_config *config)
{
- int cpu, nesting;
+ int nesting;
- cpu = lttng_ust_get_cpu();
nesting = ++URCU_TLS(lib_ring_buffer_nesting);
cmm_barrier();
-
if (caa_unlikely(nesting > 4)) {
WARN_ON_ONCE(1);
URCU_TLS(lib_ring_buffer_nesting)--;
return -EPERM;
- } else
- return cpu;
+ }
+ return 0;
}
-/**
- * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
- */
static inline
-void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+void lib_ring_buffer_nesting_dec(const struct lttng_ust_lib_ring_buffer_config *config)
{
cmm_barrier();
URCU_TLS(lib_ring_buffer_nesting)--; /* TLS */
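
A short usage note on the renamed pair: unlike the old get_cpu/put_cpu helpers, the return value no longer carries a processor id, only the recursion check. A minimal hedged sketch of a caller:

    if (lib_ring_buffer_nesting_inc(&client_config) < 0)
        return -EPERM;   /* more than 4 nested probes: drop the event */
    /* ... reserve / write / commit ... */
    lib_ring_buffer_nesting_dec(&client_config);

`client_config` here stands for whichever ring buffer client configuration the caller uses, as in the client code above.
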
if (caa_unlikely(uatomic_read(&chan->record_disabled)))
return -EAGAIN;
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
- else
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+ ctx->reserve_cpu = lttng_ust_get_cpu();
+ buf = shmp(handle, chan->backend.buf[ctx->reserve_cpu].shmp);
+ } else {
buf = shmp(handle, chan->backend.buf[0].shmp);
+ }
if (caa_unlikely(!buf))
return -EIO;
if (caa_unlikely(uatomic_read(&buf->record_disabled)))
int ret;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
+ buf = shmp(handle, chan->backend.buf[ctx->reserve_cpu].shmp);
else
buf = shmp(handle, chan->backend.buf[0].shmp);
if (!buf)
* alloc/sync pairs:
*
* RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
- * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
- * with preemption disabled (lib_ring_buffer_get_cpu() and
- * lib_ring_buffer_put_cpu()).
+ * Per-cpu buffers with per-cpu synchronization.
*
* RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
* Per-cpu buffer with global synchronization. Tracing can be performed with