size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx);
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx);
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
} ctx;
};
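+/*
+ * Per-event context lengths, computed once by the client before space
+ * is reserved and handed down to the record_header_size() callback
+ * through the void *client_ctx parameter.
+ */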
+struct lttng_client_ctx {
+ size_t packet_context_len;
+ size_t event_context_len;
+};
static inline uint64_t lib_ring_buffer_clock_read(struct channel *chan)
{
}
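+/*
+ * Space taken at @offset by a context whose aggregate field size
+ * (@ctx_len) has already been computed, including padding to the
+ * context's largest alignment.
+ */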
static inline
-size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx,
- enum app_ctx_mode mode)
+size_t ctx_get_aligned_size(size_t offset, struct lttng_ctx *ctx,
+ size_t ctx_len)
{
- int i;
size_t orig_offset = offset;
if (caa_likely(!ctx))
return 0;
offset += lib_ring_buffer_align(offset, ctx->largest_align);
+ offset += ctx_len;
+ return offset - orig_offset;
+}
+
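+/*
+ * Aggregate size of the context fields, obtained by querying each
+ * field's get_size() callback. The result is stored in *ctx_len so it
+ * only needs to be computed once per event.
+ */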
+static inline
+void ctx_get_struct_size(struct lttng_ctx *ctx, size_t *ctx_len,
+ enum app_ctx_mode mode)
+{
+ int i;
+ size_t offset = 0;
+
+ if (caa_likely(!ctx)) {
+ *ctx_len = 0;
+ return;
+ }
for (i = 0; i < ctx->nr_fields; i++) {
if (mode == APP_CTX_ENABLED) {
offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
}
}
- return offset - orig_offset;
+ *ctx_len = offset;
}
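+/*
+ * client_ctx supplies the packet and event context lengths precomputed
+ * by the caller (see lttng_event_reserve()), so the per-field get_size()
+ * callbacks no longer need to be invoked on this path.
+ */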
static inline
size_t record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_client_ctx *client_ctx)
{
struct lttng_channel *lttng_chan = channel_get_private(chan);
struct lttng_event *event = ctx->priv;
}
if (lttng_ctx) {
/* 2.8+ probe ABI. */
- offset += ctx_get_size(offset, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
- offset += ctx_get_size(offset, lttng_ctx->event_ctx, APP_CTX_ENABLED);
+ offset += ctx_get_aligned_size(offset, lttng_ctx->chan_ctx,
+ client_ctx->packet_context_len);
+ offset += ctx_get_aligned_size(offset, lttng_ctx->event_ctx,
+ client_ctx->event_context_len);
} else {
/* Pre 2.8 probe ABI. */
- offset += ctx_get_size(offset, lttng_chan->ctx, APP_CTX_DISABLED);
- offset += ctx_get_size(offset, event->ctx, APP_CTX_DISABLED);
+ offset += ctx_get_aligned_size(offset, lttng_chan->ctx,
+ client_ctx->packet_context_len);
+ offset += ctx_get_aligned_size(offset, event->ctx,
+ client_ctx->event_context_len);
}
*pre_header_padding = padding;
return offset - orig_offset;
size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
return record_header_size(config, chan, offset,
- pre_header_padding, ctx);
+ pre_header_padding, ctx, client_ctx);
}
/**
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_event *event = ctx->priv;
+ struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
+ struct lttng_client_ctx client_ctx;
int ret, cpu;
+ /* Compute internal size of context structures. */
+
+ if (lttng_ctx) {
+ /* 2.8+ probe ABI. */
+ ctx_get_struct_size(lttng_ctx->chan_ctx, &client_ctx.packet_context_len,
+ APP_CTX_ENABLED);
+ ctx_get_struct_size(lttng_ctx->event_ctx, &client_ctx.event_context_len,
+ APP_CTX_ENABLED);
+ } else {
+ /* Pre 2.8 probe ABI. */
+ ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len,
+ APP_CTX_DISABLED);
+ ctx_get_struct_size(event->ctx, &client_ctx.event_context_len,
+ APP_CTX_DISABLED);
+ }
+
cpu = lib_ring_buffer_get_cpu(&client_config);
if (cpu < 0)
return -EPERM;
WARN_ON_ONCE(1);
}
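+ /*
+ * Pass the precomputed context lengths down to the
+ * record_header_size() callback through client_ctx.
+ */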
- ret = lib_ring_buffer_reserve(&client_config, ctx);
+ ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
if (caa_unlikely(ret))
goto put;
if (caa_likely(ctx->ctx_len
size_t record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
return 0;
}
size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
return 0;
}
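+/*
+ * Clients that do not emit a per-record header (likely the metadata
+ * client) return 0 above and pass a NULL client_ctx when reserving, so
+ * the pointer is never dereferenced on their behalf.
+ */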
{
int ret;
- ret = lib_ring_buffer_reserve(&client_config, ctx);
+ ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
if (ret)
return ret;
if (caa_likely(ctx->ctx_len
static inline
int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx,
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
return 1;
ctx->slot_size = record_header_size(config, chan, *o_begin,
- before_hdr_pad, ctx);
+ before_hdr_pad, ctx, client_ctx);
ctx->slot_size +=
lib_ring_buffer_align(*o_begin + ctx->slot_size,
ctx->largest_align) + ctx->data_size;
static inline
int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
struct channel *chan = ctx->chan;
struct lttng_ust_shm_handle *handle = ctx->handle;
/*
* Perform retryable operations.
*/
- if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+ if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
ctx->buf_offset = o_begin + before_hdr_pad;
return 0;
slow_path:
- return lib_ring_buffer_reserve_slow(ctx);
+ return lib_ring_buffer_reserve_slow(ctx, client_ctx);
}
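+/*
+ * The generic ring buffer layer treats client_ctx as opaque: both the
+ * fast path above and the slow path (lib_ring_buffer_reserve_slow())
+ * simply forward it to the client's record_header_size() callback.
+ */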
/**
#endif
extern
-int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx);
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx);
extern
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- struct lttng_ust_lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
struct lttng_ust_shm_handle *handle = ctx->handle;
offsets->size = config->cb.record_header_size(config, chan,
offsets->begin,
&offsets->pre_header_padding,
- ctx);
+ ctx, client_ctx);
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
config->cb.record_header_size(config, chan,
offsets->begin,
&offsets->pre_header_padding,
- ctx);
+ ctx, client_ctx);
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
* -EIO for other errors, else returns 0.
* It will take care of sub-buffer switching.
*/
-int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
struct channel *chan = ctx->chan;
struct lttng_ust_shm_handle *handle = ctx->handle;
do {
ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
- ctx);
+ ctx, client_ctx);
if (caa_unlikely(ret))
return ret;
} while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,