size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx);
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx);
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
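
The extra `client_ctx` argument lets a ring buffer client hand opaque, client-private state from the reservation entry point down into its `record_header_size` callback, without the generic ring buffer layer knowing that state's layout. A minimal sketch of this void-pointer pass-through pattern, with hypothetical `my_*` names that are not part of the patch:

    #include <stddef.h>

    /* Hypothetical client-private state, opaque to the generic layer. */
    struct my_client_state {
            size_t precomputed_len;
    };

    /* The generic layer stores and invokes the callback through void *. */
    typedef size_t (*header_size_cb)(size_t offset, void *client_ctx);

    /* The client casts the opaque pointer back to its concrete type. */
    static size_t my_record_header_size(size_t offset, void *client_ctx)
    {
            struct my_client_state *state = client_ctx;

            (void) offset;
            return state ? state->precomputed_len : 0;
    }

    int main(void)
    {
            header_size_cb cb = my_record_header_size;
            struct my_client_state state = { .precomputed_len = 24 };

            return (int) cb(0, &state);     /* 24 */
    }
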
static inline
int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx,
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
if (unlikely(subbuf_offset(*o_begin, chan) == 0))
	return 1;
ctx->slot_size = record_header_size(config, chan, *o_begin,
- before_hdr_pad, ctx);
+ before_hdr_pad, ctx, client_ctx);
ctx->slot_size +=
lib_ring_buffer_align(*o_begin + ctx->slot_size,
ctx->largest_align) + ctx->data_size;
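
The reserved slot is the record header, plus the padding needed to bring the payload up to `ctx->largest_align`, plus the payload itself. A standalone sketch of that arithmetic, assuming `lib_ring_buffer_align()` returns the number of padding bytes and that alignments are powers of two:

    #include <stdio.h>
    #include <stddef.h>

    /* Padding needed to align `offset` to `align` (a power of two). */
    static size_t align_pad(size_t offset, size_t align)
    {
            return (align - offset) & (align - 1);
    }

    int main(void)
    {
            size_t o_begin = 13, header = 6, data = 32, largest_align = 8;
            size_t slot_size = header;

            slot_size += align_pad(o_begin + slot_size, largest_align) + data;
            printf("slot_size = %zu\n", slot_size);   /* 6 + 5 + 32 = 43 */
            return 0;
    }
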
static inline
int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
struct channel *chan = ctx->chan;
struct lib_ring_buffer *buf;
/*
* Perform retryable operations.
*/
- if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+ if (unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
ctx->buf_offset = o_begin + before_hdr_pad;
return 0;
slow_path:
- return lib_ring_buffer_reserve_slow(ctx);
+ return lib_ring_buffer_reserve_slow(ctx, client_ctx);
}
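
Both reservation paths now receive the same `client_ctx` pointer, which matters because `record_header_size()` is invoked from the fast path and again from the slow path: if the two calls saw different context state they could disagree on the slot size. A stripped-down sketch of the pass-through shape, with the real synchronization elided and all names hypothetical:

    #include <stddef.h>

    struct ctx { size_t buf_offset; };

    /* Nonzero return means the fast path could not reserve. */
    static int try_reserve(struct ctx *ctx, void *client_ctx, size_t *o_begin)
    {
            (void) client_ctx;              /* forwarded to header sizing */
            *o_begin = ctx->buf_offset;
            return 0;
    }

    static int reserve_slow(struct ctx *ctx, void *client_ctx)
    {
            (void) ctx; (void) client_ctx;  /* same pointer, same sizing */
            return 0;
    }

    static int reserve(struct ctx *ctx, void *client_ctx)
    {
            size_t o_begin;

            if (try_reserve(ctx, client_ctx, &o_begin))
                    return reserve_slow(ctx, client_ctx);
            ctx->buf_offset = o_begin;
            return 0;
    }

    int main(void)
    {
            struct ctx c = { 0 };

            return reserve(&c, NULL);
    }
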
#endif
extern
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);
+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx);
extern
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
	enum switch_mode mode);
int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long reserve_commit_diff, offset_cmp;
offsets->size = config->cb.record_header_size(config, chan,
offsets->begin,
&offsets->pre_header_padding,
- ctx);
+ ctx, client_ctx);
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
config->cb.record_header_size(config, chan,
offsets->begin,
&offsets->pre_header_padding,
- ctx);
+ ctx, client_ctx);
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
* -EIO for other errors, else returns 0.
* It will take care of sub-buffer switching.
*/
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
struct channel *chan = ctx->chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
do {
ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
- ctx);
+ ctx, client_ctx);
if (unlikely(ret))
return ret;
} while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
} ctx;
};
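+/*
+ * Lengths of the channel (packet) and event context structures,
+ * measured once per event before reservation so the fast path, the
+ * slow path, and any retries all size the record header identically.
+ */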
+struct lttng_client_ctx {
+ size_t packet_context_len;
+ size_t event_context_len;
+};
static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}
static inline
-size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
+size_t ctx_get_aligned_size(size_t offset, struct lttng_ctx *ctx,
+ size_t ctx_len)
{
- int i;
size_t orig_offset = offset;
if (likely(!ctx))
return 0;
offset += lib_ring_buffer_align(offset, ctx->largest_align);
+ offset += ctx_len;
+ return offset - orig_offset;
+}
+
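+/*
+ * Walk every context field once, accumulating each field's
+ * self-reported size (including its own padding) from a zero base.
+ */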
+static inline
+void ctx_get_struct_size(struct lttng_ctx *ctx, size_t *ctx_len)
+{
+ int i;
+ size_t offset = 0;
+
+ if (likely(!ctx)) {
+ *ctx_len = 0;
+ return;
+ }
for (i = 0; i < ctx->nr_fields; i++)
offset += ctx->fields[i].get_size(offset);
- return offset - orig_offset;
+ *ctx_len = offset;
}
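
Splitting the old `ctx_get_size()` into a measure-once step (`ctx_get_struct_size()`) and a cheap align-and-add step (`ctx_get_aligned_size()`) means the per-field `get_size()` callbacks run once per event instead of once per reservation attempt. A self-contained sketch of the two-step pattern, with an invented field layout for illustration:

    #include <stdio.h>
    #include <stddef.h>

    struct field {
            size_t (*get_size)(size_t offset);
    };

    /* Hypothetical 8-byte field that aligns itself to 8 bytes. */
    static size_t u64_field_size(size_t offset)
    {
            return ((8 - offset) & 7) + 8;  /* padding + payload */
    }

    /* Step 1: walk the fields once, from a zero-based offset. */
    static size_t struct_size(const struct field *f, int nr)
    {
            size_t offset = 0;
            int i;

            for (i = 0; i < nr; i++)
                    offset += f[i].get_size(offset);
            return offset;
    }

    /* Step 2: at any record offset, align and add the cached length. */
    static size_t aligned_size(size_t offset, size_t align, size_t len)
    {
            return ((align - offset) & (align - 1)) + len;
    }

    int main(void)
    {
            struct field fields[] = { { u64_field_size }, { u64_field_size } };
            size_t len = struct_size(fields, 2);    /* measured once */

            printf("len=%zu, at offset 13 -> %zu bytes\n",
                   len, aligned_size(13, 8, len));  /* 16, 3 + 16 = 19 */
            return 0;
    }
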
static inline
size_t record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ struct lttng_client_ctx *client_ctx)
{
struct lttng_channel *lttng_chan = channel_get_private(chan);
struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
struct lttng_event *event = lttng_probe_ctx->event;
padding = 0;
WARN_ON_ONCE(1);
}
- offset += ctx_get_size(offset, lttng_chan->ctx);
- offset += ctx_get_size(offset, event->ctx);
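+ /* Context lengths were pre-computed by the caller; only the alignment
+ * padding still depends on the current offset. */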
+ offset += ctx_get_aligned_size(offset, lttng_chan->ctx,
+ client_ctx->packet_context_len);
+ offset += ctx_get_aligned_size(offset, event->ctx,
+ client_ctx->event_context_len);
*pre_header_padding = padding;
return offset - orig_offset;
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
return record_header_size(config, chan, offset,
- pre_header_padding, ctx);
+ pre_header_padding, ctx, client_ctx);
}
static
int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
	uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+ struct lttng_event *event = lttng_probe_ctx->event;
+ struct lttng_client_ctx client_ctx;
int ret, cpu;
+ /* Compute internal size of context structures. */
+ ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len);
+ ctx_get_struct_size(event->ctx, &client_ctx.event_context_len);
+
cpu = lib_ring_buffer_get_cpu(&client_config);
if (unlikely(cpu < 0))
return -EPERM;
WARN_ON_ONCE(1);
}
- ret = lib_ring_buffer_reserve(&client_config, ctx);
+ ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
if (unlikely(ret))
goto put;
lib_ring_buffer_backend_get_pages(&client_config, ctx,
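
The reserve path above now has a fixed shape: measure both context structures once, store the lengths in a stack-allocated `struct lttng_client_ctx`, and pass its address down so every header-size computation sees the same numbers. A compressed, self-contained sketch of that flow, with all names hypothetical stand-ins for the LTTng internals:

    #include <stdio.h>
    #include <stddef.h>

    struct client_lens {            /* stands in for struct lttng_client_ctx */
            size_t packet_context_len;
            size_t event_context_len;
    };

    /* Header size: offset-dependent alignment plus the cached lengths. */
    static size_t header_size(size_t offset, const struct client_lens *lens)
    {
            size_t orig_offset = offset;

            offset += 4;                    /* fixed header fields */
            offset += (8 - offset) & 7;     /* align contexts to 8 bytes */
            offset += lens->packet_context_len;
            offset += lens->event_context_len;
            return offset - orig_offset;
    }

    static int reserve(size_t *buf_offset, size_t data_size,
                       const struct client_lens *lens)
    {
            size_t slot = header_size(*buf_offset, lens) + data_size;

            /* The real code cmpxchgs the offset and may retry; `lens`
             * stays constant across every retry. */
            *buf_offset += slot;
            return 0;
    }

    int main(void)
    {
            struct client_lens lens = {
                    .packet_context_len = 16,
                    .event_context_len = 8,
            };
            size_t offset = 0;

            reserve(&offset, 32, &lens);    /* measure once, then reserve */
            printf("next offset: %zu\n", offset);   /* 32 + 32 = 64 */
            return 0;
    }
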
size_t record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
return 0;
}
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
return 0;
}
static
int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
{
int ret;
- ret = lib_ring_buffer_reserve(&client_config, ctx);
+ ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
if (ret)
return ret;
lib_ring_buffer_backend_get_pages(&client_config, ctx,
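
The metadata client, by contrast, has no record header at all: its size callbacks return 0 without touching `client_ctx`, which is why the reserve call above can safely pass NULL. A minimal sketch of that contract, with a hypothetical name:

    #include <stddef.h>

    /* Clients that size no contexts may receive NULL and must not
     * dereference it; the metadata record header is simply empty. */
    static size_t metadata_record_header_size(void *client_ctx)
    {
            (void) client_ctx;      /* unused: no record header */
            return 0;
    }

    int main(void)
    {
            return (int) metadata_record_header_size(NULL);
    }
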