goto __post; \
} \
__event_align = __event_get_align__##_name(_locvar_args); \
- lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event_len, \
- __event_align, -1, &__lttng_probe_ctx); \
+ lib_ring_buffer_ctx_init(&__ctx, __event_recorder, __event_len, \
+ __event_align, &__lttng_probe_ctx); \
__ret = __chan->ops->event_reserve(&__ctx, __event_recorder->priv->id); \
if (__ret < 0) \
goto __post; \
struct lib_ring_buffer_ctx *ctx,
const void *src, size_t len)
{
- struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
- struct channel_backend *chanb = &ctx->chan->backend;
+ struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
- size_t offset = ctx->buf_offset;
+ size_t offset = ctx->priv.buf_offset;
struct lib_ring_buffer_backend_pages *backend_pages;
if (unlikely(!len))
src, len);
else
_lib_ring_buffer_write(bufb, offset, src, len, 0);
- ctx->buf_offset += len;
+ ctx->priv.buf_offset += len;
}
/**
struct lib_ring_buffer_ctx *ctx, int c, size_t len)
{
- struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
- struct channel_backend *chanb = &ctx->chan->backend;
+ struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
- size_t offset = ctx->buf_offset;
+ size_t offset = ctx->priv.buf_offset;
struct lib_ring_buffer_backend_pages *backend_pages;
if (unlikely(!len))
c, len);
else
_lib_ring_buffer_memset(bufb, offset, c, len, 0);
- ctx->buf_offset += len;
+ ctx->priv.buf_offset += len;
}
/*
struct lib_ring_buffer_ctx *ctx,
const char *src, size_t len, int pad)
{
- struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
- struct channel_backend *chanb = &ctx->chan->backend;
+ struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
- size_t offset = ctx->buf_offset;
+ size_t offset = ctx->priv.buf_offset;
struct lib_ring_buffer_backend_pages *backend_pages;
if (unlikely(!len))
} else {
_lib_ring_buffer_strcpy(bufb, offset, src, len, 0, pad);
}
- ctx->buf_offset += len;
+ ctx->priv.buf_offset += len;
}
/**
struct lib_ring_buffer_ctx *ctx,
const void __user *src, size_t len)
{
- struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
- struct channel_backend *chanb = &ctx->chan->backend;
+ struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
- size_t offset = ctx->buf_offset;
+ size_t offset = ctx->priv.buf_offset;
struct lib_ring_buffer_backend_pages *backend_pages;
unsigned long ret;
_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
}
pagefault_enable();
- ctx->buf_offset += len;
+ ctx->priv.buf_offset += len;
return;
struct lib_ring_buffer_ctx *ctx,
const void __user *src, size_t len, int pad)
{
- struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
- struct channel_backend *chanb = &ctx->chan->backend;
+ struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
- size_t offset = ctx->buf_offset;
+ size_t offset = ctx->priv.buf_offset;
struct lib_ring_buffer_backend_pages *backend_pages;
if (unlikely(!len))
len, 0, pad);
}
pagefault_enable();
- ctx->buf_offset += len;
+ ctx->priv.buf_offset += len;
return;
struct lib_ring_buffer_ctx *ctx,
struct lib_ring_buffer_backend_pages **backend_pages)
{
- struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
- struct channel_backend *chanb = &ctx->chan->backend;
- size_t sbidx, offset = ctx->buf_offset;
+ struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct channel_backend *chanb = &ctx->priv.chan->backend;
+ size_t sbidx, offset = ctx->priv.buf_offset;
unsigned long sb_bindex, id;
struct lib_ring_buffer_backend_pages *rpages;
id = bufb->buf_wsb[sbidx].id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
- CHAN_WARN_ON(ctx->chan,
+ CHAN_WARN_ON(ctx->priv.chan,
config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
*backend_pages = rpages;
lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx)
{
- return ctx->backend_pages;
+ return ctx->priv.backend_pages;
}
/*
struct channel;
struct lib_ring_buffer_config;
struct lib_ring_buffer_ctx;
+struct lttng_kernel_ring_buffer_ctx_private;
/*
* Ring buffer client callbacks. Only used by slow path, never on fast path.
struct lib_ring_buffer_client_cb cb;
};
+/*
+ * ring buffer private context
+ *
+ * Private context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
+ * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
+ * lib_ring_buffer_write().
+ *
+ * Get struct lttng_kernel_ring_buffer_ctx parent with container_of().
+ */
+
+struct lttng_kernel_ring_buffer_ctx_private {
+ /* input received by lib_ring_buffer_reserve(). */
+ struct channel *chan; /* ring buffer channel */
+
+ /* output from lib_ring_buffer_reserve() */
+ int reserve_cpu; /* processor id updated by the reserve */
+ size_t slot_size; /* size of the reserved slot */
+ unsigned long buf_offset; /* offset following the record header */
+ unsigned long pre_offset; /*
+ * Initial offset position _before_
+ * the record is written. Positioned
+ * prior to record header alignment
+ * padding.
+ */
+ u64 tsc; /* time-stamp counter value */
+ unsigned int rflags; /* reservation flags */
+
+ struct lib_ring_buffer *buf; /*
+ * buffer corresponding to processor id
+ * for this channel
+ */
+ struct lib_ring_buffer_backend_pages *backend_pages;
+};
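The comment above notes that the parent context can be recovered from the private area with container_of(). As a minimal sketch (not part of this patch; the helper name is hypothetical, and the parent struct is referred to by the name it still carries below, struct lib_ring_buffer_ctx, whose first member is the embedded "priv" area):

#include <linux/kernel.h>	/* container_of() */

static inline
struct lib_ring_buffer_ctx *
ring_buffer_ctx_from_priv(struct lttng_kernel_ring_buffer_ctx_private *ctx_priv)
{
	/* The private area is embedded as the "priv" member of the public context. */
	return container_of(ctx_priv, struct lib_ring_buffer_ctx, priv);
}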
+
/*
* ring buffer context
*
* lib_ring_buffer_write().
*/
struct lib_ring_buffer_ctx {
+ /* Private ring buffer context, set by reserve callback. */
+ struct lttng_kernel_ring_buffer_ctx_private priv;
+
/* input received by lib_ring_buffer_reserve(), saved here. */
- struct channel *chan; /* channel */
- void *priv; /* client private data */
+ void *client_priv; /* Ring buffer client private data */
+
size_t data_size; /* size of payload */
int largest_align; /*
* alignment of the largest element
* in the payload
*/
- int cpu; /* processor id */
-
- /* output from lib_ring_buffer_reserve() */
- struct lib_ring_buffer *buf; /*
- * buffer corresponding to processor id
- * for this channel
- */
- size_t slot_size; /* size of the reserved slot */
- unsigned long buf_offset; /* offset following the record header */
- unsigned long pre_offset; /*
- * Initial offset position _before_
- * the record is written. Positioned
- * prior to record header alignment
- * padding.
- */
- u64 tsc; /* time-stamp counter value */
- unsigned int rflags; /* reservation flags */
- /* Cache backend pages pointer chasing. */
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_probe_ctx *probe_ctx; /* Probe context */
};
/**
* lib_ring_buffer_ctx_init - initialize ring buffer context
* @ctx: ring buffer context to initialize
- * @chan: channel
- * @priv: client private data
+ * @client_priv: client private data
* @data_size: size of record data payload. It must be greater than 0.
* @largest_align: largest alignment within data payload types
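+ * @probe_ctx: probe context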
- * @cpu: processor id
*/
static inline
void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
- struct channel *chan,
+ void *client_priv,
size_t data_size, int largest_align,
- int cpu, void *priv)
+ struct lttng_probe_ctx *probe_ctx)
{
- ctx->chan = chan;
- ctx->priv = priv;
+ ctx->client_priv = client_priv;
ctx->data_size = data_size;
ctx->largest_align = largest_align;
- ctx->cpu = cpu;
- ctx->rflags = 0;
- ctx->backend_pages = NULL;
+ ctx->probe_ctx = probe_ctx;
}
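For reference, a hedged sketch of the calling convention this initializer now supports (hypothetical probe function; the real call sites appear in the probe hunks further down): the probe hands its event recorder over through client_priv and passes the probe context explicitly, while the client's event_reserve callback is responsible for filling ctx->priv.

static void example_probe_handler(struct lttng_kernel_event_recorder *event_recorder,
				  struct lttng_probe_ctx *probe_ctx,
				  unsigned long data)
{
	struct lttng_channel *chan = event_recorder->chan;
	struct lib_ring_buffer_ctx ctx;
	int ret;

	/* Only public fields are set here; chan/cpu now live in ctx.priv. */
	lib_ring_buffer_ctx_init(&ctx, event_recorder, sizeof(data),
				 lttng_alignof(data), probe_ctx);
	ret = chan->ops->event_reserve(&ctx, event_recorder->priv->id);
	if (ret < 0)
		return;
	chan->ops->event_write(&ctx, &data, sizeof(data));
	chan->ops->event_commit(&ctx);
}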
/*
void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
size_t alignment)
{
- ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
+ ctx->priv.buf_offset += lib_ring_buffer_align(ctx->priv.buf_offset,
alignment);
}
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
- struct channel *chan = ctx->chan;
- struct lib_ring_buffer *buf = ctx->buf;
+ struct channel *chan = ctx->priv.chan;
+ struct lib_ring_buffer *buf = ctx->priv.buf;
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
- ctx->tsc = lib_ring_buffer_clock_read(chan);
- if ((int64_t) ctx->tsc == -EIO)
+ ctx->priv.tsc = lib_ring_buffer_clock_read(chan);
+ if ((int64_t) ctx->priv.tsc == -EIO)
return 1;
/*
*/
prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
- if (last_tsc_overflow(config, buf, ctx->tsc))
- ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_tsc_overflow(config, buf, ctx->priv.tsc))
+ ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
if (unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
- ctx->slot_size = record_header_size(config, chan, *o_begin,
+ ctx->priv.slot_size = record_header_size(config, chan, *o_begin,
before_hdr_pad, ctx, client_ctx);
- ctx->slot_size +=
- lib_ring_buffer_align(*o_begin + ctx->slot_size,
+ ctx->priv.slot_size +=
+ lib_ring_buffer_align(*o_begin + ctx->priv.slot_size,
ctx->largest_align) + ctx->data_size;
- if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+ if (unlikely((subbuf_offset(*o_begin, chan) + ctx->priv.slot_size)
> chan->backend.subbuf_size))
return 1;
* Record fits in the current buffer and we are not on a switch
* boundary. It's safe to write.
*/
- *o_end = *o_begin + ctx->slot_size;
+ *o_end = *o_begin + ctx->priv.slot_size;
if (unlikely((subbuf_offset(*o_end, chan)) == 0))
/*
struct lib_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct channel *chan = ctx->chan;
+ struct channel *chan = ctx->priv.chan;
struct lib_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
return -EAGAIN;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
+ buf = per_cpu_ptr(chan->backend.buf, ctx->priv.reserve_cpu);
else
buf = chan->backend.buf;
if (unlikely(atomic_read(&buf->record_disabled)))
return -EAGAIN;
- ctx->buf = buf;
+ ctx->priv.buf = buf;
/*
* Perform retryable operations.
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
- if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+ if (unlikely(v_cmpxchg(config, &ctx->priv.buf->offset, o_old, o_end)
!= o_old))
goto slow_path;
* record headers, never the opposite (missing a full TSC record header
* when it would be needed).
*/
- save_last_tsc(config, ctx->buf, ctx->tsc);
+ save_last_tsc(config, ctx->priv.buf, ctx->priv.tsc);
/*
* Push the reader if necessary
*/
- lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);
+ lib_ring_buffer_reserve_push_reader(ctx->priv.buf, chan, o_end - 1);
/*
* Clear noref flag for this subbuffer.
*/
- lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
+ lib_ring_buffer_clear_noref(config, &ctx->priv.buf->backend,
subbuf_index(o_end - 1, chan));
- ctx->pre_offset = o_begin;
- ctx->buf_offset = o_begin + before_hdr_pad;
+ ctx->priv.pre_offset = o_begin;
+ ctx->priv.buf_offset = o_begin + before_hdr_pad;
return 0;
slow_path:
return lib_ring_buffer_reserve_slow(ctx, client_ctx);
void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
const struct lib_ring_buffer_ctx *ctx)
{
- struct channel *chan = ctx->chan;
- struct lib_ring_buffer *buf = ctx->buf;
- unsigned long offset_end = ctx->buf_offset;
+ struct channel *chan = ctx->priv.chan;
+ struct lib_ring_buffer *buf = ctx->priv.buf;
+ unsigned long offset_end = ctx->priv.buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
unsigned long commit_count;
struct commit_counters_hot *cc_hot = &buf->commit_hot[endidx];
} else
smp_wmb();
- v_add(config, ctx->slot_size, &cc_hot->cc);
+ v_add(config, ctx->priv.slot_size, &cc_hot->cc);
/*
* commit count read can race with concurrent OOO commit count updates.
commit_count = v_read(config, &cc_hot->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
- commit_count, endidx, ctx->tsc);
+ commit_count, endidx, ctx->priv.tsc);
/*
* Update used size at each commit. It's needed only for extracting
* ring_buffer buffers from vmcore, after crash.
int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
const struct lib_ring_buffer_ctx *ctx)
{
- struct lib_ring_buffer *buf = ctx->buf;
- unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
+ struct lib_ring_buffer *buf = ctx->priv.buf;
+ unsigned long end_offset = ctx->priv.pre_offset + ctx->priv.slot_size;
/*
* We need to ensure that if the cmpxchg succeeds and discards the
*/
save_last_tsc(config, buf, 0ULL);
- if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+ if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->priv.pre_offset)
!= end_offset))
return -EPERM;
else
offsets->switch_old_end = 0;
offsets->pre_header_padding = 0;
- ctx->tsc = config->cb.ring_buffer_clock_read(chan);
- if ((int64_t) ctx->tsc == -EIO)
+ ctx->priv.tsc = config->cb.ring_buffer_clock_read(chan);
+ if ((int64_t) ctx->priv.tsc == -EIO)
return -EIO;
- if (last_tsc_overflow(config, buf, ctx->tsc))
- ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_tsc_overflow(config, buf, ctx->priv.tsc))
+ ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
- if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+ if (unlikely(subbuf_offset(offsets->begin, ctx->priv.chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
} else {
offsets->size = config->cb.record_header_size(config, chan,
int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct channel *chan = ctx->chan;
+ struct channel *chan = ctx->priv.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
struct switch_offsets offsets;
int ret;
- ctx->buf = buf = get_current_buf(chan, ctx->cpu);
+ ctx->priv.buf = buf = get_current_buf(chan, ctx->priv.reserve_cpu);
offsets.size = 0;
do {
* records, never the opposite (missing a full TSC record when it would
* be needed).
*/
- save_last_tsc(config, buf, ctx->tsc);
+ save_last_tsc(config, buf, ctx->priv.tsc);
/*
* Push the reader if necessary
if (unlikely(offsets.switch_old_end)) {
lib_ring_buffer_clear_noref(config, &buf->backend,
subbuf_index(offsets.old - 1, chan));
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->priv.tsc);
}
/*
* Populate new subbuffer.
*/
if (unlikely(offsets.switch_new_start))
- lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
+ lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->priv.tsc);
if (unlikely(offsets.switch_new_end))
- lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+ lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->priv.tsc);
- ctx->slot_size = offsets.size;
- ctx->pre_offset = offsets.begin;
- ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
+ ctx->priv.slot_size = offsets.size;
+ ctx->priv.pre_offset = offsets.begin;
+ ctx->priv.buf_offset = offsets.begin + offsets.pre_header_padding;
return 0;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
* Do not gather the userspace callstack context when the event was
* triggered by the userspace callstack context saving mechanism.
*/
- cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+ cs_user_nesting = per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
return NULL;
* max nesting is checked in lib_ring_buffer_get_cpu().
* Check it again as a safety net.
*/
- cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
- buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+ cs = per_cpu_ptr(fdata->cs_percpu, ctx->priv.reserve_cpu);
+ buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->priv.reserve_cpu) - 1;
if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
return NULL;
trace->nr_entries = 0;
if (fdata->mode == CALLSTACK_USER)
- ++per_cpu(callstack_user_nesting, ctx->cpu);
+ ++per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
/* do the real work and reserve space */
cs_types[fdata->mode].save_func(trace);
if (fdata->mode == CALLSTACK_USER)
- per_cpu(callstack_user_nesting, ctx->cpu)--;
+ per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu)--;
/*
* Remove final ULONG_MAX delimiter. If we cannot find it, add
* Do not gather the userspace callstack context when the event was
* triggered by the userspace callstack context saving mechanism.
*/
- cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+ cs_user_nesting = per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
return NULL;
* max nesting is checked in lib_ring_buffer_get_cpu().
* Check it again as a safety net.
*/
- cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
- buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+ cs = per_cpu_ptr(fdata->cs_percpu, ctx->priv.reserve_cpu);
+ buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->priv.reserve_cpu) - 1;
if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
return NULL;
MAX_ENTRIES, 0);
break;
case CALLSTACK_USER:
- ++per_cpu(callstack_user_nesting, ctx->cpu);
+ ++per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
/* do the real work and reserve space */
trace->nr_entries = save_func_user(trace->entries,
MAX_ENTRIES);
- per_cpu(callstack_user_nesting, ctx->cpu)--;
+ per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu)--;
break;
default:
WARN_ON_ONCE(1);
{
int cpu;
- cpu = ctx->cpu;
+ cpu = ctx->priv.reserve_cpu;
lib_ring_buffer_align_ctx(ctx, lttng_alignof(cpu));
chan->ops->event_write(ctx, &cpu, sizeof(cpu));
}
struct lib_ring_buffer_ctx *ctx,
struct lttng_channel *chan)
{
- struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+ struct lttng_probe_ctx *lttng_probe_ctx = ctx->probe_ctx;
int8_t interruptible = lttng_probe_ctx->interruptible;
lib_ring_buffer_align_ctx(ctx, lttng_alignof(interruptible));
struct perf_event *event;
uint64_t value;
- event = perf_field->e[ctx->cpu];
+ event = perf_field->e[ctx->priv.reserve_cpu];
if (likely(event)) {
if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
value = 0;
kernel_notif.capture_buf_size = capture_buffer_content_len;
lib_ring_buffer_ctx_init(&ctx, event_notifier_group->chan, reserve_size,
- lttng_alignof(kernel_notif), -1, NULL);
+ lttng_alignof(kernel_notif), NULL);
ret = event_notifier_group->ops->event_reserve(&ctx, 0);
if (ret < 0) {
record_error(event_notifier);
stream->transport->ops.packet_avail_size(chan),
len);
lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
- sizeof(char), -1, NULL);
+ sizeof(char), NULL);
/*
* If reservation failed, return an error to the caller.
*/
case 1: /* compact */
padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
offset += padding;
- if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
offset += sizeof(uint32_t); /* id and timestamp */
} else {
/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
offset += padding;
offset += sizeof(uint16_t);
- if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
offset += sizeof(uint32_t); /* timestamp */
} else {
struct lib_ring_buffer_ctx *ctx,
uint32_t event_id)
{
- struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_channel *lttng_chan = channel_get_private(ctx->priv.chan);
- if (unlikely(ctx->rflags))
+ if (unlikely(ctx->priv.rflags))
goto slow_path;
switch (lttng_chan->header_type) {
bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS,
LTTNG_COMPACT_TSC_BITS,
- ctx->tsc);
+ ctx->priv.tsc);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
break;
}
case 2: /* large */
{
- uint32_t timestamp = (uint32_t) ctx->tsc;
+ uint32_t timestamp = (uint32_t) ctx->priv.tsc;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
struct lib_ring_buffer_ctx *ctx,
uint32_t event_id)
{
- struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_channel *lttng_chan = channel_get_private(ctx->priv.chan);
switch (lttng_chan->header_type) {
case 1: /* compact */
- if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
uint32_t id_time = 0;
bt_bitfield_write(&id_time, uint32_t,
event_id);
bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS, ctx->tsc);
+ LTTNG_COMPACT_TSC_BITS, ctx->priv.tsc);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
} else {
uint8_t id = 0;
- uint64_t timestamp = ctx->tsc;
+ uint64_t timestamp = ctx->priv.tsc;
bt_bitfield_write(&id, uint8_t,
0,
break;
case 2: /* large */
{
- if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- uint32_t timestamp = (uint32_t) ctx->tsc;
+ if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ uint32_t timestamp = (uint32_t) ctx->priv.tsc;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
} else {
uint16_t id = 65535;
- uint64_t timestamp = ctx->tsc;
+ uint64_t timestamp = ctx->priv.tsc;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
/* Align extended struct on largest member */
int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
uint32_t event_id)
{
- struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_kernel_event_recorder *event_recorder = ctx->client_priv;
+ struct lttng_channel *lttng_chan = event_recorder->chan;
struct lttng_client_ctx client_ctx;
int ret, cpu;
cpu = lib_ring_buffer_get_cpu(&client_config);
if (unlikely(cpu < 0))
return -EPERM;
- ctx->cpu = cpu;
+ memset(&ctx->priv, 0, sizeof(ctx->priv));
+ ctx->priv.chan = lttng_chan->chan;
+ ctx->priv.reserve_cpu = cpu;
/* Compute internal size of context structures. */
ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len, lttng_chan, ctx);
switch (lttng_chan->header_type) {
case 1: /* compact */
if (event_id > 30)
- ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+ ctx->priv.rflags |= LTTNG_RFLAG_EXTENDED;
break;
case 2: /* large */
if (event_id > 65534)
- ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+ ctx->priv.rflags |= LTTNG_RFLAG_EXTENDED;
break;
default:
WARN_ON_ONCE(1);
if (unlikely(ret))
goto put;
lib_ring_buffer_backend_get_pages(&client_config, ctx,
- &ctx->backend_pages);
+ &ctx->priv.backend_pages);
lttng_write_event_header(&client_config, ctx, event_id);
return 0;
put:
static
int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
{
+ struct channel *chan = ctx->client_priv;
int ret;
+ memset(&ctx->priv, 0, sizeof(ctx->priv));
+ ctx->priv.chan = chan;
+
ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
if (ret)
return ret;
lib_ring_buffer_backend_get_pages(&client_config, ctx,
- &ctx->backend_pages);
+ &ctx->priv.backend_pages);
lttng_write_event_notifier_header(&client_config, ctx);
return 0;
static
int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
{
+ struct channel *chan = ctx->client_priv;
int ret;
+ memset(&ctx->priv, 0, sizeof(ctx->priv));
+ ctx->priv.chan = chan;
+
ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
if (ret)
return ret;
lib_ring_buffer_backend_get_pages(&client_config, ctx,
- &ctx->backend_pages);
+ &ctx->priv.backend_pages);
return 0;
}
struct lib_ring_buffer_ctx ctx;
int ret;
- lib_ring_buffer_ctx_init(&ctx, chan->chan, sizeof(data),
- lttng_alignof(data), -1, &lttng_probe_ctx);
+ lib_ring_buffer_ctx_init(&ctx, event_recorder, sizeof(data),
+ lttng_alignof(data), &lttng_probe_ctx);
ret = chan->ops->event_reserve(&ctx, event_recorder->priv->id);
if (ret < 0)
return 0;
payload.ip = (unsigned long) lttng_get_kretprobe(krpi)->kp.addr;
payload.parent_ip = (unsigned long) krpi->ret_addr;
- lib_ring_buffer_ctx_init(&ctx, chan->chan, sizeof(payload),
- lttng_alignof(payload), -1, &lttng_probe_ctx);
+ lib_ring_buffer_ctx_init(&ctx, event_recorder, sizeof(payload),
+ lttng_alignof(payload), &lttng_probe_ctx);
ret = chan->ops->event_reserve(&ctx, event_recorder->priv->id);
if (ret < 0)
return 0;
struct lib_ring_buffer_ctx ctx;
int ret;
- lib_ring_buffer_ctx_init(&ctx, chan->chan,
- sizeof(payload), lttng_alignof(payload), -1, &lttng_probe_ctx);
+ lib_ring_buffer_ctx_init(&ctx, event_recorder,
+ sizeof(payload), lttng_alignof(payload), &lttng_probe_ctx);
ret = chan->ops->event_reserve(&ctx, event_recorder->priv->id);
if (ret < 0)