struct lttng_ust_lib_ring_buffer;
struct lttng_ust_lib_ring_buffer_channel;
struct lttng_ust_lib_ring_buffer_ctx;
+struct lttng_ust_lib_ring_buffer_ctx_private;
/*
* ring buffer context
*
- * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
- * lib_ring_buffer_try_discard_reserve(), lttng_ust_lib_ring_buffer_align_ctx() and
- * lib_ring_buffer_write().
- *
* IMPORTANT: this structure is part of the ABI between the probe and
* UST. Fields need to be only added at the end, never reordered, never
* removed.
struct lttng_ust_lib_ring_buffer_ctx {
uint32_t struct_size; /* Size of this structure. */
- /* input received by lib_ring_buffer_reserve(). */
- struct lttng_ust_lib_ring_buffer_channel *chan; /* channel */
- void *priv; /* client private data */
+ void *client_priv; /* Ring buffer client private data */
size_t data_size; /* size of payload */
int largest_align; /*
* alignment of the largest element
* in the payload
*/
-
- /* output from lib_ring_buffer_reserve() */
- int reserve_cpu; /* processor id updated by the reserve */
- size_t slot_size; /* size of the reserved slot */
- unsigned long buf_offset; /* offset following the record header */
- unsigned long pre_offset; /*
- * Initial offset position _before_
- * the record is written. Positioned
- * prior to record header alignment
- * padding.
- */
- uint64_t tsc; /* time-stamp counter value */
- unsigned int rflags; /* reservation flags */
void *ip; /* caller ip address */
- struct lttng_ust_lib_ring_buffer *buf; /*
- * buffer corresponding to processor id
- * for this channel
- */
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ /* Private ring buffer context, set by reserve callback. */
+ struct lttng_ust_lib_ring_buffer_ctx_private *priv;
/* End of base ABI. Fields below should be used after checking struct_size. */
};
/**
* lttng_ust_lib_ring_buffer_ctx_init - initialize ring buffer context
* @ctx: ring buffer context to initialize
- * @chan: channel
- * @priv: client private data
+ * @client_priv: client private data
* @data_size: size of record data payload
* @largest_align: largest alignment within data payload types
+ * @ip: caller ip address
*/
static inline lttng_ust_notrace
void lttng_ust_lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
-		struct lttng_ust_lib_ring_buffer_channel *chan,
-		void *priv, size_t data_size, int largest_align);
+		void *client_priv, size_t data_size, int largest_align,
+		void *ip);
static inline
void lttng_ust_lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
-		struct lttng_ust_lib_ring_buffer_channel *chan,
-		void *priv, size_t data_size, int largest_align)
+		void *client_priv, size_t data_size, int largest_align,
+		void *ip)
{
	ctx->struct_size = sizeof(struct lttng_ust_lib_ring_buffer_ctx);
-	ctx->chan = chan;
-	ctx->priv = priv;
+	ctx->client_priv = client_priv;
	ctx->data_size = data_size;
-	ctx->reserve_cpu = -1;
	ctx->largest_align = largest_align;
-	ctx->rflags = 0;
-	ctx->ip = 0;
+	ctx->ip = ip;
+	ctx->priv = NULL;	/* Private ctx is filled in later by the client's reserve callback. */
}
/*
#endif
-/**
- * lttng_ust_lib_ring_buffer_align_ctx - Align context offset on "alignment"
- * @ctx: ring buffer context.
- */
-static inline lttng_ust_notrace
-void lttng_ust_lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- size_t alignment);
-static inline
-void lttng_ust_lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- size_t alignment)
-{
- ctx->buf_offset += lttng_ust_lib_ring_buffer_align(ctx->buf_offset,
- alignment);
-}
-
#endif /* _LTTNG_RING_BUFFER_CONTEXT_H */
struct lttng_ust_event_common *parent; /* Inheritance by aggregation. */
struct lttng_ust_event_recorder_private *priv; /* Private event record interface */
- unsigned int id;
struct lttng_ust_channel_buffer *chan;
/* End of base ABI. Fields below should be used after checking struct_size. */
struct lttng_ust_channel_buffer_ops_private *priv; /* Private channel buffer ops interface */
- int (*event_reserve)(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- uint32_t event_id);
+ int (*event_reserve)(struct lttng_ust_lib_ring_buffer_ctx *ctx);
void (*event_commit)(struct lttng_ust_lib_ring_buffer_ctx *ctx);
void (*event_write)(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const void *src, size_t len);
+ const void *src, size_t len, size_t alignment);
void (*event_strcpy)(struct lttng_ust_lib_ring_buffer_ctx *ctx,
const char *src, size_t len);
void (*event_pstrcpy_pad)(struct lttng_ust_lib_ring_buffer_ctx *ctx,
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _nowrite) \
{ \
_type __tmp = (_src); \
- lttng_ust_lib_ring_buffer_align_ctx(&__ctx, lttng_ust_rb_alignof(__tmp));\
- __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
+ __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp), lttng_ust_rb_alignof(__tmp));\
}
#undef _ctf_float
#define _ctf_float(_type, _item, _src, _nowrite) \
{ \
_type __tmp = (_src); \
- lttng_ust_lib_ring_buffer_align_ctx(&__ctx, lttng_ust_rb_alignof(__tmp));\
- __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
+ __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp), lttng_ust_rb_alignof(__tmp));\
}
#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _byte_order, _length, \
_encoding, _nowrite, _elem_type_base) \
- lttng_ust_lib_ring_buffer_align_ctx(&__ctx, lttng_ust_rb_alignof(_type)); \
if (lttng_ust_string_encoding_##_encoding == lttng_ust_string_encoding_none) \
- __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
+ __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length), lttng_ust_rb_alignof(_type)); \
else \
__chan->ops->event_pstrcpy_pad(&__ctx, (const char *) (_src), _length); \
_src_length, _encoding, _nowrite, _elem_type_base) \
{ \
_length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx]; \
- lttng_ust_lib_ring_buffer_align_ctx(&__ctx, lttng_ust_rb_alignof(_length_type));\
- __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
+ __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type), lttng_ust_rb_alignof(_length_type));\
} \
- lttng_ust_lib_ring_buffer_align_ctx(&__ctx, lttng_ust_rb_alignof(_type)); \
if (lttng_ust_string_encoding_##_encoding == lttng_ust_string_encoding_none) \
__chan->ops->event_write(&__ctx, _src, \
- sizeof(_type) * __get_dynamic_len(dest)); \
+ sizeof(_type) * __get_dynamic_len(dest), lttng_ust_rb_alignof(_type)); \
else \
__chan->ops->event_pstrcpy_pad(&__ctx, (const char *) (_src), __get_dynamic_len(dest)); \
#undef _ctf_string
-#define _ctf_string(_item, _src, _nowrite) \
+#define _ctf_string(_item, _src, _nowrite) \
{ \
const char *__ctf_tmp_string = \
((_src) ? (_src) : __LTTNG_UST_NULL_STRING); \
- lttng_ust_lib_ring_buffer_align_ctx(&__ctx, \
- lttng_ust_rb_alignof(*__ctf_tmp_string)); \
__chan->ops->event_strcpy(&__ctx, __ctf_tmp_string, \
__get_dynamic_len(dest)); \
}
struct lttng_ust_event_recorder *__event_recorder = (struct lttng_ust_event_recorder *) __event->child; \
struct lttng_ust_channel_buffer *__chan = __event_recorder->chan; \
struct lttng_ust_lib_ring_buffer_ctx __ctx; \
- struct lttng_ust_stack_ctx __lttng_ctx; \
\
__event_len = __event_get_size__##_provider##___##_name(__stackvar.__dynamic_len, \
_TP_ARGS_DATA_VAR(_args)); \
__event_align = __event_get_align__##_provider##___##_name(_TP_ARGS_VAR(_args)); \
- memset(&__lttng_ctx, 0, sizeof(__lttng_ctx)); \
- __lttng_ctx.struct_size = sizeof(struct lttng_ust_stack_ctx); \
- __lttng_ctx.event_recorder = __event_recorder; \
- lttng_ust_lib_ring_buffer_ctx_init(&__ctx, NULL, &__lttng_ctx, __event_len, __event_align); \
- __ctx.ip = _TP_IP_PARAM(TP_IP_PARAM); \
- __ret = __chan->ops->event_reserve(&__ctx, __event_recorder->id); \
+ lttng_ust_lib_ring_buffer_ctx_init(&__ctx, __event_recorder, __event_len, __event_align, \
+ _TP_IP_PARAM(TP_IP_PARAM)); \
+ __ret = __chan->ops->event_reserve(&__ctx); \
if (__ret < 0) \
return; \
_fields \
reserve_len = min_t(size_t,
lttng_chan_buf->ops->priv->packet_avail_size(lttng_chan_buf),
len - pos);
- lttng_ust_lib_ring_buffer_ctx_init(&ctx, rb_chan, NULL, reserve_len, sizeof(char));
+ lttng_ust_lib_ring_buffer_ctx_init(&ctx, rb_chan, reserve_len, sizeof(char), NULL);
/*
* We don't care about metadata buffer's records lost
* count, because we always retry here. Report error if
*/
waitret = wait_cond_interruptible_timeout(
({
- ret = lttng_chan_buf->ops->event_reserve(&ctx, 0);
+ ret = lttng_chan_buf->ops->event_reserve(&ctx);
ret != -ENOBUFS || !ret;
}),
LTTNG_METADATA_TIMEOUT_MSEC);
ret = waitret;
goto end;
}
- lttng_chan_buf->ops->event_write(&ctx, &str[pos], reserve_len);
+ lttng_chan_buf->ops->event_write(&ctx, &str[pos], reserve_len, 1);
lttng_chan_buf->ops->event_commit(&ctx);
}
end:
reserve_len = min_t(ssize_t,
lttng_chan_buf->ops->priv->packet_avail_size(lttng_chan_buf),
len);
- lttng_ust_lib_ring_buffer_ctx_init(&ctx, rb_chan, NULL, reserve_len, sizeof(char));
- ret = lttng_chan_buf->ops->event_reserve(&ctx, 0);
+ lttng_ust_lib_ring_buffer_ctx_init(&ctx, rb_chan, reserve_len, sizeof(char), NULL);
+ ret = lttng_chan_buf->ops->event_reserve(&ctx);
if (ret != 0) {
DBG("LTTng: event reservation failed");
assert(ret < 0);
reserve_len = ret;
goto end;
}
- lttng_chan_buf->ops->event_write(&ctx, str, reserve_len);
+ lttng_chan_buf->ops->event_write(&ctx, str, reserve_len, 1);
lttng_chan_buf->ops->event_commit(&ctx);
end:
switch (jni_type) {
case JNI_TYPE_NULL:
sel_char = LTTNG_UST_DYNAMIC_TYPE_NONE;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char));
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
break;
case JNI_TYPE_INTEGER:
{
int32_t v = jctx->value._integer;
sel_char = LTTNG_UST_DYNAMIC_TYPE_S32;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case JNI_TYPE_LONG:
int64_t v = jctx->value._long;
sel_char = LTTNG_UST_DYNAMIC_TYPE_S64;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case JNI_TYPE_DOUBLE:
double v = jctx->value._double;
sel_char = LTTNG_UST_DYNAMIC_TYPE_DOUBLE;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case JNI_TYPE_FLOAT:
float v = jctx->value._float;
sel_char = LTTNG_UST_DYNAMIC_TYPE_FLOAT;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case JNI_TYPE_SHORT:
int16_t v = jctx->value._short;
sel_char = LTTNG_UST_DYNAMIC_TYPE_S16;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case JNI_TYPE_BYTE:
char v = jctx->value._byte;
sel_char = LTTNG_UST_DYNAMIC_TYPE_S8;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case JNI_TYPE_BOOLEAN:
char v = jctx->value._boolean;
sel_char = LTTNG_UST_DYNAMIC_TYPE_S8;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case JNI_TYPE_STRING:
} else {
sel_char = LTTNG_UST_DYNAMIC_TYPE_NONE;
}
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char));
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
if (str) {
- lttng_chan_buf->ops->event_write(ctx, str, strlen(str) + 1);
+ lttng_chan_buf->ops->event_write(ctx, str, strlen(str) + 1, 1);
}
break;
}
ino_t cgroup_ns;
cgroup_ns = get_cgroup_ns();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(cgroup_ns));
- chan->ops->event_write(ctx, &cgroup_ns, sizeof(cgroup_ns));
+ chan->ops->event_write(ctx, &cgroup_ns, sizeof(cgroup_ns),
+ lttng_ust_rb_alignof(cgroup_ns));
}
static
int cpu;
cpu = lttng_ust_get_cpu();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(cpu));
- chan->ops->event_write(ctx, &cpu, sizeof(cpu));
+ chan->ops->event_write(ctx, &cpu, sizeof(cpu), lttng_ust_rb_alignof(cpu));
}
static
void *ip;
ip = ctx->ip;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(ip));
- chan->ops->event_write(ctx, &ip, sizeof(ip));
+ chan->ops->event_write(ctx, &ip, sizeof(ip), lttng_ust_rb_alignof(ip));
}
int lttng_add_ip_to_ctx(struct lttng_ust_ctx **ctx)
ino_t ipc_ns;
ipc_ns = get_ipc_ns();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(ipc_ns));
- chan->ops->event_write(ctx, &ipc_ns, sizeof(ipc_ns));
+ chan->ops->event_write(ctx, &ipc_ns, sizeof(ipc_ns), lttng_ust_rb_alignof(ipc_ns));
}
static
ino_t mnt_ns;
mnt_ns = get_mnt_ns();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(mnt_ns));
- chan->ops->event_write(ctx, &mnt_ns, sizeof(mnt_ns));
+ chan->ops->event_write(ctx, &mnt_ns, sizeof(mnt_ns), lttng_ust_rb_alignof(mnt_ns));
}
static
ino_t net_ns;
net_ns = get_net_ns();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(net_ns));
- chan->ops->event_write(ctx, &net_ns, sizeof(net_ns));
+ chan->ops->event_write(ctx, &net_ns, sizeof(net_ns), lttng_ust_rb_alignof(net_ns));
}
static
uint64_t value;
value = wrapper_perf_counter_read(field);
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(value));
- chan->ops->event_write(ctx, &value, sizeof(value));
+ chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
}
static
ino_t pid_ns;
pid_ns = get_pid_ns();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(pid_ns));
- chan->ops->event_write(ctx, &pid_ns, sizeof(pid_ns));
+ chan->ops->event_write(ctx, &pid_ns, sizeof(pid_ns), lttng_ust_rb_alignof(pid_ns));
}
static
char *procname;
procname = wrapper_getprocname();
- chan->ops->event_write(ctx, procname, LTTNG_UST_ABI_PROCNAME_LEN);
+ chan->ops->event_write(ctx, procname, LTTNG_UST_ABI_PROCNAME_LEN, 1);
}
static
unsigned long pthread_id;
pthread_id = (unsigned long) pthread_self();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(pthread_id));
- chan->ops->event_write(ctx, &pthread_id, sizeof(pthread_id));
+ chan->ops->event_write(ctx, &pthread_id, sizeof(pthread_id), lttng_ust_rb_alignof(pthread_id));
}
static
ino_t time_ns;
time_ns = get_time_ns();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(time_ns));
- chan->ops->event_write(ctx, &time_ns, sizeof(time_ns));
+ chan->ops->event_write(ctx, &time_ns, sizeof(time_ns), lttng_ust_rb_alignof(time_ns));
}
static
ino_t user_ns;
user_ns = get_user_ns();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(user_ns));
- chan->ops->event_write(ctx, &user_ns, sizeof(user_ns));
+ chan->ops->event_write(ctx, &user_ns, sizeof(user_ns), lttng_ust_rb_alignof(user_ns));
}
static
ino_t uts_ns;
uts_ns = get_uts_ns();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uts_ns));
- chan->ops->event_write(ctx, &uts_ns, sizeof(uts_ns));
+ chan->ops->event_write(ctx, &uts_ns, sizeof(uts_ns), lttng_ust_rb_alignof(uts_ns));
}
static
gid_t vegid;
vegid = get_vegid();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(vegid));
- chan->ops->event_write(ctx, &vegid, sizeof(vegid));
+ chan->ops->event_write(ctx, &vegid, sizeof(vegid), lttng_ust_rb_alignof(vegid));
}
static
uid_t veuid;
veuid = get_veuid();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(veuid));
- chan->ops->event_write(ctx, &veuid, sizeof(veuid));
+ chan->ops->event_write(ctx, &veuid, sizeof(veuid), lttng_ust_rb_alignof(veuid));
}
static
gid_t vgid;
vgid = get_vgid();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(vgid));
- chan->ops->event_write(ctx, &vgid, sizeof(vgid));
+ chan->ops->event_write(ctx, &vgid, sizeof(vgid), lttng_ust_rb_alignof(vgid));
}
static
{
pid_t vpid = wrapper_getvpid();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(vpid));
- chan->ops->event_write(ctx, &vpid, sizeof(vpid));
+ chan->ops->event_write(ctx, &vpid, sizeof(vpid), lttng_ust_rb_alignof(vpid));
}
static
gid_t vsgid;
vsgid = get_vsgid();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(vsgid));
- chan->ops->event_write(ctx, &vsgid, sizeof(vsgid));
+ chan->ops->event_write(ctx, &vsgid, sizeof(vsgid), lttng_ust_rb_alignof(vsgid));
}
static
uid_t vsuid;
vsuid = get_vsuid();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(vsuid));
- chan->ops->event_write(ctx, &vsuid, sizeof(vsuid));
+ chan->ops->event_write(ctx, &vsuid, sizeof(vsuid), lttng_ust_rb_alignof(vsuid));
}
static
{
pid_t vtid = wrapper_getvtid();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(vtid));
- chan->ops->event_write(ctx, &vtid, sizeof(vtid));
+ chan->ops->event_write(ctx, &vtid, sizeof(vtid), lttng_ust_rb_alignof(vtid));
}
static
uid_t vuid;
vuid = get_vuid();
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(vuid));
- chan->ops->event_write(ctx, &vuid, sizeof(vuid));
+ chan->ops->event_write(ctx, &vuid, sizeof(vuid), lttng_ust_rb_alignof(vuid));
}
static
desc->nr_fields,
desc->fields,
uri,
- &event_recorder->id);
+ &event_recorder->priv->id);
if (ret < 0) {
DBG("Error (%d) registering event to sessiond", ret);
goto sessiond_register_error;
#include "context-internal.h"
#include "lttng-tracer.h"
#include "../libringbuffer/frontend_types.h"
+#include <urcu/tls-compat.h>
#define LTTNG_COMPACT_EVENT_BITS 5
#define LTTNG_COMPACT_TSC_BITS 27
struct lttng_ust_ctx *event_ctx;
};
+/*
+ * Indexed by lib_ring_buffer_nesting_count().
+ */
+typedef struct lttng_ust_lib_ring_buffer_ctx_private private_ctx_stack_t[LIB_RING_BUFFER_MAX_NESTING];
+static DEFINE_URCU_TLS(private_ctx_stack_t, private_ctx_stack);
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+static
+void lttng_fixup_rb_client_tls(void)
+{
+	/* The "m" input operand makes the TLS access observable to the compiler. */
+	asm volatile ("" : : "m" (URCU_TLS(private_ctx_stack)));
+}
+
static inline uint64_t lib_ring_buffer_clock_read(struct lttng_ust_lib_ring_buffer_channel *chan)
{
return trace_clock_read64();
case 1: /* compact */
padding = lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
offset += padding;
- if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
offset += sizeof(uint32_t); /* id and timestamp */
} else {
/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
padding = lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
offset += padding;
offset += sizeof(uint16_t);
- if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
offset += sizeof(uint32_t); /* timestamp */
} else {
struct lttng_client_ctx *client_ctx,
uint32_t event_id)
{
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->priv->chan);
- if (caa_unlikely(ctx->rflags))
+ if (caa_unlikely(ctx->priv->rflags))
goto slow_path;
switch (lttng_chan->priv->header_type) {
bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS,
LTTNG_COMPACT_TSC_BITS,
- ctx->tsc);
+ ctx->priv->tsc);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
break;
}
case 2: /* large */
{
- uint32_t timestamp = (uint32_t) ctx->tsc;
+ uint32_t timestamp = (uint32_t) ctx->priv->tsc;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
struct lttng_client_ctx *client_ctx,
uint32_t event_id)
{
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->priv->chan);
switch (lttng_chan->priv->header_type) {
case 1: /* compact */
- if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
uint32_t id_time = 0;
bt_bitfield_write(&id_time, uint32_t,
bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS,
LTTNG_COMPACT_TSC_BITS,
- ctx->tsc);
+ ctx_private->tsc);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
} else {
uint8_t id = 0;
- uint64_t timestamp = ctx->tsc;
+ uint64_t timestamp = ctx_private->tsc;
bt_bitfield_write(&id, uint8_t,
0,
break;
case 2: /* large */
{
- if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- uint32_t timestamp = (uint32_t) ctx->tsc;
+ if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ uint32_t timestamp = (uint32_t) ctx_private->tsc;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
lib_ring_buffer_write(config, ctx, ×tamp, sizeof(timestamp));
} else {
uint16_t id = 65535;
- uint64_t timestamp = ctx->tsc;
+ uint64_t timestamp = ctx_private->tsc;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
/* Align extended struct on largest member */
}
static
-int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- uint32_t event_id)
+int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
- struct lttng_ust_stack_ctx *lttng_ctx = ctx->priv;
- struct lttng_ust_event_recorder *event_recorder = lttng_ctx->event_recorder;
+ struct lttng_ust_event_recorder *event_recorder = ctx->client_priv;
struct lttng_ust_channel_buffer *lttng_chan = event_recorder->chan;
struct lttng_client_ctx client_ctx;
- int ret;
+ int ret, nesting;
+ struct lttng_ust_lib_ring_buffer_ctx_private *private_ctx;
+ uint32_t event_id;
- ctx->chan = lttng_chan->priv->rb_chan;
+ event_id = event_recorder->priv->id;
client_ctx.chan_ctx = lttng_ust_rcu_dereference(lttng_chan->priv->ctx);
client_ctx.event_ctx = lttng_ust_rcu_dereference(event_recorder->priv->ctx);
/* Compute internal size of context structures. */
ctx_get_struct_size(client_ctx.event_ctx, &client_ctx.event_context_len,
APP_CTX_ENABLED);
- if (lib_ring_buffer_nesting_inc(&client_config) < 0)
+ nesting = lib_ring_buffer_nesting_inc(&client_config);
+ if (nesting < 0)
return -EPERM;
+ private_ctx = &private_ctx_stack[nesting];
+ memset(private_ctx, 0, sizeof(*private_ctx));
+ private_ctx->pub = ctx;
+ private_ctx->chan = lttng_chan->priv->rb_chan;
+
+ ctx->priv = private_ctx;
+
switch (lttng_chan->priv->header_type) {
case 1: /* compact */
if (event_id > 30)
- ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+ private_ctx->rflags |= LTTNG_RFLAG_EXTENDED;
break;
case 2: /* large */
if (event_id > 65534)
- ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+ private_ctx->rflags |= LTTNG_RFLAG_EXTENDED;
break;
default:
WARN_ON_ONCE(1);
if (caa_unlikely(ret))
goto put;
if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
- &ctx->backend_pages)) {
+ &private_ctx->backend_pages)) {
ret = -EPERM;
goto put;
}
}
static
-void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
-		size_t len)
+void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+		const void *src, size_t len, size_t alignment)
{
+	lttng_ust_lib_ring_buffer_align_ctx(ctx, alignment);	/* Alignment now applied here rather than by each caller. */
	lib_ring_buffer_write(&client_config, ctx, src, len);
}
static
-void lttng_event_strcpy(struct lttng_ust_lib_ring_buffer_ctx *ctx, const char *src,
- size_t len)
+void lttng_event_strcpy(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const char *src, size_t len)
{
lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
}
static
void lttng_event_pstrcpy_pad(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const char *src, size_t len)
+ const char *src, size_t len)
{
lib_ring_buffer_pstrcpy(&client_config, ctx, src, len, '\0');
}
{
DBG("LTT : ltt ring buffer client \"%s\" init\n",
"relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
+ lttng_fixup_rb_client_tls();
lttng_transport_register(<tng_relay_transport);
}
#include "ust-compat.h"
#include "lttng-tracer.h"
#include "../libringbuffer/frontend_types.h"
+#include <urcu/tls-compat.h>
struct metadata_packet_header {
uint32_t magic; /* 0x75D11D57 */
static const struct lttng_ust_lib_ring_buffer_config client_config;
+/* No nested use supported for metadata ring buffer. */
+static DEFINE_URCU_TLS(struct lttng_ust_lib_ring_buffer_ctx_private, private_ctx);
+
static inline uint64_t lib_ring_buffer_clock_read(struct lttng_ust_lib_ring_buffer_channel *chan)
{
return 0;
}
static
-int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx, uint32_t event_id)
+int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	int ret;
+	/* No nested use for metadata: reuse the single per-thread private ctx. */
+	memset(&private_ctx, 0, sizeof(private_ctx));
+	private_ctx.pub = ctx;
+	private_ctx.chan = ctx->client_priv;	/* For metadata, client_priv carries the rb channel. */
+	ctx->priv = &private_ctx;
	ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
	if (ret)
		return ret;
	if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
-			&ctx->backend_pages))
+			&ctx->priv->backend_pages))
		return -EPERM;
	return 0;
}
}
static
-void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
-		size_t len)
+void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+		const void *src, size_t len, size_t alignment)
{
+	lttng_ust_lib_ring_buffer_align_ctx(ctx, alignment);	/* Alignment now applied here rather than by each caller. */
	lib_ring_buffer_write(&client_config, ctx, src, len);
}
{
char sel_char = (char) LTTNG_UST_DYNAMIC_TYPE_NONE;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(sel_char));
- chan->ops->event_write(ctx, &sel_char, sizeof(sel_char));
+ chan->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(sel_char));
}
void lttng_ust_dummy_get_value(struct lttng_ust_ctx_field *field,
struct cds_list_head node; /* Event recorder list */
struct cds_hlist_node hlist; /* Hash table of event recorders */
struct lttng_ust_ctx *ctx;
+ unsigned int id;
};
struct lttng_ust_event_notifier_private {
struct lttng_ust_lib_ring_buffer_ctx *ctx,
const void *src, size_t len)
{
- struct channel_backend *chanb = &ctx->chan->backend;
- struct lttng_ust_shm_handle *handle = ctx->chan->handle;
- size_t offset = ctx->buf_offset;
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct channel_backend *chanb = &ctx_private->chan->backend;
+ struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
+ size_t offset = ctx_private->buf_offset;
struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
void *p;
if (caa_unlikely(!p))
return;
lib_ring_buffer_do_copy(config, p, src, len);
- ctx->buf_offset += len;
+ ctx_private->buf_offset += len;
}
/*
struct lttng_ust_lib_ring_buffer_ctx *ctx,
const char *src, size_t len, char pad)
{
- struct channel_backend *chanb = &ctx->chan->backend;
- struct lttng_ust_shm_handle *handle = ctx->chan->handle;
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct channel_backend *chanb = &ctx_private->chan->backend;
+ struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
size_t count;
- size_t offset = ctx->buf_offset;
+ size_t offset = ctx_private->buf_offset;
struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
void *p;
if (caa_unlikely(!p))
return;
lib_ring_buffer_do_memset(p, '\0', 1);
- ctx->buf_offset += len;
+ ctx_private->buf_offset += len;
}
/**
struct lttng_ust_lib_ring_buffer_ctx *ctx,
const char *src, size_t len, char pad)
{
- struct channel_backend *chanb = &ctx->chan->backend;
- struct lttng_ust_shm_handle *handle = ctx->chan->handle;
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct channel_backend *chanb = &ctx_private->chan->backend;
+ struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
size_t count;
- size_t offset = ctx->buf_offset;
+ size_t offset = ctx_private->buf_offset;
struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
void *p;
return;
lib_ring_buffer_do_memset(p, pad, pad_len);
}
- ctx->buf_offset += len;
+ ctx_private->buf_offset += len;
}
/*
struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
{
- struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
- struct channel_backend *chanb = &ctx->chan->backend;
- struct lttng_ust_shm_handle *handle = ctx->chan->handle;
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx_private->buf->backend;
+ struct channel_backend *chanb = &ctx_private->chan->backend;
+ struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
size_t sbidx;
- size_t offset = ctx->buf_offset;
+ size_t offset = ctx_private->buf_offset;
struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
unsigned long sb_bindex, id;
rpages = shmp_index(handle, bufb->array, sb_bindex);
if (caa_unlikely(!rpages))
return -1;
- CHAN_WARN_ON(ctx->chan,
+ CHAN_WARN_ON(ctx_private->chan,
config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
_backend_pages = shmp(handle, rpages->shmp);
lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
- return ctx->backend_pages;
+ return ctx->priv->backend_pages;
}
/*
*
* The ring buffer nesting count is a safety net to ensure tracer
* client code will never trigger an endless recursion.
- * Returns 0 on success, -EPERM on failure (nesting count too high).
+ * Returns a nesting level >= 0 on success, -EPERM on failure (nesting
+ * count too high).
*
* asm volatile and "memory" clobber prevent the compiler from moving
* instructions out of the ring buffer nesting count. This is required to ensure
nesting = ++URCU_TLS(lib_ring_buffer_nesting);
cmm_barrier();
- if (caa_unlikely(nesting > 4)) {
+ if (caa_unlikely(nesting >= LIB_RING_BUFFER_MAX_NESTING)) {
WARN_ON_ONCE(1);
URCU_TLS(lib_ring_buffer_nesting)--;
return -EPERM;
}
- return 0;
+ return nesting - 1;
+}
+
+/*
+ * Return the current per-thread ring buffer nesting count.
+ * The config parameter is unused in this helper; presumably kept for
+ * signature symmetry with the other config-taking nesting helpers —
+ * confirm before removing.
+ */
+static inline
+int lib_ring_buffer_nesting_count(const struct lttng_ust_lib_ring_buffer_config *config)
+{
+ return URCU_TLS(lib_ring_buffer_nesting);
+}
static inline
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx->chan;
- struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
- ctx->tsc = lib_ring_buffer_clock_read(chan);
- if ((int64_t) ctx->tsc == -EIO)
+ ctx_private->tsc = lib_ring_buffer_clock_read(chan);
+ if ((int64_t) ctx_private->tsc == -EIO)
return 1;
/*
*/
//prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
- if (last_tsc_overflow(config, buf, ctx->tsc))
- ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_tsc_overflow(config, buf, ctx_private->tsc))
+ ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
- ctx->slot_size = record_header_size(config, chan, *o_begin,
+ ctx_private->slot_size = record_header_size(config, chan, *o_begin,
before_hdr_pad, ctx, client_ctx);
- ctx->slot_size +=
- lttng_ust_lib_ring_buffer_align(*o_begin + ctx->slot_size,
+ ctx_private->slot_size +=
+ lttng_ust_lib_ring_buffer_align(*o_begin + ctx_private->slot_size,
ctx->largest_align) + ctx->data_size;
- if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+ if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx_private->slot_size)
> chan->backend.subbuf_size))
return 1;
* Record fits in the current buffer and we are not on a switch
* boundary. It's safe to write.
*/
- *o_end = *o_begin + ctx->slot_size;
+ *o_end = *o_begin + ctx_private->slot_size;
if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
/*
struct lttng_ust_lib_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx->chan;
- struct lttng_ust_shm_handle *handle = ctx->chan->handle;
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_shm_handle *handle = chan->handle;
struct lttng_ust_lib_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
return -EAGAIN;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- ctx->reserve_cpu = lttng_ust_get_cpu();
- buf = shmp(handle, chan->backend.buf[ctx->reserve_cpu].shmp);
+ ctx_private->reserve_cpu = lttng_ust_get_cpu();
+ buf = shmp(handle, chan->backend.buf[ctx_private->reserve_cpu].shmp);
} else {
buf = shmp(handle, chan->backend.buf[0].shmp);
}
return -EIO;
if (caa_unlikely(uatomic_read(&buf->record_disabled)))
return -EAGAIN;
- ctx->buf = buf;
+ ctx_private->buf = buf;
/*
* Perform retryable operations.
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
- if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+ if (caa_unlikely(v_cmpxchg(config, &buf->offset, o_old, o_end)
!= o_old))
goto slow_path;
* record headers, never the opposite (missing a full TSC record header
* when it would be needed).
*/
- save_last_tsc(config, ctx->buf, ctx->tsc);
+ save_last_tsc(config, buf, ctx_private->tsc);
/*
* Push the reader if necessary
*/
- lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);
+ lib_ring_buffer_reserve_push_reader(buf, chan, o_end - 1);
/*
* Clear noref flag for this subbuffer.
*/
- lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
+ lib_ring_buffer_clear_noref(config, &buf->backend,
subbuf_index(o_end - 1, chan), handle);
- ctx->pre_offset = o_begin;
- ctx->buf_offset = o_begin + before_hdr_pad;
+ ctx_private->pre_offset = o_begin;
+ ctx_private->buf_offset = o_begin + before_hdr_pad;
return 0;
slow_path:
return lib_ring_buffer_reserve_slow(ctx, client_ctx);
void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx->chan;
- struct lttng_ust_shm_handle *handle = ctx->chan->handle;
- struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
- unsigned long offset_end = ctx->buf_offset;
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
+ unsigned long offset_end = ctx_private->buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
unsigned long commit_count;
struct commit_counters_hot *cc_hot = shmp_index(handle,
*/
cmm_smp_wmb();
- v_add(config, ctx->slot_size, &cc_hot->cc);
+ v_add(config, ctx_private->slot_size, &cc_hot->cc);
/*
* commit count read can race with concurrent OOO commit count updates.
commit_count = v_read(config, &cc_hot->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
- commit_count, endidx, handle, ctx->tsc);
+ commit_count, endidx, handle, ctx_private->tsc);
/*
* Update used size at each commit. It's needed only for extracting
* ring_buffer buffers from vmcore, after crash.
int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
- struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
- unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
+ unsigned long end_offset = ctx_private->pre_offset + ctx_private->slot_size;
/*
* We need to ensure that if the cmpxchg succeeds and discards the
*/
save_last_tsc(config, buf, 0ULL);
- if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+ if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset)
!= end_offset))
return -EPERM;
else
#include "shm_types.h"
#include "vatomic.h"
+#define LIB_RING_BUFFER_MAX_NESTING 5
+
/*
* A switch is done during tracing or as a final flush after tracing (so it
* won't write in the new sub-buffer).
char padding[RB_RING_BUFFER_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+/*
+ * ring buffer private context
+ *
+ * Private context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
+ * lib_ring_buffer_try_discard_reserve(), lttng_ust_lib_ring_buffer_align_ctx() and
+ * lib_ring_buffer_write().
+ *
+ * This context is allocated on an internal shadow-stack by a successful reserve
+ * operation, used by align/write, and freed by commit.
+ *
+ * NOTE(review): presumably internal-only (reached through the public
+ * context's opaque "priv" pointer) and therefore not part of the
+ * probe/UST ABI — confirm before reordering or removing fields.
+ */
+
+struct lttng_ust_lib_ring_buffer_ctx_private {
+ /* input received by lib_ring_buffer_reserve(). */
+ struct lttng_ust_lib_ring_buffer_ctx *pub; /* back-reference to public context */
+ struct lttng_ust_lib_ring_buffer_channel *chan; /* channel */
+
+ /* output from lib_ring_buffer_reserve() */
+ int reserve_cpu; /* processor id updated by the reserve */
+ size_t slot_size; /* size of the reserved slot */
+ unsigned long buf_offset; /* offset following the record header */
+ unsigned long pre_offset; /*
+ * Initial offset position _before_
+ * the record is written. Positioned
+ * prior to record header alignment
+ * padding.
+ */
+ uint64_t tsc; /* time-stamp counter value */
+ unsigned int rflags; /* reservation flags */
+ void *ip; /* caller ip address */
+
+ struct lttng_ust_lib_ring_buffer *buf; /*
+ * buffer corresponding to processor id
+ * for this channel
+ */
+ /* Cached by reserve; see lib_ring_buffer_get_backend_pages_from_ctx(). */
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+};
+
static inline
void *channel_get_private_config(struct lttng_ust_lib_ring_buffer_channel *chan)
{
_____ret = _____ret; /* For clang "unused result". */ \
})
+/**
+ * lttng_ust_lib_ring_buffer_align_ctx - Align context offset on "alignment"
+ * @ctx: ring buffer context.
+ * @alignment: required alignment, in bytes, of the next field to be
+ * written into the record (typically a power of two).
+ *
+ * Advances the private buf_offset by the padding required to align it on
+ * @alignment. Must only be called once a successful reserve has set
+ * ctx->priv.
+ */
+static inline lttng_ust_notrace
+void lttng_ust_lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ size_t alignment);
+static inline
+void lttng_ust_lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ size_t alignment)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+
+ /* lttng_ust_lib_ring_buffer_align() returns the padding to add, not the aligned offset. */
+ ctx_private->buf_offset += lttng_ust_lib_ring_buffer_align(ctx_private->buf_offset,
+ alignment);
+}
+
#endif /* _LTTNG_RING_BUFFER_FRONTEND_TYPES_H */
struct lttng_ust_lib_ring_buffer_ctx *ctx,
void *client_ctx)
{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- struct lttng_ust_shm_handle *handle = ctx->chan->handle;
+ struct lttng_ust_shm_handle *handle = chan->handle;
unsigned long reserve_commit_diff, offset_cmp;
int timeout_left_ms = lttng_ust_ringbuffer_get_timeout(chan);
offsets->switch_old_end = 0;
offsets->pre_header_padding = 0;
- ctx->tsc = config->cb.ring_buffer_clock_read(chan);
- if ((int64_t) ctx->tsc == -EIO)
+ ctx_private->tsc = config->cb.ring_buffer_clock_read(chan);
+ if ((int64_t) ctx_private->tsc == -EIO)
return -EIO;
- if (last_tsc_overflow(config, buf, ctx->tsc))
- ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_tsc_overflow(config, buf, ctx_private->tsc))
+ ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
- if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+ if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
} else {
offsets->size = config->cb.record_header_size(config, chan,
ctx, client_ctx);
offsets->size +=
lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
- ctx->largest_align)
+ ctx->largest_align)
+ ctx->data_size;
if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
offsets->size > chan->backend.subbuf_size)) {
int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx->chan;
- struct lttng_ust_shm_handle *handle = ctx->chan->handle;
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_shm_handle *handle = chan->handle;
const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
struct lttng_ust_lib_ring_buffer *buf;
struct switch_offsets offsets;
int ret;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf = shmp(handle, chan->backend.buf[ctx->reserve_cpu].shmp);
+ buf = shmp(handle, chan->backend.buf[ctx_private->reserve_cpu].shmp);
else
buf = shmp(handle, chan->backend.buf[0].shmp);
if (!buf)
return -EIO;
- ctx->buf = buf;
+ ctx_private->buf = buf;
offsets.size = 0;
* records, never the opposite (missing a full TSC record when it would
* be needed).
*/
- save_last_tsc(config, buf, ctx->tsc);
+ save_last_tsc(config, buf, ctx_private->tsc);
/*
* Push the reader if necessary
lib_ring_buffer_clear_noref(config, &buf->backend,
subbuf_index(offsets.old - 1, chan),
handle);
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc, handle);
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx_private->tsc, handle);
}
/*
* Populate new subbuffer.
*/
if (caa_unlikely(offsets.switch_new_start))
- lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
+ lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx_private->tsc, handle);
if (caa_unlikely(offsets.switch_new_end))
- lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
+ lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx_private->tsc, handle);
- ctx->slot_size = offsets.size;
- ctx->pre_offset = offsets.begin;
- ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
+ ctx_private->slot_size = offsets.size;
+ ctx_private->pre_offset = offsets.begin;
+ ctx_private->buf_offset = offsets.begin + offsets.pre_header_padding;
return 0;
}
struct lttng_ust_lib_ring_buffer;
struct lttng_ust_lib_ring_buffer_channel;
struct lttng_ust_lib_ring_buffer_config;
-struct lttng_ust_lib_ring_buffer_ctx;
+struct lttng_ust_lib_ring_buffer_ctx_private;
struct lttng_ust_shm_handle;
/*
int sel = test_count % _NR_LTTNG_UST_DYNAMIC_TYPES;
char sel_char = (char) sel;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char));
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
switch (sel) {
case LTTNG_UST_DYNAMIC_TYPE_NONE:
break;
{
int8_t v = -8;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case LTTNG_UST_DYNAMIC_TYPE_S16:
{
int16_t v = -16;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case LTTNG_UST_DYNAMIC_TYPE_S32:
{
int32_t v = -32;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case LTTNG_UST_DYNAMIC_TYPE_S64:
{
int64_t v = -64;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case LTTNG_UST_DYNAMIC_TYPE_U8:
{
uint8_t v = 8;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case LTTNG_UST_DYNAMIC_TYPE_U16:
{
uint16_t v = 16;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case LTTNG_UST_DYNAMIC_TYPE_U32:
{
uint32_t v = 32;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case LTTNG_UST_DYNAMIC_TYPE_U64:
{
uint64_t v = 64;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(v));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
break;
}
case LTTNG_UST_DYNAMIC_TYPE_FLOAT:
{
float f = 22322.0;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(f));
- lttng_chan_buf->ops->event_write(ctx, &f, sizeof(f));
+ lttng_chan_buf->ops->event_write(ctx, &f, sizeof(f), lttng_ust_rb_alignof(f));
break;
}
case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
{
double d = 2.0;
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(d));
- lttng_chan_buf->ops->event_write(ctx, &d, sizeof(d));
+ lttng_chan_buf->ops->event_write(ctx, &d, sizeof(d), lttng_ust_rb_alignof(d));
break;
}
case LTTNG_UST_DYNAMIC_TYPE_STRING:
{
const char *str = "teststr";
- lttng_chan_buf->ops->event_write(ctx, str, strlen(str) + 1);
+ lttng_chan_buf->ops->event_write(ctx, str, strlen(str) + 1, 1);
break;
}
default: