return ret;
}
-static
-size_t dummy_get_size(struct lttng_ctx_field *field, size_t offset)
+/*
+ * Dummy context get_size callback: as shown here it contributes 0
+ * bytes. Renamed to the public lttng_ust_ prefix so other compile
+ * units can substitute it for application-provided callbacks.
+ *
+ * NOTE(review): this returns 0 while lttng_ust_dummy_record writes
+ * sizeof(sel_char) bytes — confirm no size-accounting lines were
+ * elided from this hunk, otherwise size and record are mismatched.
+ */
+size_t lttng_ust_dummy_get_size(struct lttng_ctx_field *field, size_t offset)
 {
 	size_t size = 0;
 	return size;
 }
-static
-void dummy_record(struct lttng_ctx_field *field,
+/*
+ * Dummy context record callback: writes the selector byte sel_char
+ * (file-scope, declared elsewhere in this file — presumably the
+ * "none" dynamic-type tag; verify at its definition) in place of a
+ * real application context value. Made public alongside
+ * lttng_ust_dummy_get_size.
+ */
+void lttng_ust_dummy_record(struct lttng_ctx_field *field,
 		struct lttng_ust_lib_ring_buffer_ctx *ctx,
 		struct lttng_channel *chan)
 {
 	chan->ops->event_write(ctx, &sel_char, sizeof(sel_char));
 }
-static
-void dummy_get_value(struct lttng_ctx_field *field,
+void lttng_ust_dummy_get_value(struct lttng_ctx_field *field,
struct lttng_ctx_value *value)
{
value->sel = LTTNG_UST_DYNAMIC_TYPE_NONE;
if (ust_lock())
goto end;
lttng_ust_context_set_session_provider(provider->name,
- dummy_get_size, dummy_record, dummy_get_value);
+ lttng_ust_dummy_get_size, lttng_ust_dummy_record,
+ lttng_ust_dummy_get_value);
cds_hlist_del(&provider->node);
end:
ust_unlock();
new_field.record = provider->record;
new_field.get_value = provider->get_value;
} else {
- new_field.get_size = dummy_get_size;
- new_field.record = dummy_record;
- new_field.get_value = dummy_get_value;
+ new_field.get_size = lttng_ust_dummy_get_size;
+ new_field.record = lttng_ust_dummy_record;
+ new_field.get_value = lttng_ust_dummy_get_value;
}
ret = lttng_context_add_rcu(ctx, &new_field);
if (ret) {
#define LTTNG_COMPACT_EVENT_BITS 5
#define LTTNG_COMPACT_TSC_BITS 27
+/*
+ * Controls whether application-provided context callbacks may be
+ * invoked directly. With APP_CTX_DISABLED, fields identified by
+ * lttng_context_is_app() fall back to the lttng_ust_dummy_*
+ * callbacks (see ctx_get_size/ctx_record), because pre-2.8 probe
+ * providers cannot guarantee that the same handler is used for both
+ * sizing and recording.
+ */
+enum app_ctx_mode {
+	APP_CTX_DISABLED,
+	APP_CTX_ENABLED,
+};
+
/*
* Keep the natural field alignment for _each field_ within this structure if
* you ever add/remove a field from this header. Packed attribute is not used
}
+/*
+ * Compute the size in bytes of the context payload for @ctx starting
+ * at @offset, including the leading alignment to ctx->largest_align.
+ * Returns final minus original offset; 0 when ctx is NULL.
+ *
+ * With mode == APP_CTX_DISABLED, application-context fields are sized
+ * with lttng_ust_dummy_get_size instead of their own callback, so the
+ * size reserved here cannot diverge from what ctx_record later writes
+ * while a provider concurrently registers/unregisters.
+ */
 static inline
-size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
+size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx,
+		enum app_ctx_mode mode)
 {
 	int i;
 	size_t orig_offset = offset;
 	if (caa_likely(!ctx))
 		return 0;
 	offset += lib_ring_buffer_align(offset, ctx->largest_align);
-	for (i = 0; i < ctx->nr_fields; i++)
-		offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
+	for (i = 0; i < ctx->nr_fields; i++) {
+		if (mode == APP_CTX_ENABLED) {
+			offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
+		} else {
+			if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
+				/*
+				 * Before UST 2.8, we cannot use the
+				 * application context, because we
+				 * cannot trust that the handler used
+				 * for get_size is the same used for
+				 * ctx_record, which would result in
+				 * corrupted traces when tracing
+				 * concurrently with application context
+				 * register/unregister.
+				 */
+				offset += lttng_ust_dummy_get_size(&ctx->fields[i], offset);
+			} else {
+				offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
+			}
+		}
+	}
 	return offset - orig_offset;
 }
+/*
+ * Serialize the context fields of @ctx into the ring buffer, after
+ * aligning bufctx to ctx->largest_align. No-op when ctx is NULL.
+ *
+ * With mode == APP_CTX_DISABLED, application-context fields are
+ * written via lttng_ust_dummy_record. The mode passed here must match
+ * the one passed to ctx_get_size for the same event, or the bytes
+ * written would not match the space reserved.
+ */
 static inline
 void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
 		struct lttng_channel *chan,
-		struct lttng_ctx *ctx)
+		struct lttng_ctx *ctx,
+		enum app_ctx_mode mode)
 {
 	int i;
 	if (caa_likely(!ctx))
 		return;
 	lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
-	for (i = 0; i < ctx->nr_fields; i++)
-		ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+	for (i = 0; i < ctx->nr_fields; i++) {
+		if (mode == APP_CTX_ENABLED) {
+			ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+		} else {
+			if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
+				/*
+				 * Before UST 2.8, we cannot use the
+				 * application context, because we
+				 * cannot trust that the handler used
+				 * for get_size is the same used for
+				 * ctx_record, which would result in
+				 * corrupted traces when tracing
+				 * concurrently with application context
+				 * register/unregister.
+				 */
+				lttng_ust_dummy_record(&ctx->fields[i], bufctx, chan);
+			} else {
+				ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+			}
+		}
+	}
 }
/*
struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
struct lttng_channel *lttng_chan = channel_get_private(chan);
+ struct lttng_event *event = ctx->priv;
struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
size_t orig_offset = offset;
size_t padding;
padding = 0;
WARN_ON_ONCE(1);
}
- offset += ctx_get_size(offset, lttng_ctx->chan_ctx);
- offset += ctx_get_size(offset, lttng_ctx->event_ctx);
-
+ if (lttng_ctx) {
+ /* 2.8+ probe ABI. */
+ offset += ctx_get_size(offset, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
+ offset += ctx_get_size(offset, lttng_ctx->event_ctx, APP_CTX_ENABLED);
+ } else {
+ /* Pre 2.8 probe ABI. */
+ offset += ctx_get_size(offset, lttng_chan->ctx, APP_CTX_DISABLED);
+ offset += ctx_get_size(offset, event->ctx, APP_CTX_DISABLED);
+ }
*pre_header_padding = padding;
return offset - orig_offset;
}
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_event *event = ctx->priv;
struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
if (caa_unlikely(ctx->rflags))
WARN_ON_ONCE(1);
}
- ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx);
- ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx);
+ if (lttng_ctx) {
+ /* 2.8+ probe ABI. */
+ ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
+ ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
+ } else {
+ /* Pre 2.8 probe ABI. */
+ ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
+ ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
+ }
lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
return;
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_event *event = ctx->priv;
struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
switch (lttng_chan->header_type) {
default:
WARN_ON_ONCE(1);
}
- ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx);
- ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx);
+ if (lttng_ctx) {
+ /* 2.8+ probe ABI. */
+ ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
+ ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
+ } else {
+ /* Pre 2.8 probe ABI. */
+ ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
+ ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
+ }
lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}