Refactoring: ring buffer context
author Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Wed, 28 Apr 2021 18:56:05 +0000 (14:56 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Thu, 29 Apr 2021 14:33:51 +0000 (10:33 -0400)
Split the ring buffer context into:

- Public ring buffer context (initialized by the probe),
- Private ring buffer context (initialized by the reserve callback).

Pass the event recorder rather than the channel as the ring buffer context
client private data (client_priv) for events generated from instrumentation
(which call into the ring buffer client).

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Change-Id: Iecd23f11c54da4ba58a97c46192f775a0a74bd85

19 files changed:
include/lttng/tracepoint-event-impl.h
include/ringbuffer/backend.h
include/ringbuffer/backend_internal.h
include/ringbuffer/config.h
include/ringbuffer/frontend_api.h
src/lib/ringbuffer/ring_buffer_frontend.c
src/lttng-context-callstack-legacy-impl.h
src/lttng-context-callstack-stackwalk-impl.h
src/lttng-context-cpu-id.c
src/lttng-context-interruptible.c
src/lttng-context-perf-counters.c
src/lttng-event-notifier-notification.c
src/lttng-events.c
src/lttng-ring-buffer-client.h
src/lttng-ring-buffer-event-notifier-client.h
src/lttng-ring-buffer-metadata-client.h
src/probes/lttng-kprobes.c
src/probes/lttng-kretprobes.c
src/probes/lttng-uprobes.c

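For orientation before the per-file hunks: the split moves all reserve-time state into a private structure embedded in the public context. A condensed sketch, abridged from the full definitions this commit adds to include/ringbuffer/config.h (field comments shortened):

	struct lttng_kernel_ring_buffer_ctx_private {
		struct channel *chan;			/* input to lib_ring_buffer_reserve() */
		int reserve_cpu;			/* cpu id picked by the reserve callback */
		size_t slot_size;			/* size of the reserved slot */
		unsigned long buf_offset;		/* offset following the record header */
		unsigned long pre_offset;		/* offset before header alignment padding */
		u64 tsc;				/* timestamp sampled at reserve */
		unsigned int rflags;			/* reservation flags */
		struct lib_ring_buffer *buf;		/* per-cpu buffer for this channel */
		struct lib_ring_buffer_backend_pages *backend_pages;
	};

	struct lib_ring_buffer_ctx {
		struct lttng_kernel_ring_buffer_ctx_private priv;	/* set by reserve callback */
		void *client_priv;			/* ring buffer client private data */
		size_t data_size;			/* size of payload */
		int largest_align;			/* largest alignment in payload */
		struct lttng_probe_ctx *probe_ctx;	/* probe context */
	};
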
diff --git a/include/lttng/tracepoint-event-impl.h b/include/lttng/tracepoint-event-impl.h
index a8a438b2bcc9e5d828ab12f2a8a99580c35315a8..dbb92b8535485b9805705736d3ab0a00668508d7 100644
@@ -1092,8 +1092,8 @@ static void __event_probe__##_name(_data_proto)                                           \
                        goto __post;                                                    \
                }                                                                       \
                __event_align = __event_get_align__##_name(_locvar_args);               \
-               lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event_len,             \
-                                        __event_align, -1, &__lttng_probe_ctx);        \
+               lib_ring_buffer_ctx_init(&__ctx, __event_recorder, __event_len,         \
+                                        __event_align, &__lttng_probe_ctx);            \
                __ret = __chan->ops->event_reserve(&__ctx, __event_recorder->priv->id); \
                if (__ret < 0)                                                          \
                        goto __post;                                                    \
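The probe now hands the event recorder to the client through client_priv; the client recovers the channel from it at reserve time. A minimal sketch of the client side, mirroring the lttng-ring-buffer-client.h hunk further down:

	/* Inside the client's event_reserve callback. */
	struct lttng_kernel_event_recorder *event_recorder = ctx->client_priv;
	struct lttng_channel *lttng_chan = event_recorder->chan;
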
diff --git a/include/ringbuffer/backend.h b/include/ringbuffer/backend.h
index a975c7ec429ca075abdc96670ef944e53b197144..5ceb4f1a26181a5f8b582ac7c7a5e5028cbfee9a 100644
@@ -75,10 +75,10 @@ void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
                           struct lib_ring_buffer_ctx *ctx,
                           const void *src, size_t len)
 {
-       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-       struct channel_backend *chanb = &ctx->chan->backend;
+       struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+       struct channel_backend *chanb = &ctx->priv.chan->backend;
        size_t index, pagecpy;
-       size_t offset = ctx->buf_offset;
+       size_t offset = ctx->priv.buf_offset;
        struct lib_ring_buffer_backend_pages *backend_pages;
 
        if (unlikely(!len))
@@ -95,7 +95,7 @@ void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
                                        src, len);
        else
                _lib_ring_buffer_write(bufb, offset, src, len, 0);
-       ctx->buf_offset += len;
+       ctx->priv.buf_offset += len;
 }
 
 /**
@@ -116,10 +116,10 @@ void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
                            struct lib_ring_buffer_ctx *ctx, int c, size_t len)
 {
 
-       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-       struct channel_backend *chanb = &ctx->chan->backend;
+       struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+       struct channel_backend *chanb = &ctx->priv.chan->backend;
        size_t index, pagecpy;
-       size_t offset = ctx->buf_offset;
+       size_t offset = ctx->priv.buf_offset;
        struct lib_ring_buffer_backend_pages *backend_pages;
 
        if (unlikely(!len))
@@ -135,7 +135,7 @@ void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
                                          c, len);
        else
                _lib_ring_buffer_memset(bufb, offset, c, len, 0);
-       ctx->buf_offset += len;
+       ctx->priv.buf_offset += len;
 }
 
 /*
@@ -213,10 +213,10 @@ void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
                           struct lib_ring_buffer_ctx *ctx,
                           const char *src, size_t len, int pad)
 {
-       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-       struct channel_backend *chanb = &ctx->chan->backend;
+       struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+       struct channel_backend *chanb = &ctx->priv.chan->backend;
        size_t index, pagecpy;
-       size_t offset = ctx->buf_offset;
+       size_t offset = ctx->priv.buf_offset;
        struct lib_ring_buffer_backend_pages *backend_pages;
 
        if (unlikely(!len))
@@ -250,7 +250,7 @@ void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
        } else {
                _lib_ring_buffer_strcpy(bufb, offset, src, len, 0, pad);
        }
-       ctx->buf_offset += len;
+       ctx->priv.buf_offset += len;
 }
 
 /**
@@ -271,10 +271,10 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config
                                    struct lib_ring_buffer_ctx *ctx,
                                    const void __user *src, size_t len)
 {
-       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-       struct channel_backend *chanb = &ctx->chan->backend;
+       struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+       struct channel_backend *chanb = &ctx->priv.chan->backend;
        size_t index, pagecpy;
-       size_t offset = ctx->buf_offset;
+       size_t offset = ctx->priv.buf_offset;
        struct lib_ring_buffer_backend_pages *backend_pages;
        unsigned long ret;
 
@@ -302,7 +302,7 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config
                _lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
        }
        pagefault_enable();
-       ctx->buf_offset += len;
+       ctx->priv.buf_offset += len;
 
        return;
 
@@ -338,10 +338,10 @@ void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_conf
                struct lib_ring_buffer_ctx *ctx,
                const void __user *src, size_t len, int pad)
 {
-       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-       struct channel_backend *chanb = &ctx->chan->backend;
+       struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+       struct channel_backend *chanb = &ctx->priv.chan->backend;
        size_t index, pagecpy;
-       size_t offset = ctx->buf_offset;
+       size_t offset = ctx->priv.buf_offset;
        struct lib_ring_buffer_backend_pages *backend_pages;
 
        if (unlikely(!len))
@@ -382,7 +382,7 @@ void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_conf
                                        len, 0, pad);
        }
        pagefault_enable();
-       ctx->buf_offset += len;
+       ctx->priv.buf_offset += len;
 
        return;
 
diff --git a/include/ringbuffer/backend_internal.h b/include/ringbuffer/backend_internal.h
index fd24c6741551f79aec2560dd598f65209dcfc5a5..8a93ab07ad1fe6ca03111538c3c561341bf51ffe 100644
@@ -195,9 +195,9 @@ void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *conf
                        struct lib_ring_buffer_ctx *ctx,
                        struct lib_ring_buffer_backend_pages **backend_pages)
 {
-       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-       struct channel_backend *chanb = &ctx->chan->backend;
-       size_t sbidx, offset = ctx->buf_offset;
+       struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+       struct channel_backend *chanb = &ctx->priv.chan->backend;
+       size_t sbidx, offset = ctx->priv.buf_offset;
        unsigned long sb_bindex, id;
        struct lib_ring_buffer_backend_pages *rpages;
 
@@ -206,7 +206,7 @@ void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *conf
        id = bufb->buf_wsb[sbidx].id;
        sb_bindex = subbuffer_id_get_index(config, id);
        rpages = bufb->array[sb_bindex];
-       CHAN_WARN_ON(ctx->chan,
+       CHAN_WARN_ON(ctx->priv.chan,
                     config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, id));
        *backend_pages = rpages;
@@ -218,7 +218,7 @@ struct lib_ring_buffer_backend_pages *
        lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
                struct lib_ring_buffer_ctx *ctx)
 {
-       return ctx->backend_pages;
+       return ctx->priv.backend_pages;
 }
 
 /*
diff --git a/include/ringbuffer/config.h b/include/ringbuffer/config.h
index 2019e14cbf4052c1789689c77e3bb59d24169a33..bc638f941f20365d76fef12b004c0619fe10934a 100644
@@ -20,6 +20,7 @@ struct lib_ring_buffer;
 struct channel;
 struct lib_ring_buffer_config;
 struct lib_ring_buffer_ctx;
+struct lttng_kernel_ring_buffer_ctx_private;
 
 /*
  * Ring buffer client callbacks. Only used by slow path, never on fast path.
@@ -156,6 +157,40 @@ struct lib_ring_buffer_config {
        struct lib_ring_buffer_client_cb cb;
 };
 
+/*
+ * ring buffer private context
+ *
+ * Private context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
+ * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
+ * lib_ring_buffer_write().
+ *
+ * Get struct lttng_kernel_ring_buffer_ctx parent with container_of().
+ */
+
+struct lttng_kernel_ring_buffer_ctx_private {
+       /* input received by lib_ring_buffer_reserve(). */
+       struct channel *chan;                   /* ring buffer channel */
+
+       /* output from lib_ring_buffer_reserve() */
+       int reserve_cpu;                        /* processor id updated by the reserve */
+       size_t slot_size;                       /* size of the reserved slot */
+       unsigned long buf_offset;               /* offset following the record header */
+       unsigned long pre_offset;               /*
+                                                * Initial offset position _before_
+                                                * the record is written. Positioned
+                                                * prior to record header alignment
+                                                * padding.
+                                                */
+       u64 tsc;                                /* time-stamp counter value */
+       unsigned int rflags;                    /* reservation flags */
+
+       struct lib_ring_buffer *buf;            /*
+                                                * buffer corresponding to processor id
+                                                * for this channel
+                                                */
+       struct lib_ring_buffer_backend_pages *backend_pages;
+};
+
 /*
  * ring buffer context
  *
@@ -164,57 +199,37 @@ struct lib_ring_buffer_config {
  * lib_ring_buffer_write().
  */
 struct lib_ring_buffer_ctx {
+       /* Private ring buffer context, set by reserve callback. */
+       struct lttng_kernel_ring_buffer_ctx_private priv;
+
        /* input received by lib_ring_buffer_reserve(), saved here. */
-       struct channel *chan;           /* channel */
-       void *priv;                     /* client private data */
+       void *client_priv;              /* Ring buffer client private data */
+
        size_t data_size;               /* size of payload */
        int largest_align;              /*
                                         * alignment of the largest element
                                         * in the payload
                                         */
-       int cpu;                        /* processor id */
-
-       /* output from lib_ring_buffer_reserve() */
-       struct lib_ring_buffer *buf;    /*
-                                        * buffer corresponding to processor id
-                                        * for this channel
-                                        */
-       size_t slot_size;               /* size of the reserved slot */
-       unsigned long buf_offset;       /* offset following the record header */
-       unsigned long pre_offset;       /*
-                                        * Initial offset position _before_
-                                        * the record is written. Positioned
-                                        * prior to record header alignment
-                                        * padding.
-                                        */
-       u64 tsc;                        /* time-stamp counter value */
-       unsigned int rflags;            /* reservation flags */
-       /* Cache backend pages pointer chasing. */
-       struct lib_ring_buffer_backend_pages *backend_pages;
+       struct lttng_probe_ctx *probe_ctx;      /* Probe context */
 };
 
 /**
  * lib_ring_buffer_ctx_init - initialize ring buffer context
  * @ctx: ring buffer context to initialize
- * @chan: channel
- * @priv: client private data
+ * @client_priv: client private data
  * @data_size: size of record data payload. It must be greater than 0.
  * @largest_align: largest alignment within data payload types
- * @cpu: processor id
  */
 static inline
 void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
-                             struct channel *chan,
+                             void *client_priv,
                              size_t data_size, int largest_align,
-                             int cpu, void *priv)
+                             struct lttng_probe_ctx *probe_ctx)
 {
-       ctx->chan = chan;
-       ctx->priv = priv;
+       ctx->client_priv = client_priv;
        ctx->data_size = data_size;
        ctx->largest_align = largest_align;
-       ctx->cpu = cpu;
-       ctx->rflags = 0;
-       ctx->backend_pages = NULL;
+       ctx->probe_ctx = probe_ctx;
 }
 
 /*
@@ -282,7 +297,7 @@ static inline
 void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
                           size_t alignment)
 {
-       ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
+       ctx->priv.buf_offset += lib_ring_buffer_align(ctx->priv.buf_offset,
                                                 alignment);
 }
 
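Caller-side view of the new initializer, as used by the probe hunks below (payload, chan and lttng_probe_ctx stand in for the probe's locals):

	struct lib_ring_buffer_ctx ctx;
	int ret;

	/*
	 * No channel and no cpu argument anymore: the reserve callback
	 * resolves both and records them in ctx.priv.
	 */
	lib_ring_buffer_ctx_init(&ctx, event_recorder, sizeof(payload),
				 lttng_alignof(payload), &lttng_probe_ctx);
	ret = chan->ops->event_reserve(&ctx, event_recorder->priv->id);
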
diff --git a/include/ringbuffer/frontend_api.h b/include/ringbuffer/frontend_api.h
index 3fa6c82fb59fbe4ebbc3ee6f83fd7839f1c66ae7..1444e60adc1350234e405e164f8b8662654386c6 100644
@@ -75,13 +75,13 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
                                unsigned long *o_begin, unsigned long *o_end,
                                unsigned long *o_old, size_t *before_hdr_pad)
 {
-       struct channel *chan = ctx->chan;
-       struct lib_ring_buffer *buf = ctx->buf;
+       struct channel *chan = ctx->priv.chan;
+       struct lib_ring_buffer *buf = ctx->priv.buf;
        *o_begin = v_read(config, &buf->offset);
        *o_old = *o_begin;
 
-       ctx->tsc = lib_ring_buffer_clock_read(chan);
-       if ((int64_t) ctx->tsc == -EIO)
+       ctx->priv.tsc = lib_ring_buffer_clock_read(chan);
+       if ((int64_t) ctx->priv.tsc == -EIO)
                return 1;
 
        /*
@@ -91,18 +91,18 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
         */
        prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
 
-       if (last_tsc_overflow(config, buf, ctx->tsc))
-               ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+       if (last_tsc_overflow(config, buf, ctx->priv.tsc))
+               ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
        if (unlikely(subbuf_offset(*o_begin, chan) == 0))
                return 1;
 
-       ctx->slot_size = record_header_size(config, chan, *o_begin,
+       ctx->priv.slot_size = record_header_size(config, chan, *o_begin,
                                            before_hdr_pad, ctx, client_ctx);
-       ctx->slot_size +=
-               lib_ring_buffer_align(*o_begin + ctx->slot_size,
+       ctx->priv.slot_size +=
+               lib_ring_buffer_align(*o_begin + ctx->priv.slot_size,
                                      ctx->largest_align) + ctx->data_size;
-       if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+       if (unlikely((subbuf_offset(*o_begin, chan) + ctx->priv.slot_size)
                     > chan->backend.subbuf_size))
                return 1;
 
@@ -110,7 +110,7 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
         * Record fits in the current buffer and we are not on a switch
         * boundary. It's safe to write.
         */
-       *o_end = *o_begin + ctx->slot_size;
+       *o_end = *o_begin + ctx->priv.slot_size;
 
        if (unlikely((subbuf_offset(*o_end, chan)) == 0))
                /*
@@ -143,7 +143,7 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
                            struct lib_ring_buffer_ctx *ctx,
                            void *client_ctx)
 {
-       struct channel *chan = ctx->chan;
+       struct channel *chan = ctx->priv.chan;
        struct lib_ring_buffer *buf;
        unsigned long o_begin, o_end, o_old;
        size_t before_hdr_pad = 0;
@@ -152,12 +152,12 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
                return -EAGAIN;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
+               buf = per_cpu_ptr(chan->backend.buf, ctx->priv.reserve_cpu);
        else
                buf = chan->backend.buf;
        if (unlikely(atomic_read(&buf->record_disabled)))
                return -EAGAIN;
-       ctx->buf = buf;
+       ctx->priv.buf = buf;
 
        /*
         * Perform retryable operations.
@@ -166,7 +166,7 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
                                                 &o_end, &o_old, &before_hdr_pad)))
                goto slow_path;
 
-       if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+       if (unlikely(v_cmpxchg(config, &ctx->priv.buf->offset, o_old, o_end)
                     != o_old))
                goto slow_path;
 
@@ -176,21 +176,21 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
         * record headers, never the opposite (missing a full TSC record header
         * when it would be needed).
         */
-       save_last_tsc(config, ctx->buf, ctx->tsc);
+       save_last_tsc(config, ctx->priv.buf, ctx->priv.tsc);
 
        /*
         * Push the reader if necessary
         */
-       lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);
+       lib_ring_buffer_reserve_push_reader(ctx->priv.buf, chan, o_end - 1);
 
        /*
         * Clear noref flag for this subbuffer.
         */
-       lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
+       lib_ring_buffer_clear_noref(config, &ctx->priv.buf->backend,
                                subbuf_index(o_end - 1, chan));
 
-       ctx->pre_offset = o_begin;
-       ctx->buf_offset = o_begin + before_hdr_pad;
+       ctx->priv.pre_offset = o_begin;
+       ctx->priv.buf_offset = o_begin + before_hdr_pad;
        return 0;
 slow_path:
        return lib_ring_buffer_reserve_slow(ctx, client_ctx);
@@ -231,9 +231,9 @@ static inline
 void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
                            const struct lib_ring_buffer_ctx *ctx)
 {
-       struct channel *chan = ctx->chan;
-       struct lib_ring_buffer *buf = ctx->buf;
-       unsigned long offset_end = ctx->buf_offset;
+       struct channel *chan = ctx->priv.chan;
+       struct lib_ring_buffer *buf = ctx->priv.buf;
+       unsigned long offset_end = ctx->priv.buf_offset;
        unsigned long endidx = subbuf_index(offset_end - 1, chan);
        unsigned long commit_count;
        struct commit_counters_hot *cc_hot = &buf->commit_hot[endidx];
@@ -257,7 +257,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
        } else
                smp_wmb();
 
-       v_add(config, ctx->slot_size, &cc_hot->cc);
+       v_add(config, ctx->priv.slot_size, &cc_hot->cc);
 
        /*
         * commit count read can race with concurrent OOO commit count updates.
@@ -280,7 +280,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
        commit_count = v_read(config, &cc_hot->cc);
 
        lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-                                     commit_count, endidx, ctx->tsc);
+                                     commit_count, endidx, ctx->priv.tsc);
        /*
         * Update used size at each commit. It's needed only for extracting
         * ring_buffer buffers from vmcore, after crash.
@@ -303,8 +303,8 @@ static inline
 int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
                                        const struct lib_ring_buffer_ctx *ctx)
 {
-       struct lib_ring_buffer *buf = ctx->buf;
-       unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
+       struct lib_ring_buffer *buf = ctx->priv.buf;
+       unsigned long end_offset = ctx->priv.pre_offset + ctx->priv.slot_size;
 
        /*
         * We need to ensure that if the cmpxchg succeeds and discards the
@@ -320,7 +320,7 @@ int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *con
         */
        save_last_tsc(config, buf, 0ULL);
 
-       if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+       if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->priv.pre_offset)
                   != end_offset))
                return -EPERM;
        else
diff --git a/src/lib/ringbuffer/ring_buffer_frontend.c b/src/lib/ringbuffer/ring_buffer_frontend.c
index 38ba05d9cb2344ff17e7ff6b954ff39d60347aa5..d9e64dff9d15eb616df6b5147f4f0b4ec557da1b 100644
@@ -2011,14 +2011,14 @@ retry:
        offsets->switch_old_end = 0;
        offsets->pre_header_padding = 0;
 
-       ctx->tsc = config->cb.ring_buffer_clock_read(chan);
-       if ((int64_t) ctx->tsc == -EIO)
+       ctx->priv.tsc = config->cb.ring_buffer_clock_read(chan);
+       if ((int64_t) ctx->priv.tsc == -EIO)
                return -EIO;
 
-       if (last_tsc_overflow(config, buf, ctx->tsc))
-               ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+       if (last_tsc_overflow(config, buf, ctx->priv.tsc))
+               ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-       if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+       if (unlikely(subbuf_offset(offsets->begin, ctx->priv.chan) == 0)) {
                offsets->switch_new_start = 1;          /* For offsets->begin */
        } else {
                offsets->size = config->cb.record_header_size(config, chan,
@@ -2175,13 +2175,13 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_lost_event_too_big);
 int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
                void *client_ctx)
 {
-       struct channel *chan = ctx->chan;
+       struct channel *chan = ctx->priv.chan;
        const struct lib_ring_buffer_config *config = &chan->backend.config;
        struct lib_ring_buffer *buf;
        struct switch_offsets offsets;
        int ret;
 
-       ctx->buf = buf = get_current_buf(chan, ctx->cpu);
+       ctx->priv.buf = buf = get_current_buf(chan, ctx->priv.reserve_cpu);
        offsets.size = 0;
 
        do {
@@ -2199,7 +2199,7 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
         * records, never the opposite (missing a full TSC record when it would
         * be needed).
         */
-       save_last_tsc(config, buf, ctx->tsc);
+       save_last_tsc(config, buf, ctx->priv.tsc);
 
        /*
         * Push the reader if necessary
@@ -2218,21 +2218,21 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
        if (unlikely(offsets.switch_old_end)) {
                lib_ring_buffer_clear_noref(config, &buf->backend,
                                            subbuf_index(offsets.old - 1, chan));
-               lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
+               lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->priv.tsc);
        }
 
        /*
         * Populate new subbuffer.
         */
        if (unlikely(offsets.switch_new_start))
-               lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
+               lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->priv.tsc);
 
        if (unlikely(offsets.switch_new_end))
-               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->priv.tsc);
 
-       ctx->slot_size = offsets.size;
-       ctx->pre_offset = offsets.begin;
-       ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
+       ctx->priv.slot_size = offsets.size;
+       ctx->priv.pre_offset = offsets.begin;
+       ctx->priv.buf_offset = offsets.begin + offsets.pre_header_padding;
        return 0;
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
diff --git a/src/lttng-context-callstack-legacy-impl.h b/src/lttng-context-callstack-legacy-impl.h
index b74b966a21b7c3e74663fdff6b60b827aff7ce67..edd13cf5d30445278cec1b49f522e54734e6ba92 100644
@@ -107,7 +107,7 @@ struct stack_trace *stack_trace_context(struct lttng_kernel_ctx_field *field,
         * Do not gather the userspace callstack context when the event was
         * triggered by the userspace callstack context saving mechanism.
         */
-       cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+       cs_user_nesting = per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
 
        if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
                return NULL;
@@ -119,8 +119,8 @@ struct stack_trace *stack_trace_context(struct lttng_kernel_ctx_field *field,
         * max nesting is checked in lib_ring_buffer_get_cpu().
         * Check it again as a safety net.
         */
-       cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
-       buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+       cs = per_cpu_ptr(fdata->cs_percpu, ctx->priv.reserve_cpu);
+       buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->priv.reserve_cpu) - 1;
        if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
                return NULL;
 
@@ -163,13 +163,13 @@ size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_kernel_ctx_
        trace->nr_entries = 0;
 
        if (fdata->mode == CALLSTACK_USER)
-               ++per_cpu(callstack_user_nesting, ctx->cpu);
+               ++per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
 
        /* do the real work and reserve space */
        cs_types[fdata->mode].save_func(trace);
 
        if (fdata->mode == CALLSTACK_USER)
-               per_cpu(callstack_user_nesting, ctx->cpu)--;
+               per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu)--;
 
        /*
         * Remove final ULONG_MAX delimiter. If we cannot find it, add
diff --git a/src/lttng-context-callstack-stackwalk-impl.h b/src/lttng-context-callstack-stackwalk-impl.h
index 7c452491ea46458f7f524c2f189904266f392728..a7c5a062fb10f521f8a3722f2f0c1a40bab88850 100644
@@ -109,7 +109,7 @@ struct lttng_stack_trace *stack_trace_context(struct lttng_kernel_ctx_field *fie
         * Do not gather the userspace callstack context when the event was
         * triggered by the userspace callstack context saving mechanism.
         */
-       cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+       cs_user_nesting = per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
 
        if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
                return NULL;
@@ -121,8 +121,8 @@ struct lttng_stack_trace *stack_trace_context(struct lttng_kernel_ctx_field *fie
         * max nesting is checked in lib_ring_buffer_get_cpu().
         * Check it again as a safety net.
         */
-       cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
-       buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+       cs = per_cpu_ptr(fdata->cs_percpu, ctx->priv.reserve_cpu);
+       buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->priv.reserve_cpu) - 1;
        if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
                return NULL;
 
@@ -171,11 +171,11 @@ size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_kernel_ctx_
                                                MAX_ENTRIES, 0);
                break;
        case CALLSTACK_USER:
-               ++per_cpu(callstack_user_nesting, ctx->cpu);
+               ++per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
                /* do the real work and reserve space */
                trace->nr_entries = save_func_user(trace->entries,
                                                MAX_ENTRIES);
-               per_cpu(callstack_user_nesting, ctx->cpu)--;
+               per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu)--;
                break;
        default:
                WARN_ON_ONCE(1);
diff --git a/src/lttng-context-cpu-id.c b/src/lttng-context-cpu-id.c
index 47c4aa20f5d3dfec78a3b4317780122bcbcf9f57..7ae79a64347739ada4eec1172e22963e1553ddc0 100644
@@ -33,7 +33,7 @@ void cpu_id_record(struct lttng_kernel_ctx_field *field,
 {
        int cpu;
 
-       cpu = ctx->cpu;
+       cpu = ctx->priv.reserve_cpu;
        lib_ring_buffer_align_ctx(ctx, lttng_alignof(cpu));
        chan->ops->event_write(ctx, &cpu, sizeof(cpu));
 }
diff --git a/src/lttng-context-interruptible.c b/src/lttng-context-interruptible.c
index 5cc64b2be46037c8d385ea961d72ccc5e95cb9c5..1c78d59b737c21afb686923fc6bff11963518138 100644
@@ -36,7 +36,7 @@ void interruptible_record(struct lttng_kernel_ctx_field *field,
                struct lib_ring_buffer_ctx *ctx,
                struct lttng_channel *chan)
 {
-       struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+       struct lttng_probe_ctx *lttng_probe_ctx = ctx->probe_ctx;
        int8_t interruptible = lttng_probe_ctx->interruptible;
 
        lib_ring_buffer_align_ctx(ctx, lttng_alignof(interruptible));
diff --git a/src/lttng-context-perf-counters.c b/src/lttng-context-perf-counters.c
index e637bb75ca94c3ea2c991871ba2ce40617894f03..535784752a790604f4fe933774d5de23d9d01e05 100644
@@ -38,7 +38,7 @@ void perf_counter_record(struct lttng_kernel_ctx_field *field,
        struct perf_event *event;
        uint64_t value;
 
-       event = perf_field->e[ctx->cpu];
+       event = perf_field->e[ctx->priv.reserve_cpu];
        if (likely(event)) {
                if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
                        value = 0;
diff --git a/src/lttng-event-notifier-notification.c b/src/lttng-event-notifier-notification.c
index ce5a670f2d825e96db21f317476a0c1a2062d9c4..e5bd172a6417e514dada2f4f57de32e45ec6de4b 100644
@@ -401,7 +401,7 @@ void notification_send(struct lttng_event_notifier_notification *notif,
        kernel_notif.capture_buf_size = capture_buffer_content_len;
 
        lib_ring_buffer_ctx_init(&ctx, event_notifier_group->chan, reserve_size,
-                       lttng_alignof(kernel_notif), -1, NULL);
+                       lttng_alignof(kernel_notif), NULL);
        ret = event_notifier_group->ops->event_reserve(&ctx, 0);
        if (ret < 0) {
                record_error(event_notifier);
diff --git a/src/lttng-events.c b/src/lttng-events.c
index 51c5cb39a58bcecfeb23c4cc267a36b3356b4408..4c266466953a7ed0706092b42d8a9d87ca2a0659 100644
@@ -2882,7 +2882,7 @@ int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
                        stream->transport->ops.packet_avail_size(chan),
                        len);
        lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
-                       sizeof(char), -1, NULL);
+                       sizeof(char), NULL);
        /*
         * If reservation failed, return an error to the caller.
         */
diff --git a/src/lttng-ring-buffer-client.h b/src/lttng-ring-buffer-client.h
index 26034e92cf79b864f0f02087f880d240f2f2bfa8..ff404368edc6fac5ae6b48fddf0658f53a9bd842 100644
@@ -149,7 +149,7 @@ size_t record_header_size(const struct lib_ring_buffer_config *config,
        case 1: /* compact */
                padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
                offset += padding;
-               if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+               if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
                        offset += sizeof(uint32_t);     /* id and timestamp */
                } else {
                        /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
@@ -165,7 +165,7 @@ size_t record_header_size(const struct lib_ring_buffer_config *config,
                padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
                offset += padding;
                offset += sizeof(uint16_t);
-               if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+               if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
                        offset += sizeof(uint32_t);     /* timestamp */
                } else {
@@ -207,9 +207,9 @@ void lttng_write_event_header(const struct lib_ring_buffer_config *config,
                            struct lib_ring_buffer_ctx *ctx,
                            uint32_t event_id)
 {
-       struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+       struct lttng_channel *lttng_chan = channel_get_private(ctx->priv.chan);
 
-       if (unlikely(ctx->rflags))
+       if (unlikely(ctx->priv.rflags))
                goto slow_path;
 
        switch (lttng_chan->header_type) {
@@ -224,13 +224,13 @@ void lttng_write_event_header(const struct lib_ring_buffer_config *config,
                bt_bitfield_write(&id_time, uint32_t,
                                LTTNG_COMPACT_EVENT_BITS,
                                LTTNG_COMPACT_TSC_BITS,
-                               ctx->tsc);
+                               ctx->priv.tsc);
                lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                break;
        }
        case 2: /* large */
        {
-               uint32_t timestamp = (uint32_t) ctx->tsc;
+               uint32_t timestamp = (uint32_t) ctx->priv.tsc;
                uint16_t id = event_id;
 
                lib_ring_buffer_write(config, ctx, &id, sizeof(id));
@@ -256,11 +256,11 @@ void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
                                 struct lib_ring_buffer_ctx *ctx,
                                 uint32_t event_id)
 {
-       struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+       struct lttng_channel *lttng_chan = channel_get_private(ctx->priv.chan);
 
        switch (lttng_chan->header_type) {
        case 1: /* compact */
-               if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+               if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
                        uint32_t id_time = 0;
 
                        bt_bitfield_write(&id_time, uint32_t,
@@ -269,11 +269,11 @@ void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
                                        event_id);
                        bt_bitfield_write(&id_time, uint32_t,
                                        LTTNG_COMPACT_EVENT_BITS,
-                                       LTTNG_COMPACT_TSC_BITS, ctx->tsc);
+                                       LTTNG_COMPACT_TSC_BITS, ctx->priv.tsc);
                        lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                } else {
                        uint8_t id = 0;
-                       uint64_t timestamp = ctx->tsc;
+                       uint64_t timestamp = ctx->priv.tsc;
 
                        bt_bitfield_write(&id, uint8_t,
                                        0,
@@ -289,8 +289,8 @@ void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
                break;
        case 2: /* large */
        {
-               if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
-                       uint32_t timestamp = (uint32_t) ctx->tsc;
+               if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+                       uint32_t timestamp = (uint32_t) ctx->priv.tsc;
                        uint16_t id = event_id;
 
                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
@@ -298,7 +298,7 @@ void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                } else {
                        uint16_t id = 65535;
-                       uint64_t timestamp = ctx->tsc;
+                       uint64_t timestamp = ctx->priv.tsc;
 
                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        /* Align extended struct on largest member */
@@ -607,14 +607,17 @@ static
 int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
                      uint32_t event_id)
 {
-       struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+       struct lttng_kernel_event_recorder *event_recorder = ctx->client_priv;
+       struct lttng_channel *lttng_chan = event_recorder->chan;
        struct lttng_client_ctx client_ctx;
        int ret, cpu;
 
        cpu = lib_ring_buffer_get_cpu(&client_config);
        if (unlikely(cpu < 0))
                return -EPERM;
-       ctx->cpu = cpu;
+       memset(&ctx->priv, 0, sizeof(ctx->priv));
+       ctx->priv.chan = lttng_chan->chan;
+       ctx->priv.reserve_cpu = cpu;
 
        /* Compute internal size of context structures. */
        ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len, lttng_chan, ctx);
@@ -622,11 +625,11 @@ int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
        switch (lttng_chan->header_type) {
        case 1: /* compact */
                if (event_id > 30)
-                       ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+                       ctx->priv.rflags |= LTTNG_RFLAG_EXTENDED;
                break;
        case 2: /* large */
                if (event_id > 65534)
-                       ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+                       ctx->priv.rflags |= LTTNG_RFLAG_EXTENDED;
                break;
        default:
                WARN_ON_ONCE(1);
@@ -636,7 +639,7 @@ int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
        if (unlikely(ret))
                goto put;
        lib_ring_buffer_backend_get_pages(&client_config, ctx,
-                       &ctx->backend_pages);
+                       &ctx->priv.backend_pages);
        lttng_write_event_header(&client_config, ctx, event_id);
        return 0;
 put:
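Each reserve callback now owns the private context: it zeroes ctx->priv, resolves the channel from client_priv, then calls into the ring buffer. A sketch of the shape shared by the three clients touched by this commit (per-cpu clients additionally set reserve_cpu):

	memset(&ctx->priv, 0, sizeof(ctx->priv));
	ctx->priv.chan = chan;		/* resolved from ctx->client_priv */
	ctx->priv.reserve_cpu = cpu;	/* per-cpu clients only */
	ret = lib_ring_buffer_reserve(&client_config, ctx, client_ctx);
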
diff --git a/src/lttng-ring-buffer-event-notifier-client.h b/src/lttng-ring-buffer-event-notifier-client.h
index b5e91c550c812d538528c9fe17e428dc02836bfd..993c96da3699048ab58fe2989fea93d00737aeed 100644
@@ -303,13 +303,17 @@ void lttng_write_event_notifier_header(const struct lib_ring_buffer_config *conf
 static
 int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
 {
+       struct channel *chan = ctx->client_priv;
        int ret;
 
+       memset(&ctx->priv, 0, sizeof(ctx->priv));
+       ctx->priv.chan = chan;
+
        ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
        if (ret)
                return ret;
        lib_ring_buffer_backend_get_pages(&client_config, ctx,
-                       &ctx->backend_pages);
+                       &ctx->priv.backend_pages);
 
        lttng_write_event_notifier_header(&client_config, ctx);
        return 0;
diff --git a/src/lttng-ring-buffer-metadata-client.h b/src/lttng-ring-buffer-metadata-client.h
index 6fa0c2b1f55dfc0dc219b9ac0baa2549191c27e8..7e418001f85fb51fadb2a31c5a38dec4f703a29e 100644
@@ -302,13 +302,17 @@ void lttng_buffer_read_close(struct lib_ring_buffer *buf)
 static
 int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
 {
+       struct channel *chan = ctx->client_priv;
        int ret;
 
+       memset(&ctx->priv, 0, sizeof(ctx->priv));
+       ctx->priv.chan = chan;
+
        ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
        if (ret)
                return ret;
        lib_ring_buffer_backend_get_pages(&client_config, ctx,
-                       &ctx->backend_pages);
+                       &ctx->priv.backend_pages);
        return 0;
 
 }
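Note the asymmetry stated in the commit message: only instrumentation-generated events pass the event recorder. The metadata and event-notifier paths (lttng-events.c and lttng-event-notifier-notification.c above) keep passing the channel as the client private data, with no probe context:

	lib_ring_buffer_ctx_init(&ctx, chan, reserve_len, sizeof(char), NULL);
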
diff --git a/src/probes/lttng-kprobes.c b/src/probes/lttng-kprobes.c
index 238441f586410a9c28be9a445692543bc132fdc4..39de7d3d3e1e1d6bdb53a780036a56909dedeb81 100644
@@ -63,8 +63,8 @@ int lttng_kprobes_event_handler_pre(struct kprobe *p, struct pt_regs *regs)
                struct lib_ring_buffer_ctx ctx;
                int ret;
 
-               lib_ring_buffer_ctx_init(&ctx, chan->chan, sizeof(data),
-                                        lttng_alignof(data), -1, &lttng_probe_ctx);
+               lib_ring_buffer_ctx_init(&ctx, event_recorder, sizeof(data),
+                                        lttng_alignof(data), &lttng_probe_ctx);
                ret = chan->ops->event_reserve(&ctx, event_recorder->priv->id);
                if (ret < 0)
                        return 0;
diff --git a/src/probes/lttng-kretprobes.c b/src/probes/lttng-kretprobes.c
index 378a0e833d51bb2da2b42e193fd5a98153c7a981..03561703dcbc3d3e66c8b80803d9ac11f491f28d 100644
@@ -81,8 +81,8 @@ int _lttng_kretprobes_handler(struct kretprobe_instance *krpi,
                payload.ip = (unsigned long) lttng_get_kretprobe(krpi)->kp.addr;
                payload.parent_ip = (unsigned long) krpi->ret_addr;
 
-               lib_ring_buffer_ctx_init(&ctx, chan->chan, sizeof(payload),
-                                        lttng_alignof(payload), -1, &lttng_probe_ctx);
+               lib_ring_buffer_ctx_init(&ctx, event_recorder, sizeof(payload),
+                                        lttng_alignof(payload), &lttng_probe_ctx);
                ret = chan->ops->event_reserve(&ctx, event_recorder->priv->id);
                if (ret < 0)
                        return 0;
diff --git a/src/probes/lttng-uprobes.c b/src/probes/lttng-uprobes.c
index 233813f0d6231dceaaee0e9024aaed63f8ea5546..20865889bd17084b7bcc72396f27a672ccb22b16 100644
@@ -68,8 +68,8 @@ int lttng_uprobes_event_handler_pre(struct uprobe_consumer *uc, struct pt_regs *
                struct lib_ring_buffer_ctx ctx;
                int ret;
 
-               lib_ring_buffer_ctx_init(&ctx, chan->chan,
-                       sizeof(payload), lttng_alignof(payload), -1, &lttng_probe_ctx);
+               lib_ring_buffer_ctx_init(&ctx, event_recorder,
+                       sizeof(payload), lttng_alignof(payload), &lttng_probe_ctx);
 
                ret = chan->ops->event_reserve(&ctx, event_recorder->priv->id);
                if (ret < 0)