From: Mathieu Desnoyers
Date: Sat, 27 May 2017 11:28:03 +0000 (+0200)
Subject: Calculate context length outside of retry loop
X-Git-Tag: v2.11.0-rc1~123
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=cc62f29ec5d5149ae6dba1a9acb538e06da07a94;p=lttng-modules.git

Calculate context length outside of retry loop

Allow the context length calculation to have side-effects (e.g. page
faults) which trigger event tracing, by moving the calculation outside
of the buffer space reservation retry loop.

This also paves the way for dynamically sized contexts, which would be
expected to put their size on the internal stack.

Note that the context length calculation is performed *after* the event
payload field length calculation, so the stack needs to be used
accordingly.

Signed-off-by: Mathieu Desnoyers
---

diff --git a/lib/ringbuffer/config.h b/lib/ringbuffer/config.h
index 60174db6..05985197 100644
--- a/lib/ringbuffer/config.h
+++ b/lib/ringbuffer/config.h
@@ -48,7 +48,8 @@ struct lib_ring_buffer_client_cb {
 	size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
 				      struct channel *chan, size_t offset,
 				      size_t *pre_header_padding,
-				      struct lib_ring_buffer_ctx *ctx);
+				      struct lib_ring_buffer_ctx *ctx,
+				      void *client_ctx);

 	/* Slow path only, at subbuffer switch */
 	size_t (*subbuffer_header_size) (void);
diff --git a/lib/ringbuffer/frontend_api.h b/lib/ringbuffer/frontend_api.h
index a6f2c6f4..23118c46 100644
--- a/lib/ringbuffer/frontend_api.h
+++ b/lib/ringbuffer/frontend_api.h
@@ -87,6 +87,7 @@ void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
 static inline
 int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
 				struct lib_ring_buffer_ctx *ctx,
+				void *client_ctx,
 				unsigned long *o_begin, unsigned long *o_end,
 				unsigned long *o_old, size_t *before_hdr_pad)
 {
@@ -113,7 +114,7 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
 		return 1;

 	ctx->slot_size = record_header_size(config, chan, *o_begin,
-					    before_hdr_pad, ctx);
+					    before_hdr_pad, ctx, client_ctx);
 	ctx->slot_size +=
 		lib_ring_buffer_align(*o_begin + ctx->slot_size,
 				      ctx->largest_align) + ctx->data_size;
@@ -155,7 +156,8 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
  */
 static inline
 int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
-			    struct lib_ring_buffer_ctx *ctx)
+			    struct lib_ring_buffer_ctx *ctx,
+			    void *client_ctx)
 {
 	struct channel *chan = ctx->chan;
 	struct lib_ring_buffer *buf;
@@ -176,7 +178,7 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
 	/*
 	 * Perform retryable operations.
 	 */
-	if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+	if (unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
 						 &o_end, &o_old, &before_hdr_pad)))
 		goto slow_path;

@@ -207,7 +209,7 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
 	ctx->buf_offset = o_begin + before_hdr_pad;
 	return 0;
 slow_path:
-	return lib_ring_buffer_reserve_slow(ctx);
+	return lib_ring_buffer_reserve_slow(ctx, client_ctx);
 }

 /**
diff --git a/lib/ringbuffer/frontend_internal.h b/lib/ringbuffer/frontend_internal.h
index f143ecae..26bd7c24 100644
--- a/lib/ringbuffer/frontend_internal.h
+++ b/lib/ringbuffer/frontend_internal.h
@@ -150,7 +150,8 @@ int last_tsc_overflow(const struct lib_ring_buffer_config *config,
 #endif

 extern
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);
+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+		void *client_ctx);

 extern
 void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c
index 3d2e400d..31075250 100644
--- a/lib/ringbuffer/ring_buffer_frontend.c
+++ b/lib/ringbuffer/ring_buffer_frontend.c
@@ -1886,7 +1886,8 @@ static
 int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 				     struct channel *chan,
 				     struct switch_offsets *offsets,
-				     struct lib_ring_buffer_ctx *ctx)
+				     struct lib_ring_buffer_ctx *ctx,
+				     void *client_ctx)
 {
 	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long reserve_commit_diff, offset_cmp;
@@ -1912,7 +1913,7 @@ retry:
 		offsets->size = config->cb.record_header_size(config, chan,
 						offsets->begin,
 						&offsets->pre_header_padding,
-						ctx);
+						ctx, client_ctx);
 		offsets->size +=
 			lib_ring_buffer_align(offsets->begin + offsets->size,
 					      ctx->largest_align)
@@ -1996,7 +1997,7 @@ retry:
 			config->cb.record_header_size(config, chan,
 						offsets->begin,
 						&offsets->pre_header_padding,
-						ctx);
+						ctx, client_ctx);
 		offsets->size +=
 			lib_ring_buffer_align(offsets->begin + offsets->size,
 					      ctx->largest_align)
@@ -2060,7 +2061,8 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_lost_event_too_big);
  * -EIO for other errors, else returns 0.
  * It will take care of sub-buffer switching.
  */
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+		void *client_ctx)
 {
 	struct channel *chan = ctx->chan;
 	const struct lib_ring_buffer_config *config = &chan->backend.config;
@@ -2073,7 +2075,7 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)

 	do {
 		ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
-						       ctx);
+						       ctx, client_ctx);
 		if (unlikely(ret))
 			return ret;
 	} while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
diff --git a/lttng-ring-buffer-client.h b/lttng-ring-buffer-client.h
index 63f2b4ca..48d30429 100644
--- a/lttng-ring-buffer-client.h
+++ b/lttng-ring-buffer-client.h
@@ -76,6 +76,10 @@ struct packet_header {
 	} ctx;
 };

+struct lttng_client_ctx {
+	size_t packet_context_len;
+	size_t event_context_len;
+};
 static inline notrace
 u64 lib_ring_buffer_clock_read(struct channel *chan)
 {
@@ -83,17 +87,31 @@ u64 lib_ring_buffer_clock_read(struct channel *chan)
 }

 static inline
-size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
+size_t ctx_get_aligned_size(size_t offset, struct lttng_ctx *ctx,
+		size_t ctx_len)
 {
-	int i;
 	size_t orig_offset = offset;

 	if (likely(!ctx))
 		return 0;
 	offset += lib_ring_buffer_align(offset, ctx->largest_align);
+	offset += ctx_len;
+	return offset - orig_offset;
+}
+
+static inline
+void ctx_get_struct_size(struct lttng_ctx *ctx, size_t *ctx_len)
+{
+	int i;
+	size_t offset = 0;
+
+	if (likely(!ctx)) {
+		*ctx_len = 0;
+		return;
+	}
 	for (i = 0; i < ctx->nr_fields; i++)
 		offset += ctx->fields[i].get_size(offset);
-	return offset - orig_offset;
+	*ctx_len = offset;
 }

 static inline
@@ -127,7 +145,8 @@ static __inline__
 size_t record_header_size(const struct lib_ring_buffer_config *config,
 				 struct channel *chan, size_t offset,
 				 size_t *pre_header_padding,
-				 struct lib_ring_buffer_ctx *ctx)
+				 struct lib_ring_buffer_ctx *ctx,
+				 struct lttng_client_ctx *client_ctx)
 {
 	struct lttng_channel *lttng_chan = channel_get_private(chan);
 	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
@@ -170,8 +189,10 @@ size_t record_header_size(const struct lib_ring_buffer_config *config,
 		padding = 0;
 		WARN_ON_ONCE(1);
 	}
-	offset += ctx_get_size(offset, lttng_chan->ctx);
-	offset += ctx_get_size(offset, event->ctx);
+	offset += ctx_get_aligned_size(offset, lttng_chan->ctx,
+			client_ctx->packet_context_len);
+	offset += ctx_get_aligned_size(offset, event->ctx,
+			client_ctx->event_context_len);

 	*pre_header_padding = padding;
 	return offset - orig_offset;
@@ -324,10 +345,11 @@ static
 size_t client_record_header_size(const struct lib_ring_buffer_config *config,
 				 struct channel *chan, size_t offset,
 				 size_t *pre_header_padding,
-				 struct lib_ring_buffer_ctx *ctx)
+				 struct lib_ring_buffer_ctx *ctx,
+				 void *client_ctx)
 {
 	return record_header_size(config, chan, offset,
-				  pre_header_padding, ctx);
+				  pre_header_padding, ctx, client_ctx);
 }

 /**
@@ -603,8 +625,15 @@ int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
 		      uint32_t event_id)
 {
 	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+	struct lttng_event *event = lttng_probe_ctx->event;
+	struct lttng_client_ctx client_ctx;
 	int ret, cpu;

+	/* Compute internal size of context structures. */
+	ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len);
+	ctx_get_struct_size(event->ctx, &client_ctx.event_context_len);
+
 	cpu = lib_ring_buffer_get_cpu(&client_config);
 	if (unlikely(cpu < 0))
 		return -EPERM;
@@ -623,7 +652,7 @@ int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
 		WARN_ON_ONCE(1);
 	}

-	ret = lib_ring_buffer_reserve(&client_config, ctx);
+	ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
 	if (unlikely(ret))
 		goto put;
 	lib_ring_buffer_backend_get_pages(&client_config, ctx,
diff --git a/lttng-ring-buffer-metadata-client.h b/lttng-ring-buffer-metadata-client.h
index b2c0c821..1d03591b 100644
--- a/lttng-ring-buffer-metadata-client.h
+++ b/lttng-ring-buffer-metadata-client.h
@@ -58,7 +58,8 @@ static inline
 size_t record_header_size(const struct lib_ring_buffer_config *config,
 				 struct channel *chan, size_t offset,
 				 size_t *pre_header_padding,
-				 struct lib_ring_buffer_ctx *ctx)
+				 struct lib_ring_buffer_ctx *ctx,
+				 void *client_ctx)
 {
 	return 0;
 }
@@ -74,7 +75,8 @@ static
 size_t client_record_header_size(const struct lib_ring_buffer_config *config,
 				 struct channel *chan, size_t offset,
 				 size_t *pre_header_padding,
-				 struct lib_ring_buffer_ctx *ctx)
+				 struct lib_ring_buffer_ctx *ctx,
+				 void *client_ctx)
 {
 	return 0;
 }
@@ -314,7 +316,7 @@ int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
 {
 	int ret;

-	ret = lib_ring_buffer_reserve(&client_config, ctx);
+	ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
 	if (ret)
 		return ret;
 	lib_ring_buffer_backend_get_pages(&client_config, ctx,
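
For readers skimming the diff, here is a minimal, self-contained sketch of
the pattern the patch introduces: compute the (possibly fault-taking)
context lengths once, store them in a small client-owned structure, and let
the retryable reservation path consume only those precomputed lengths
through an opaque void *client_ctx pointer. The toy_* names below are
hypothetical stand-ins, not the real LTTng types; only the two length
fields mirroring struct lttng_client_ctx and the overall call sequence
follow the change above.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for a context descriptor (not the real lttng_ctx). */
struct toy_field {
	size_t (*get_size)(size_t offset);	/* may have side effects, e.g. touch memory */
};

struct toy_ctx {
	int nr_fields;
	struct toy_field *fields;
};

/* Mirrors struct lttng_client_ctx from the patch: lengths computed once. */
struct toy_client_ctx {
	size_t packet_context_len;
	size_t event_context_len;
};

/* Called once, outside the retry loop, so side effects happen only once. */
static void toy_ctx_get_struct_size(struct toy_ctx *ctx, size_t *ctx_len)
{
	size_t offset = 0;
	int i;

	if (!ctx) {
		*ctx_len = 0;
		return;
	}
	for (i = 0; i < ctx->nr_fields; i++)
		offset += ctx->fields[i].get_size(offset);
	*ctx_len = offset;
}

/*
 * Retryable path: reads only the precomputed lengths passed through the
 * opaque pointer, so retrying it cannot re-trigger the side effects above.
 */
static int toy_try_reserve(size_t payload_size, void *client_ctx, size_t *slot_size)
{
	struct toy_client_ctx *cctx = client_ctx;

	*slot_size = payload_size
		+ cctx->packet_context_len
		+ cctx->event_context_len;
	return 0;	/* a real implementation may ask the caller to retry */
}

static size_t field_size(size_t offset)
{
	(void)offset;
	return 8;	/* pretend each context field occupies 8 bytes */
}

int main(void)
{
	struct toy_field fields[2] = { { field_size }, { field_size } };
	struct toy_ctx chan_ctx = { 2, fields };
	struct toy_client_ctx client_ctx;
	size_t slot_size;

	/* Step 1: length calculation with possible side effects, done once. */
	toy_ctx_get_struct_size(&chan_ctx, &client_ctx.packet_context_len);
	toy_ctx_get_struct_size(NULL, &client_ctx.event_context_len);

	/* Step 2: the (retryable) reservation only reads the cached lengths. */
	do {
		/* ... */
	} while (toy_try_reserve(64, &client_ctx, &slot_size));

	printf("slot size: %zu bytes\n", slot_size);
	return 0;
}

In the patch itself, passing the lengths as an opaque void * keeps the
generic ring buffer layer independent of the LTTng-specific structure;
clients with no per-record context, such as the metadata client above,
simply pass NULL.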