X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=lib%2Fringbuffer%2Fbackend_internal.h;h=2e59b68b482a6cc3520d87c09278663aaa237553;hb=c11ab967decd550b28111b2767eda20e7e6be3fd;hp=d18967d6738d48721eed9d1471c0140d02b99629;hpb=5671a6610b06175832338ed78db486b59bc5246c;p=lttng-modules.git

diff --git a/lib/ringbuffer/backend_internal.h b/lib/ringbuffer/backend_internal.h
index d18967d6..2e59b68b 100644
--- a/lib/ringbuffer/backend_internal.h
+++ b/lib/ringbuffer/backend_internal.h
@@ -201,6 +201,42 @@ int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
 	return 0;
 }
 
+static inline
+void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
+			struct lib_ring_buffer_ctx *ctx,
+			struct lib_ring_buffer_backend_pages **backend_pages)
+{
+	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+	struct channel_backend *chanb = &ctx->chan->backend;
+	size_t sbidx, offset = ctx->buf_offset;
+	unsigned long sb_bindex, id;
+	struct lib_ring_buffer_backend_pages *rpages;
+
+	offset &= chanb->buf_size - 1;
+	sbidx = offset >> chanb->subbuf_size_order;
+	id = bufb->buf_wsb[sbidx].id;
+	sb_bindex = subbuffer_id_get_index(config, id);
+	rpages = bufb->array[sb_bindex];
+	CHAN_WARN_ON(ctx->chan,
+		     config->mode == RING_BUFFER_OVERWRITE
+		     && subbuffer_id_is_noref(config, id));
+	*backend_pages = rpages;
+}
+
+/* Get backend pages from cache. */
+static inline
+struct lib_ring_buffer_backend_pages *
+	lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer_ctx *ctx)
+{
+	return ctx->backend_pages;
+}
+
+/*
+ * The ring buffer can count events recorded and overwritten per buffer,
+ * but it is disabled by default due to its performance overhead.
+ */
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
 static inline
 void subbuffer_count_record(const struct lib_ring_buffer_config *config,
 			    struct lib_ring_buffer_backend *bufb,
@@ -211,6 +247,14 @@ void subbuffer_count_record(const struct lib_ring_buffer_config *config,
 	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
 	v_inc(config, &bufb->array[sb_bindex]->records_commit);
 }
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static inline
+void subbuffer_count_record(const struct lib_ring_buffer_config *config,
+			    struct lib_ring_buffer_backend *bufb,
+			    unsigned long idx)
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
 
 /*
  * Reader has exclusive subbuffer access for record consumption. No need to
@@ -434,6 +478,28 @@ int update_read_sb_index(const struct lib_ring_buffer_config *config,
 	return 0;
 }
 
+static inline __attribute__((always_inline))
+void lttng_inline_memcpy(void *dest, const void *src,
+		unsigned long len)
+{
+	switch (len) {
+	case 1:
+		*(uint8_t *) dest = *(const uint8_t *) src;
+		break;
+	case 2:
+		*(uint16_t *) dest = *(const uint16_t *) src;
+		break;
+	case 4:
+		*(uint32_t *) dest = *(const uint32_t *) src;
+		break;
+	case 8:
+		*(uint64_t *) dest = *(const uint64_t *) src;
+		break;
+	default:
+		inline_memcpy(dest, src, len);
+	}
+}
+
 /*
  * Use the architecture-specific memcpy implementation for constant-sized
  * inputs, but rely on an inline memcpy for length statically unknown.
  */
@@ -445,12 +511,14 @@ do {								\
 	if (__builtin_constant_p(len))				\
 		memcpy(dest, src, __len);			\
 	else							\
-		inline_memcpy(dest, src, __len);		\
+		lttng_inline_memcpy(dest, src, __len);		\
 } while (0)
 
 /*
  * We use __copy_from_user_inatomic to copy userspace data since we already
  * did the access_ok for the whole range.
+ *
+ * Return 0 if OK, nonzero on error.
  */
 static inline
 unsigned long lib_ring_buffer_do_copy_from_user_inatomic(void *dest,
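
The first hunk adds a per-context cache of the backend pages pointer. As a rough illustration of why that helps, here is a self-contained toy model (all toy_* names are invented stand-ins, not the real lttng-modules types, and the id-to-index decoding and the noref consistency check are omitted): resolving the pages costs a mask, a shift and two array indirections per record, while reading the cache is a single load.

/*
 * Toy model of the backend-pages cache. Assumption: simplified stand-in
 * structures, not the real lttng-modules ring buffer types.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_pages { unsigned long records_commit; };

struct toy_backend {
	struct toy_pages *array[4];	/* pages tables, indexed by sb_bindex */
	unsigned long buf_size;		/* power of two */
	unsigned int subbuf_size_order;
	unsigned long wsb_id[4];	/* write-side sub-buffer ids */
};

struct toy_ctx {
	struct toy_backend *backend;
	size_t buf_offset;
	struct toy_pages *backend_pages;	/* the cache added by the patch */
};

/*
 * Slow path: resolve the pages from the current write offset (mirrors
 * lib_ring_buffer_backend_get_pages; here id == index, unlike the real
 * encoded subbuffer ids).
 */
static void toy_get_pages(struct toy_ctx *ctx)
{
	size_t offset = ctx->buf_offset & (ctx->backend->buf_size - 1);
	size_t sbidx = offset >> ctx->backend->subbuf_size_order;
	unsigned long sb_bindex = ctx->backend->wsb_id[sbidx];

	ctx->backend_pages = ctx->backend->array[sb_bindex];
}

int main(void)
{
	struct toy_pages p0 = { 0 }, p1 = { 0 };
	struct toy_backend b = {
		.array = { &p0, &p1 },
		.buf_size = 8192,
		.subbuf_size_order = 12,	/* 4 KiB sub-buffers */
		.wsb_id = { 0, 1 },
	};
	struct toy_ctx ctx = { .backend = &b, .buf_offset = 5000 };

	toy_get_pages(&ctx);			/* done once per record */
	ctx.backend_pages->records_commit++;	/* fast path reuses the cache */
	printf("offset %zu maps to %s\n", ctx.buf_offset,
		ctx.backend_pages == &p1 ? "subbuf 1" : "subbuf 0");
	return 0;
}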
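
The lttng_inline_memcpy switch relies on constant propagation: when the inlined call sees a compile-time-constant small length, the compiler keeps only the matching case, so a fixed-size field copy becomes a single load/store pair instead of a memcpy call. Below is a minimal userspace sketch of the same dispatch; the demo_ name is invented, and inline_memcpy is assumed replaceable by plain memcpy here (in the kernel tree it is the architecture-provided inline copy).

/*
 * Standalone sketch, not part of the patch: size-dispatch copy in plain
 * userspace C.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define inline_memcpy memcpy	/* assumption: stand-in for the arch inline copy */

static inline __attribute__((always_inline))
void demo_inline_memcpy(void *dest, const void *src, unsigned long len)
{
	/* Small power-of-two sizes collapse to a single fixed-width store. */
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);	/* arbitrary lengths fall back */
	}
}

int main(void)
{
	uint64_t src = 0x1122334455667788ULL, dst = 0;

	demo_inline_memcpy(&dst, &src, sizeof(dst));	/* constant 8: one store */
	printf("copied: 0x%llx\n", (unsigned long long) dst);
	return 0;
}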