Performance: cache the backend pages pointer in context
Author: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Sun, 25 Sep 2016 16:02:25 +0000 (12:02 -0400)
Committer: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Sun, 25 Sep 2016 16:02:25 +0000 (12:02 -0400)
Getting the backend pages pointer requires pointer chasing through the
ring buffer backend tables. Cache the current value so it can be re-used
for all backend write operations writing fields for the same event.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
lib/ringbuffer/backend.h
lib/ringbuffer/backend_internal.h
lib/ringbuffer/config.h
lttng-ring-buffer-client.h
lttng-ring-buffer-metadata-client.h

index b362a04a436bfe64c640f694968032fb7de5aa32..9db0095638973a2e0fb6da77affde9fb30bed8f5 100644 (file)
@@ -90,26 +90,20 @@ void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
 {
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
-       size_t sbidx, index, pagecpy;
+       size_t index, pagecpy;
        size_t offset = ctx->buf_offset;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
+       struct lib_ring_buffer_backend_pages *backend_pages;
 
        if (unlikely(!len))
                return;
+       backend_pages =
+               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        offset &= chanb->buf_size - 1;
-       sbidx = offset >> chanb->subbuf_size_order;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-       id = bufb->buf_wsb[sbidx].id;
-       sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = bufb->array[sb_bindex];
-       CHAN_WARN_ON(ctx->chan,
-                    config->mode == RING_BUFFER_OVERWRITE
-                    && subbuffer_id_is_noref(config, id));
        if (likely(pagecpy == len))
                lib_ring_buffer_do_copy(config,
-                                       rpages->p[index].virt
+                                       backend_pages->p[index].virt
                                            + (offset & ~PAGE_MASK),
                                        src, len);
        else
@@ -137,25 +131,19 @@ void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
 
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
-       size_t sbidx, index, pagecpy;
+       size_t index, pagecpy;
        size_t offset = ctx->buf_offset;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
+       struct lib_ring_buffer_backend_pages *backend_pages;
 
        if (unlikely(!len))
                return;
+       backend_pages =
+               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        offset &= chanb->buf_size - 1;
-       sbidx = offset >> chanb->subbuf_size_order;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-       id = bufb->buf_wsb[sbidx].id;
-       sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = bufb->array[sb_bindex];
-       CHAN_WARN_ON(ctx->chan,
-                    config->mode == RING_BUFFER_OVERWRITE
-                    && subbuffer_id_is_noref(config, id));
        if (likely(pagecpy == len))
-               lib_ring_buffer_do_memset(rpages->p[index].virt
+               lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                          + (offset & ~PAGE_MASK),
                                          c, len);
        else
@@ -240,28 +228,22 @@ void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
 {
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
-       size_t sbidx, index, pagecpy;
+       size_t index, pagecpy;
        size_t offset = ctx->buf_offset;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
+       struct lib_ring_buffer_backend_pages *backend_pages;
 
        if (unlikely(!len))
                return;
+       backend_pages =
+               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        offset &= chanb->buf_size - 1;
-       sbidx = offset >> chanb->subbuf_size_order;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-       id = bufb->buf_wsb[sbidx].id;
-       sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = bufb->array[sb_bindex];
-       CHAN_WARN_ON(ctx->chan,
-                    config->mode == RING_BUFFER_OVERWRITE
-                    && subbuffer_id_is_noref(config, id));
        if (likely(pagecpy == len)) {
                size_t count;
 
                count = lib_ring_buffer_do_strcpy(config,
-                                       rpages->p[index].virt
+                                       backend_pages->p[index].virt
                                            + (offset & ~PAGE_MASK),
                                        src, len - 1);
                offset += count;
@@ -269,13 +251,13 @@ void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
                if (unlikely(count < len - 1)) {
                        size_t pad_len = len - 1 - count;
 
-                       lib_ring_buffer_do_memset(rpages->p[index].virt
+                       lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                        pad, pad_len);
                        offset += pad_len;
                }
                /* Ending '\0' */
-               lib_ring_buffer_do_memset(rpages->p[index].virt
+               lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                        + (offset & ~PAGE_MASK),
                                '\0', 1);
        } else {
@@ -304,25 +286,19 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config
 {
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
-       size_t sbidx, index, pagecpy;
+       size_t index, pagecpy;
        size_t offset = ctx->buf_offset;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
+       struct lib_ring_buffer_backend_pages *backend_pages;
        unsigned long ret;
        mm_segment_t old_fs = get_fs();
 
        if (unlikely(!len))
                return;
+       backend_pages =
+               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        offset &= chanb->buf_size - 1;
-       sbidx = offset >> chanb->subbuf_size_order;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-       id = bufb->buf_wsb[sbidx].id;
-       sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = bufb->array[sb_bindex];
-       CHAN_WARN_ON(ctx->chan,
-                    config->mode == RING_BUFFER_OVERWRITE
-                    && subbuffer_id_is_noref(config, id));
 
        set_fs(KERNEL_DS);
        pagefault_disable();
@@ -331,7 +307,7 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config
 
        if (likely(pagecpy == len)) {
                ret = lib_ring_buffer_do_copy_from_user_inatomic(
-                       rpages->p[index].virt + (offset & ~PAGE_MASK),
+                       backend_pages->p[index].virt + (offset & ~PAGE_MASK),
                        src, len);
                if (unlikely(ret > 0)) {
                        /* Copy failed. */
@@ -381,24 +357,18 @@ void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_conf
 {
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
-       size_t sbidx, index, pagecpy;
+       size_t index, pagecpy;
        size_t offset = ctx->buf_offset;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
+       struct lib_ring_buffer_backend_pages *backend_pages;
        mm_segment_t old_fs = get_fs();
 
        if (unlikely(!len))
                return;
+       backend_pages =
+               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        offset &= chanb->buf_size - 1;
-       sbidx = offset >> chanb->subbuf_size_order;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-       id = bufb->buf_wsb[sbidx].id;
-       sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = bufb->array[sb_bindex];
-       CHAN_WARN_ON(ctx->chan,
-                    config->mode == RING_BUFFER_OVERWRITE
-                    && subbuffer_id_is_noref(config, id));
 
        set_fs(KERNEL_DS);
        pagefault_disable();
@@ -409,7 +379,7 @@ void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_conf
                size_t count;
 
                count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
-                                       rpages->p[index].virt
+                                       backend_pages->p[index].virt
                                            + (offset & ~PAGE_MASK),
                                        src, len - 1);
                offset += count;
@@ -417,13 +387,13 @@ void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_conf
                if (unlikely(count < len - 1)) {
                        size_t pad_len = len - 1 - count;
 
-                       lib_ring_buffer_do_memset(rpages->p[index].virt
+                       lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                        pad, pad_len);
                        offset += pad_len;
                }
                /* Ending '\0' */
-               lib_ring_buffer_do_memset(rpages->p[index].virt
+               lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                        + (offset & ~PAGE_MASK),
                                '\0', 1);
        } else {
index fc5bec535c591b3d9487e555c0dbbbf2055f5dc9..e03d8c0b1b23a1363fdebbe5ed8589223226983c 100644 (file)
@@ -201,6 +201,37 @@ int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
                return 0;
 }
 
+static inline
+void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer_ctx *ctx,
+                       struct lib_ring_buffer_backend_pages **backend_pages)
+{
+       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       size_t sbidx, offset = ctx->buf_offset;
+       unsigned long sb_bindex, id;
+       struct lib_ring_buffer_backend_pages *rpages;
+
+       offset &= chanb->buf_size - 1;
+       sbidx = offset >> chanb->subbuf_size_order;
+       id = bufb->buf_wsb[sbidx].id;
+       sb_bindex = subbuffer_id_get_index(config, id);
+       rpages = bufb->array[sb_bindex];
+       CHAN_WARN_ON(ctx->chan,
+                    config->mode == RING_BUFFER_OVERWRITE
+                    && subbuffer_id_is_noref(config, id));
+       *backend_pages = rpages;
+}
+
+/* Get backend pages from cache. */
+static inline
+struct lib_ring_buffer_backend_pages *
+       lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer_ctx *ctx)
+{
+       return ctx->backend_pages;
+}
+
 /*
  * The ring buffer can count events recorded and overwritten per buffer,
  * but it is disabled by default due to its performance overhead.
index 63189b950faa68fa4e5e348c41f1aba55dcdc098..60174db63a38687e256b3726e7a83dcd0af47209 100644 (file)
@@ -200,6 +200,8 @@ struct lib_ring_buffer_ctx {
                                         */
        u64 tsc;                        /* time-stamp counter value */
        unsigned int rflags;            /* reservation flags */
+       /* Cache backend pages pointer chasing. */
+       struct lib_ring_buffer_backend_pages *backend_pages;
 };
 
 /**
@@ -223,6 +225,7 @@ void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
        ctx->largest_align = largest_align;
        ctx->cpu = cpu;
        ctx->rflags = 0;
+       ctx->backend_pages = NULL;
 }
 
 /*
index 7c9ae443419926433796ec79df5a6709c5398e84..c4b7304fdfa6026153e14affbee429cf57ac53c7 100644 (file)
@@ -626,6 +626,8 @@ int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
        ret = lib_ring_buffer_reserve(&client_config, ctx);
        if (ret)
                goto put;
+       lib_ring_buffer_backend_get_pages(&client_config, ctx,
+                       &ctx->backend_pages);
        lttng_write_event_header(&client_config, ctx, event_id);
        return 0;
 put:
index c441372e3a45245ba34191d63ef1f976bfd67ccd..b2c0c82147999d2c09042431cf65ba9313802965 100644 (file)
@@ -312,7 +312,15 @@ void lttng_buffer_read_close(struct lib_ring_buffer *buf)
 static
 int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
 {
-       return lib_ring_buffer_reserve(&client_config, ctx);
+       int ret;
+
+       ret = lib_ring_buffer_reserve(&client_config, ctx);
+       if (ret)
+               return ret;
+       lib_ring_buffer_backend_get_pages(&client_config, ctx,
+                       &ctx->backend_pages);
+       return 0;
+
 }
 
 static
This page took 0.03142 seconds and 4 git commands to generate.