@@ ... @@ void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
 {
         struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
         struct channel_backend *chanb = &ctx->chan->backend;
-        size_t sbidx, index, pagecpy;
+        size_t index, pagecpy;
         size_t offset = ctx->buf_offset;
-        struct lib_ring_buffer_backend_pages *rpages;
-        unsigned long sb_bindex, id;
+        struct lib_ring_buffer_backend_pages *backend_pages;
 
         if (unlikely(!len))
                 return;
+        backend_pages =
+                lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
         offset &= chanb->buf_size - 1;
-        sbidx = offset >> chanb->subbuf_size_order;
         index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
         pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-        id = bufb->buf_wsb[sbidx].id;
-        sb_bindex = subbuffer_id_get_index(config, id);
-        rpages = bufb->array[sb_bindex];
-        CHAN_WARN_ON(ctx->chan,
-                     config->mode == RING_BUFFER_OVERWRITE
-                     && subbuffer_id_is_noref(config, id));
         if (likely(pagecpy == len))
                 lib_ring_buffer_do_copy(config,
-                                        rpages->p[index].virt
+                                        backend_pages->p[index].virt
                                             + (offset & ~PAGE_MASK),
                                         src, len);
         else
@@ ... @@ void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
 {
         struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
         struct channel_backend *chanb = &ctx->chan->backend;
-        size_t sbidx, index, pagecpy;
+        size_t index, pagecpy;
         size_t offset = ctx->buf_offset;
-        struct lib_ring_buffer_backend_pages *rpages;
-        unsigned long sb_bindex, id;
+        struct lib_ring_buffer_backend_pages *backend_pages;
 
         if (unlikely(!len))
                 return;
+        backend_pages =
+                lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
         offset &= chanb->buf_size - 1;
-        sbidx = offset >> chanb->subbuf_size_order;
         index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
         pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-        id = bufb->buf_wsb[sbidx].id;
-        sb_bindex = subbuffer_id_get_index(config, id);
-        rpages = bufb->array[sb_bindex];
-        CHAN_WARN_ON(ctx->chan,
-                     config->mode == RING_BUFFER_OVERWRITE
-                     && subbuffer_id_is_noref(config, id));
         if (likely(pagecpy == len))
-                lib_ring_buffer_do_memset(rpages->p[index].virt
+                lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                           + (offset & ~PAGE_MASK),
                                           c, len);
         else
@@ ... @@ void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
 {
         struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
         struct channel_backend *chanb = &ctx->chan->backend;
-        size_t sbidx, index, pagecpy;
+        size_t index, pagecpy;
         size_t offset = ctx->buf_offset;
-        struct lib_ring_buffer_backend_pages *rpages;
-        unsigned long sb_bindex, id;
+        struct lib_ring_buffer_backend_pages *backend_pages;
 
         if (unlikely(!len))
                 return;
+        backend_pages =
+                lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
         offset &= chanb->buf_size - 1;
-        sbidx = offset >> chanb->subbuf_size_order;
         index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
         pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-        id = bufb->buf_wsb[sbidx].id;
-        sb_bindex = subbuffer_id_get_index(config, id);
-        rpages = bufb->array[sb_bindex];
-        CHAN_WARN_ON(ctx->chan,
-                     config->mode == RING_BUFFER_OVERWRITE
-                     && subbuffer_id_is_noref(config, id));
         if (likely(pagecpy == len)) {
                 size_t count;
 
                 count = lib_ring_buffer_do_strcpy(config,
-                                        rpages->p[index].virt
+                                        backend_pages->p[index].virt
                                             + (offset & ~PAGE_MASK),
                                         src, len - 1);
                 offset += count;
                 if (unlikely(count < len - 1)) {
                         size_t pad_len = len - 1 - count;
 
-                        lib_ring_buffer_do_memset(rpages->p[index].virt
+                        lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                                   + (offset & ~PAGE_MASK),
                                                   pad, pad_len);
                         offset += pad_len;
                 }
                 /* Ending '\0' */
-                lib_ring_buffer_do_memset(rpages->p[index].virt
+                lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                           + (offset & ~PAGE_MASK),
                                           '\0', 1);
         } else {
@@ ... @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config *config,
 {
         struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
         struct channel_backend *chanb = &ctx->chan->backend;
-        size_t sbidx, index, pagecpy;
+        size_t index, pagecpy;
         size_t offset = ctx->buf_offset;
-        struct lib_ring_buffer_backend_pages *rpages;
-        unsigned long sb_bindex, id;
+        struct lib_ring_buffer_backend_pages *backend_pages;
         unsigned long ret;
         mm_segment_t old_fs = get_fs();
 
         if (unlikely(!len))
                 return;
+        backend_pages =
+                lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
         offset &= chanb->buf_size - 1;
-        sbidx = offset >> chanb->subbuf_size_order;
         index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
         pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-        id = bufb->buf_wsb[sbidx].id;
-        sb_bindex = subbuffer_id_get_index(config, id);
-        rpages = bufb->array[sb_bindex];
-        CHAN_WARN_ON(ctx->chan,
-                     config->mode == RING_BUFFER_OVERWRITE
-                     && subbuffer_id_is_noref(config, id));
         set_fs(KERNEL_DS);
         pagefault_disable();
         if (likely(pagecpy == len)) {
                 ret = lib_ring_buffer_do_copy_from_user_inatomic(
-                        rpages->p[index].virt + (offset & ~PAGE_MASK),
+                        backend_pages->p[index].virt + (offset & ~PAGE_MASK),
                         src, len);
                 if (unlikely(ret > 0)) {
                         /* Copy failed. */
@@ ... @@ void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
 {
         struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
         struct channel_backend *chanb = &ctx->chan->backend;
-        size_t sbidx, index, pagecpy;
+        size_t index, pagecpy;
         size_t offset = ctx->buf_offset;
-        struct lib_ring_buffer_backend_pages *rpages;
-        unsigned long sb_bindex, id;
+        struct lib_ring_buffer_backend_pages *backend_pages;
         mm_segment_t old_fs = get_fs();
 
         if (unlikely(!len))
                 return;
+        backend_pages =
+                lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
         offset &= chanb->buf_size - 1;
-        sbidx = offset >> chanb->subbuf_size_order;
         index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
         pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-        id = bufb->buf_wsb[sbidx].id;
-        sb_bindex = subbuffer_id_get_index(config, id);
-        rpages = bufb->array[sb_bindex];
-        CHAN_WARN_ON(ctx->chan,
-                     config->mode == RING_BUFFER_OVERWRITE
-                     && subbuffer_id_is_noref(config, id));
         set_fs(KERNEL_DS);
         pagefault_disable();
         if (likely(pagecpy == len)) {
                 size_t count;
 
                 count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
-                                        rpages->p[index].virt
+                                        backend_pages->p[index].virt
                                             + (offset & ~PAGE_MASK),
                                         src, len - 1);
                 offset += count;
                 if (unlikely(count < len - 1)) {
                         size_t pad_len = len - 1 - count;
 
-                        lib_ring_buffer_do_memset(rpages->p[index].virt
+                        lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                                   + (offset & ~PAGE_MASK),
                                                   pad, pad_len);
                         offset += pad_len;
                 }
                 /* Ending '\0' */
-                lib_ring_buffer_do_memset(rpages->p[index].virt
+                lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                           + (offset & ~PAGE_MASK),
                                           '\0', 1);
         } else {
@@ ... @@
         return 0;
 }
 
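+/*
+ * Resolve the backend pages for the context's current write position:
+ * mask the offset into the buffer, derive the sub-buffer index, then
+ * look up the pages array through the write-side sub-buffer id table.
+ * Intended to be called once per record (e.g. at space reservation) so
+ * that the write primitives can reuse the result for every field. The
+ * CHAN_WARN_ON keeps, at a single site, the overwrite-mode noref check
+ * that each write primitive previously performed inline.
+ */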
+static inline
+void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lib_ring_buffer_backend_pages **backend_pages)
+{
+        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+        struct channel_backend *chanb = &ctx->chan->backend;
+        size_t sbidx, offset = ctx->buf_offset;
+        unsigned long sb_bindex, id;
+        struct lib_ring_buffer_backend_pages *rpages;
+
+        offset &= chanb->buf_size - 1;
+        sbidx = offset >> chanb->subbuf_size_order;
+        id = bufb->buf_wsb[sbidx].id;
+        sb_bindex = subbuffer_id_get_index(config, id);
+        rpages = bufb->array[sb_bindex];
+        CHAN_WARN_ON(ctx->chan,
+                     config->mode == RING_BUFFER_OVERWRITE
+                     && subbuffer_id_is_noref(config, id));
+        *backend_pages = rpages;
+}
+
+/* Get backend pages from cache. */
+static inline
+struct lib_ring_buffer_backend_pages *
+lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
+                struct lib_ring_buffer_ctx *ctx)
+{
+        return ctx->backend_pages;
+}
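+
+/*
+ * Illustrative call site (an assumption for documentation, not part of
+ * this patch): the reservation fast path is expected to prime the cache
+ * once per record with
+ *
+ *        lib_ring_buffer_backend_get_pages(config, ctx, &ctx->backend_pages);
+ *
+ * after which lib_ring_buffer_write() and the other write primitives
+ * pick the cached pointer up through
+ * lib_ring_buffer_get_backend_pages_from_ctx() for every field written.
+ */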
+
 /*
  * The ring buffer can count events recorded and overwritten per buffer,
  * but it is disabled by default due to its performance overhead.