ctx->buf_offset += len;
}
+/**
+ * lib_ring_buffer_memset - write len bytes of c to a buffer backend
+ * @config : ring buffer instance configuration
+ * @ctx : ring buffer context (supplies the buffer backend and the current
+ *        write offset; buf_offset is advanced by @len on return)
+ * @c : the byte to copy
+ * @len : number of bytes to copy
+ *
+ * This function writes "len" bytes of "c" to a buffer backend, at a specific
+ * offset. This is more or less a buffer backend-specific memset() operation.
+ * Calls the slow path (_lib_ring_buffer_memset) if write is crossing a page
+ * boundary.
+ */
+static inline
+void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
+			    struct lib_ring_buffer_ctx *ctx, int c, size_t len)
+{
+
+	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+	struct channel_backend *chanb = &ctx->chan->backend;
+	size_t sbidx, index;
+	size_t offset = ctx->buf_offset;
+	ssize_t pagecpy;
+	struct lib_ring_buffer_backend_pages *rpages;
+	unsigned long sb_bindex, id;
+
+	/* Wrap offset into the buffer, then locate the sub-buffer index and
+	 * the page index within that sub-buffer. */
+	offset &= chanb->buf_size - 1;
+	sbidx = offset >> chanb->subbuf_size_order;
+	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+	/* Bytes that fit in the current page starting at offset. */
+	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+	id = bufb->buf_wsb[sbidx].id;
+	sb_bindex = subbuffer_id_get_index(config, id);
+	rpages = bufb->array[sb_bindex];
+	/* In overwrite mode the writer must hold a reference on the
+	 * sub-buffer it writes to. */
+	CHAN_WARN_ON(ctx->chan,
+		     config->mode == RING_BUFFER_OVERWRITE
+		     && subbuffer_id_is_noref(config, id));
+	if (likely(pagecpy == len))
+		/* Fast path: the whole write stays within one page. */
+		lib_ring_buffer_do_memset(rpages->p[index].virt
+					  + (offset & ~PAGE_MASK),
+					  c, len);
+	else
+		/* Crosses a page boundary: take the out-of-line slow path. */
+		_lib_ring_buffer_memset(bufb, offset, c, len, 0);
+	ctx->buf_offset += len;
+}
+
+/**
+ * lib_ring_buffer_copy_from_user - write userspace data to a buffer backend
+ * @config : ring buffer instance configuration
+ * @ctx: ring buffer context. (input arguments only)
+ * @src : userspace source pointer to copy from
+ * @len : length of data to copy
+ *
+ * This function copies "len" bytes of data from a userspace pointer to a
+ * buffer backend, at the current context offset. This is more or less a buffer
+ * backend-specific memcpy() operation. Calls the slow path
+ * (_lib_ring_buffer_copy_from_user) if copy is crossing a page boundary.
+ * If the userspace range is inaccessible or the copy faults mid-way, the
+ * (remaining) destination range is zero-filled so the reserved event slot
+ * stays well-defined.
+ */
+static inline
+void lib_ring_buffer_copy_from_user(const struct lib_ring_buffer_config *config,
+				    struct lib_ring_buffer_ctx *ctx,
+				    const void __user *src, size_t len)
+{
+	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+	struct channel_backend *chanb = &ctx->chan->backend;
+	size_t sbidx, index;
+	size_t offset = ctx->buf_offset;
+	ssize_t pagecpy;
+	struct lib_ring_buffer_backend_pages *rpages;
+	unsigned long sb_bindex, id;
+	unsigned long ret;
+
+	/* Wrap offset into the buffer and locate sub-buffer/page indexes. */
+	offset &= chanb->buf_size - 1;
+	sbidx = offset >> chanb->subbuf_size_order;
+	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+	id = bufb->buf_wsb[sbidx].id;
+	sb_bindex = subbuffer_id_get_index(config, id);
+	rpages = bufb->array[sb_bindex];
+	CHAN_WARN_ON(ctx->chan,
+		     config->mode == RING_BUFFER_OVERWRITE
+		     && subbuffer_id_is_noref(config, id));
+
+	if (unlikely(!access_ok(VERIFY_READ, src, len)))
+		goto fill_buffer;
+
+	if (likely(pagecpy == len)) {
+		/* ret is the number of bytes that could NOT be copied. */
+		ret = lib_ring_buffer_do_copy_from_user(
+			rpages->p[index].virt + (offset & ~PAGE_MASK),
+			src, len);
+		if (unlikely(ret > 0)) {
+			/* Partial fault: skip past the copied part, zero-fill
+			 * the rest. */
+			len -= (pagecpy - ret);
+			offset += (pagecpy - ret);
+			goto fill_buffer;
+		}
+	} else {
+		_lib_ring_buffer_copy_from_user(bufb, offset, src, len, 0);
+	}
+	ctx->buf_offset += len;
+
+	return;
+
+fill_buffer:
+	/*
+	 * In the error path we call the slow path version to avoid
+	 * the pollution of static inline code.
+	 * NOTE(review): ctx->buf_offset is not advanced on this path, so a
+	 * subsequent field written into the same event would start at the
+	 * pre-copy offset — confirm this is the intended semantic.
+	 */
+	_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
+}
+
/*
* This accessor counts the number of unread records in a buffer.
* It only provides a consistent value if no reads not writes are performed
#include "../../wrapper/ringbuffer/backend_types.h"
#include "../../wrapper/ringbuffer/frontend_types.h"
#include <linux/string.h>
+#include <linux/uaccess.h>
/* Ring buffer backend API presented to the frontend */
extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
size_t offset, const void *src, size_t len,
ssize_t pagecpy);
+extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
+				    size_t offset, int c, size_t len,
+				    ssize_t pagecpy);
+/*
+ * src is a userspace pointer; annotate it __user so the prototype matches
+ * the definition and sparse can check address-space correctness.
+ */
+extern void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
+					    size_t offset,
+					    const void __user *src,
+					    size_t len, ssize_t pagecpy);
/*
* Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
inline_memcpy(dest, src, __len); \
} while (0)
+/*
+ * Copy userspace data with __copy_from_user: the caller has already
+ * performed access_ok() on the whole [src, src + len) range, so the
+ * cheaper unchecked variant is safe here.
+ */
+static inline
+unsigned long lib_ring_buffer_do_copy_from_user(void *dest,
+						const void __user *src,
+						unsigned long len)
+{
+	unsigned long nr_not_copied;
+
+	/* __copy_from_user returns the number of bytes left uncopied. */
+	nr_not_copied = __copy_from_user(dest, src, len);
+	return nr_not_copied;
+}
+
+/*
+ * Fill "len" bytes at "dest" with the byte value "c".
+ */
+static inline
+void lib_ring_buffer_do_memset(char *dest, int c,
+			       unsigned long len)
+{
+	char *end = dest + len;
+
+	while (dest < end)
+		*dest++ = c;
+}
+
#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
+
+/**
+ * _lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @c : the byte to write
+ * @len : length to write
+ * @pagecpy : page size copied so far (0 on the first call; each loop
+ *            iteration consumes the previous iteration's page-sized chunk)
+ *
+ * Slow path taken when the write crosses a page boundary: fills the
+ * destination one page-bounded chunk at a time.
+ */
+void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
+			     size_t offset,
+			     int c, size_t len, ssize_t pagecpy)
+{
+	struct channel_backend *chanb = &bufb->chan->backend;
+	const struct lib_ring_buffer_config *config = chanb->config;
+	size_t sbidx, index;
+	struct lib_ring_buffer_backend_pages *rpages;
+	unsigned long sb_bindex, id;
+
+	do {
+		/* Consume the chunk written by the previous iteration. */
+		len -= pagecpy;
+		offset += pagecpy;
+		sbidx = offset >> chanb->subbuf_size_order;
+		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+		/*
+		 * Underlying layer should never ask for writes across
+		 * subbuffers.
+		 */
+		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+		/* Largest chunk that stays within the current page. */
+		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+		id = bufb->buf_wsb[sbidx].id;
+		sb_bindex = subbuffer_id_get_index(config, id);
+		rpages = bufb->array[sb_bindex];
+		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+			     && subbuffer_id_is_noref(config, id));
+		lib_ring_buffer_do_memset(rpages->p[index].virt
+					  + (offset & ~PAGE_MASK),
+					  c, pagecpy);
+	} while (unlikely(len != pagecpy));	/* done when last chunk == remainder */
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
+
+
+/**
+ * _lib_ring_buffer_copy_from_user - write user data to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : userspace source address
+ * @len : length to write
+ * @pagecpy : page size copied so far (0 on the first call)
+ *
+ * This function deals with userspace pointers, it should never be called
+ * directly without having the src pointer checked with access_ok()
+ * previously.
+ * On a partial-copy fault, the remainder of the destination range is
+ * zero-filled and the copy stops.
+ */
+void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
+				     size_t offset,
+				     const void __user *src, size_t len,
+				     ssize_t pagecpy)
+{
+	struct channel_backend *chanb = &bufb->chan->backend;
+	const struct lib_ring_buffer_config *config = chanb->config;
+	size_t sbidx, index;
+	struct lib_ring_buffer_backend_pages *rpages;
+	unsigned long sb_bindex, id;
+	unsigned long ret;
+
+	do {
+		/* Consume the chunk copied by the previous iteration. */
+		len -= pagecpy;
+		src += pagecpy;
+		offset += pagecpy;
+		sbidx = offset >> chanb->subbuf_size_order;
+		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+		/*
+		 * Underlying layer should never ask for writes across
+		 * subbuffers.
+		 */
+		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+		id = bufb->buf_wsb[sbidx].id;
+		sb_bindex = subbuffer_id_get_index(config, id);
+		rpages = bufb->array[sb_bindex];
+		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+			     && subbuffer_id_is_noref(config, id));
+		/*
+		 * ret is the number of bytes __copy_from_user could not copy
+		 * (0 on full success). Do NOT compare it "!= 0" here: that
+		 * squashes the count to 0/1 and breaks the partial-copy
+		 * arithmetic below. The fast-path caller uses the raw count
+		 * the same way.
+		 */
+		ret = lib_ring_buffer_do_copy_from_user(rpages->p[index].virt
+							+ (offset & ~PAGE_MASK),
+							src, pagecpy);
+		if (ret > 0) {
+			/* Skip past the bytes actually copied... */
+			offset += (pagecpy - ret);
+			len -= (pagecpy - ret);
+			/* ...and zero-fill everything that remains. */
+			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
+			break; /* stop copy */
+		}
+	} while (unlikely(len != pagecpy));
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user);
+
/**
* lib_ring_buffer_read - read data from ring_buffer_buffer.
* @bufb : buffer backend
void (*event_commit)(struct lib_ring_buffer_ctx *ctx);
void (*event_write)(struct lib_ring_buffer_ctx *ctx, const void *src,
size_t len);
+ void (*event_write_from_user)(struct lib_ring_buffer_ctx *ctx,
+ const void *src, size_t len);
/*
* packet_avail_size returns the available size in the current
* packet. Note that the size returned is only a hint, since it
lib_ring_buffer_write(&client_config, ctx, src, len);
}
+/* Client op: copy a userspace event payload into the ring buffer; the
+ * backend zero-fills the slot on copy fault. */
+static
+void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
+			       const void __user *src, size_t len)
+{
+	lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
+}
+
static
wait_queue_head_t *ltt_get_writer_buf_wait_queue(struct channel *chan, int cpu)
{
.event_reserve = ltt_event_reserve,
.event_commit = ltt_event_commit,
.event_write = ltt_event_write,
+ .event_write_from_user = ltt_event_write_from_user,
.packet_avail_size = NULL, /* Would be racy anyway */
.get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
.get_hp_wait_queue = ltt_get_hp_wait_queue,
lib_ring_buffer_write(&client_config, ctx, src, len);
}
+/* Client op: copy a userspace event payload into the ring buffer; the
+ * backend zero-fills the slot on copy fault. */
+static
+void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
+			       const void __user *src, size_t len)
+{
+	lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
+}
+
static
size_t ltt_packet_avail_size(struct channel *chan)
.buffer_read_close = ltt_buffer_read_close,
.event_reserve = ltt_event_reserve,
.event_commit = ltt_event_commit,
+ .event_write_from_user = ltt_event_write_from_user,
.event_write = ltt_event_write,
.packet_avail_size = ltt_packet_avail_size,
.get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
__chan->ops->event_write(&__ctx, src, \
sizeof(__typemap.dest) * __get_dynamic_array_len(dest));\
goto __end_field_##dest##_2;
+/*
+ * tp_memcpy_from_user - copy "len" bytes from a userspace pointer into the
+ * event field "dest", aligned on the field's natural alignment.
+ * The dead "if (0) (void) __typemap.dest;" references the field so the
+ * compiler type-checks "dest" without emitting code.
+ */
+#undef tp_memcpy_from_user
+#define tp_memcpy_from_user(dest, src, len)				\
+	__assign_##dest:						\
+	if (0)								\
+		(void) __typemap.dest;					\
+	lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
+	__chan->ops->event_write_from_user(&__ctx, src, len);		\
+	goto __end_field_##dest;
+/*
+ * tp_copy_string_from_user "len" parameter is the length of the string
+ * excluding the final \0, which is appended via event_memset.
+ * NOTE(review): event_memset is not among the ops added by this change —
+ * confirm the channel ops structure actually provides it.
+ */
+#undef tp_copy_string_from_user
+#define tp_copy_string_from_user(dest, src, len)			\
+	__assign_##dest:						\
+	if (0)								\
+		(void) __typemap.dest;					\
+	lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
+	__chan->ops->event_write_from_user(&__ctx, src, len);		\
+	__chan->ops->event_memset(&__ctx, 0, 1);			\
+	goto __end_field_##dest;
#undef tp_strcpy
#define tp_strcpy(dest, src) \