size_t len);
void (*event_strcpy_from_user)(struct lttng_kernel_ring_buffer_ctx *ctx,
const char __user *src, size_t len);
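+	/* Copy a C-string into a fixed-size, padded Pascal string (kernel and userspace source variants). */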
+ void (*event_pstrcpy_pad)(struct lttng_kernel_ring_buffer_ctx *ctx,
+ const char *src, size_t len);
+ void (*event_pstrcpy_pad_from_user)(struct lttng_kernel_ring_buffer_ctx *ctx,
+ const char __user *src, size_t len);
void (*lost_event_too_big)(struct lttng_kernel_channel_buffer *lttng_channel);
};
#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
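+	/* Encoding dispatch: "none" copies the raw array; text encodings write it as a padded Pascal string (P-string). */ \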
- if (_user) { \
- __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length), lttng_alignof(_type)); \
- } else { \
- __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length), lttng_alignof(_type)); \
+ if (lttng_kernel_string_encoding_##_encoding == lttng_kernel_string_encoding_none) { \
+ if (_user) { \
+ __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length), lttng_alignof(_type)); \
+ } else { \
+ __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length), lttng_alignof(_type)); \
+ } \
+ } else { \
+ if (_user) { \
+ __chan->ops->event_pstrcpy_pad_from_user(&__ctx, (const char __user *) (_src), _length); \
+ } else { \
+ __chan->ops->event_pstrcpy_pad(&__ctx, (const char *) (_src), _length); \
+ } \
}
#if (__BYTE_ORDER == __LITTLE_ENDIAN)
#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
#undef _ctf_sequence_encoded
-#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
+#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
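+	/* Write the explicit sequence length header, then dispatch on the encoding as in _ctf_array_encoded. */ \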
- { \
+ { \
	_length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx]; \
__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type), lttng_alignof(_length_type));\
- } \
- if (_user) { \
- __chan->ops->event_write_from_user(&__ctx, _src, \
- sizeof(_type) * __get_dynamic_len(dest), lttng_alignof(_type)); \
- } else { \
- __chan->ops->event_write(&__ctx, _src, \
- sizeof(_type) * __get_dynamic_len(dest), lttng_alignof(_type)); \
+ } \
+ if (lttng_kernel_string_encoding_##_encoding == lttng_kernel_string_encoding_none) { \
+ if (_user) { \
+ __chan->ops->event_write_from_user(&__ctx, _src, \
+ sizeof(_type) * __get_dynamic_len(dest), lttng_alignof(_type)); \
+ } else { \
+ __chan->ops->event_write(&__ctx, _src, \
+ sizeof(_type) * __get_dynamic_len(dest), lttng_alignof(_type)); \
+ } \
+ } else { \
+ if (_user) { \
+ __chan->ops->event_pstrcpy_pad_from_user(&__ctx, (const char __user *) (_src), \
+ __get_dynamic_len(dest)); \
+ } else { \
+ __chan->ops->event_pstrcpy_pad(&__ctx, (const char *) (_src), \
+ __get_dynamic_len(dest)); \
+ } \
}
#if (__BYTE_ORDER == __LITTLE_ENDIAN)
ctx->priv.buf_offset += len;
}
+/**
+ * lib_ring_buffer_pstrcpy - write kernel C-string (input) to a buffer backend P-string
+ * @config : ring buffer instance configuration
+ * @ctx : ring buffer context (input arguments only)
+ * @src : source pointer to copy from
+ * @len : length of data to copy
+ * @pad : character to use for padding
+ *
+ * This function copies up to @len bytes of data from a source pointer
+ * into a Pascal string in the buffer backend. If a terminating '\0'
+ * character is found in @src before @len characters are copied, the
+ * remainder of the buffer is padded with @pad characters (e.g. '\0').
+ *
+ * The length of Pascal strings in the ring buffer is explicit: it is
+ * either the array or sequence length.
+ */
+static inline
+void lib_ring_buffer_pstrcpy(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_ctx *ctx,
+ const char *src, size_t len, char pad)
+ __attribute__((always_inline));
+static inline
+void lib_ring_buffer_pstrcpy(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_ctx *ctx,
+ const char *src, size_t len, char pad)
+{
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct channel_backend *chanb = &ctx->priv.chan->backend;
+ size_t index, bytes_left_in_page;
+ size_t offset = ctx->priv.buf_offset;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
+
+ if (unlikely(!len))
+ return;
+ backend_pages =
+ lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+ offset &= chanb->buf_size - 1;
+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+ bytes_left_in_page = min_t(size_t, len, (-offset) & ~PAGE_MASK);
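+	/* Fast path: the whole P-string fits in the current backend page. */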
+ if (likely(bytes_left_in_page == len)) {
+ size_t count;
+
+ count = lib_ring_buffer_do_strcpy(config,
+ backend_pages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ src, len);
+ offset += count;
+ /* Padding */
+ if (unlikely(count < len)) {
+ size_t pad_len = len - count;
+
+ lib_ring_buffer_do_memset(backend_pages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ } else {
+ _lib_ring_buffer_pstrcpy(bufb, offset, src, len, pad);
+ }
+ ctx->priv.buf_offset += len;
+}
+
/**
* lib_ring_buffer_copy_from_user_inatomic - write userspace data to a buffer backend
* @config : ring buffer instance configuration
ctx->priv.buf_offset += len;
}
+/**
+ * lib_ring_buffer_pstrcpy_from_user_inatomic - write userspace C-string (input) to a buffer backend P-string
+ * @config : ring buffer instance configuration
+ * @ctx : ring buffer context (input arguments only)
+ * @src : source pointer to copy from
+ * @len : length of data to copy
+ * @pad : character to use for padding
+ *
+ * This function copies up to @len bytes of data from a userspace
+ * source pointer into a Pascal string in the buffer backend. If a
+ * terminating '\0' character is found in @src before @len characters
+ * are copied, the remainder of the buffer is padded with @pad
+ * characters (e.g. '\0').
+ *
+ * The length of Pascal strings in the ring buffer is explicit: it is
+ * either the array or sequence length.
+ */
+static inline
+void lib_ring_buffer_pstrcpy_from_user_inatomic(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_ctx *ctx,
+ const char __user *src, size_t len, char pad)
+ __attribute__((always_inline));
+static inline
+void lib_ring_buffer_pstrcpy_from_user_inatomic(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_ctx *ctx,
+ const char __user *src, size_t len, char pad)
+{
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct channel_backend *chanb = &ctx->priv.chan->backend;
+ size_t index, bytes_left_in_page;
+ size_t offset = ctx->priv.buf_offset;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
+
+ if (unlikely(!len))
+ return;
+ backend_pages =
+ lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+ offset &= chanb->buf_size - 1;
+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+ bytes_left_in_page = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+
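+	/* Validate the whole userspace range up front; if it is not readable, fill the field entirely with padding. */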
+ if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
+ goto fill_buffer;
+
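+	/* Tracing runs in atomic context: disable page faults so a faulting userspace read returns short instead of sleeping. */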
+ pagefault_disable();
+ if (likely(bytes_left_in_page == len)) {
+ size_t count;
+
+ count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
+ backend_pages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ src, len);
+ offset += count;
+ /* Padding */
+ if (unlikely(count < len)) {
+ size_t pad_len = len - count;
+
+ lib_ring_buffer_do_memset(backend_pages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ } else {
+ _lib_ring_buffer_pstrcpy_from_user_inatomic(bufb, offset, src, len, pad);
+ }
+ ctx->priv.buf_offset += len;
+ pagefault_enable();
+
+ return;
+
+fill_buffer:
+ /*
+ * In the error path we call the slow path version to avoid
+ * the pollution of static inline code.
+ */
+ _lib_ring_buffer_memset(bufb, offset, pad, len, 0);
+ ctx->priv.buf_offset += len;
+}
+
/*
* This accessor counts the number of unread records in a buffer.
 * It only provides a consistent value if no reads or writes are performed
extern void _lib_ring_buffer_strcpy(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, const char *src, size_t len,
size_t pagecpy, int pad);
+extern void _lib_ring_buffer_pstrcpy(struct lttng_kernel_ring_buffer_backend *bufb,
+ size_t offset, const char *src, size_t len, int pad);
extern void _lib_ring_buffer_copy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, const void *src,
size_t len, size_t pagecpy);
extern void _lib_ring_buffer_strcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, const char __user *src, size_t len,
size_t pagecpy, int pad);
+extern void _lib_ring_buffer_pstrcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
+ size_t offset, const char __user *src, size_t len, int pad);
/*
* Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
+/**
+ * _lib_ring_buffer_pstrcpy - write to a buffer backend P-string
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source pointer to copy from
+ * @len : length of data to copy
+ * @pad : character to use for padding
+ *
+ * This function copies up to @len bytes of data from a source pointer
+ * into a Pascal string in the buffer backend. If a terminating '\0'
+ * character is found in @src before @len characters are copied, the
+ * remainder of the buffer is padded with @pad characters (e.g. '\0').
+ *
+ * The length of Pascal strings in the ring buffer is explicit: it is
+ * either the array or sequence length.
+ */
+void _lib_ring_buffer_pstrcpy(struct lttng_kernel_ring_buffer_backend *bufb,
+ size_t offset, const char *src, size_t len, int pad)
+{
+ struct channel_backend *chanb = &bufb->chan->backend;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+ size_t sbidx, index, bytes_left_in_page;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
+ unsigned long sb_bindex, id;
+ bool src_terminated = false;
+
+ CHAN_WARN_ON(chanb, !len);
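+	/* Walk the destination page by page; once the terminating '\0' is found in @src, the remaining bytes are pure padding. */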
+ do {
+ sbidx = offset >> chanb->subbuf_size_order;
+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+ bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+ id = bufb->buf_wsb[sbidx].id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = bufb->array[sb_bindex];
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+
+ if (likely(!src_terminated)) {
+ size_t count, to_copy;
+
+ to_copy = bytes_left_in_page;
+ count = lib_ring_buffer_do_strcpy(config,
+ rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ src, to_copy);
+ offset += count;
+ /* Padding */
+ if (unlikely(count < to_copy)) {
+ size_t pad_len = to_copy - count;
+
+ /* Next pages will have padding */
+ src_terminated = true;
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ } else {
+ size_t pad_len;
+
+ pad_len = bytes_left_in_page;
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ len -= bytes_left_in_page;
+ if (!src_terminated)
+ src += bytes_left_in_page;
+ } while (unlikely(len));
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_pstrcpy);
+
/**
* lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
* @bufb : buffer backend
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
+/**
+ * _lib_ring_buffer_pstrcpy_from_user_inatomic - write userspace string to a buffer backend P-string
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source pointer to copy from
+ * @len : length of data to copy
+ * @pad : character to use for padding
+ *
+ * This function copies up to @len bytes of data from a userspace
+ * source pointer into a Pascal string in the buffer backend. If a
+ * terminating '\0' character is found in @src before @len characters
+ * are copied, the remainder of the buffer is padded with @pad
+ * characters (e.g. '\0').
+ *
+ * The length of Pascal strings in the ring buffer is explicit: it is
+ * either the array or sequence length.
+ *
+ * This function deals with userspace pointers; it must never be called
+ * without having first checked @src with access_ok().
+ */
+void _lib_ring_buffer_pstrcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
+ size_t offset, const char __user *src, size_t len, int pad)
+{
+ struct channel_backend *chanb = &bufb->chan->backend;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+ size_t sbidx, index, bytes_left_in_page;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
+ unsigned long sb_bindex, id;
+ bool src_terminated = false;
+
+ CHAN_WARN_ON(chanb, !len);
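+	/* Same page-by-page walk as _lib_ring_buffer_pstrcpy, using the inatomic userspace copy helper. */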
+ do {
+ sbidx = offset >> chanb->subbuf_size_order;
+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+ bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+ id = bufb->buf_wsb[sbidx].id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = bufb->array[sb_bindex];
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+
+ if (likely(!src_terminated)) {
+ size_t count, to_copy;
+
+ to_copy = bytes_left_in_page;
+ count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
+ rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ src, to_copy);
+ offset += count;
+ /* Padding */
+ if (unlikely(count < to_copy)) {
+ size_t pad_len = to_copy - count;
+
+ /* Next pages will have padding */
+ src_terminated = true;
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ } else {
+ size_t pad_len;
+
+ pad_len = bytes_left_in_page;
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ len -= bytes_left_in_page;
+ if (!src_terminated)
+ src += bytes_left_in_page;
+ } while (unlikely(len));
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_pstrcpy_from_user_inatomic);
+
/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
* @bufb : buffer backend
len, '#');
}
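+/*
+ * C-string copies above pad with '#'; P-strings carry an explicit
+ * length (the array or sequence length) and are padded with '\0'.
+ */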
+static
+void lttng_event_pstrcpy_pad(struct lttng_kernel_ring_buffer_ctx *ctx, const char *src,
+ size_t len)
+{
+ lib_ring_buffer_pstrcpy(&client_config, ctx, src, len, '\0');
+}
+
+static
+void lttng_event_pstrcpy_pad_from_user(struct lttng_kernel_ring_buffer_ctx *ctx, const char __user *src,
+ size_t len)
+{
+ lib_ring_buffer_pstrcpy_from_user_inatomic(&client_config, ctx, src, len, '\0');
+}
+
static
void lttng_channel_buffer_lost_event_too_big(struct lttng_kernel_channel_buffer *lttng_chan)
{
.event_memset = lttng_event_memset,
.event_strcpy = lttng_event_strcpy,
.event_strcpy_from_user = lttng_event_strcpy_from_user,
+ .event_pstrcpy_pad = lttng_event_pstrcpy_pad,
+ .event_pstrcpy_pad_from_user = lttng_event_pstrcpy_pad_from_user,
.lost_event_too_big = lttng_channel_buffer_lost_event_too_big,
},
};
lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
}
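+/* As in the buffer client above: explicit-length P-strings are '\0'-padded. */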
+static
+void lttng_event_pstrcpy_pad(struct lttng_kernel_ring_buffer_ctx *ctx, const char *src,
+ size_t len)
+{
+ lib_ring_buffer_pstrcpy(&client_config, ctx, src, len, '\0');
+}
+
static
size_t lttng_packet_avail_size(struct lttng_kernel_ring_buffer_channel *chan)
{
.event_memset = lttng_event_memset,
.event_write = lttng_event_write,
.event_strcpy = lttng_event_strcpy,
+ .event_pstrcpy_pad = lttng_event_pstrcpy_pad,
},
};