X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=src%2Flib%2Fringbuffer%2Fring_buffer_backend.c;h=9a339be0a6e9b7040abce39b9585a2f2e5d95f48;hb=6b757027d2404725d3ae002ae682f4db878adfbb;hp=1625804f8b5ef845060c82221e293c9ed3a714d0;hpb=5a15f70c5211ff4a398171a6971586e2948eb419;p=lttng-modules.git

diff --git a/src/lib/ringbuffer/ring_buffer_backend.c b/src/lib/ringbuffer/ring_buffer_backend.c
index 1625804f..9a339be0 100644
--- a/src/lib/ringbuffer/ring_buffer_backend.c
+++ b/src/lib/ringbuffer/ring_buffer_backend.c
@@ -12,10 +12,10 @@
 #include
 #include
 #include
-#include
 #include
 #include
+#include
 #include
 #include	/* for wrapper_vmalloc_sync_mappings() */
 #include
@@ -31,8 +31,8 @@
  * @extra_reader_sb: need extra subbuffer for reader
  */
 static
-int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
-				     struct lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_allocate(const struct lttng_kernel_ring_buffer_config *config,
+				     struct lttng_kernel_ring_buffer_backend *bufb,
 				     size_t size, size_t num_subbuf,
 				     int extra_reader_sb)
 {
@@ -98,8 +98,8 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 
 	for (i = 0; i < num_subbuf_alloc; i++) {
 		bufb->array[i] = lttng_kvzalloc_node(ALIGN(
-				sizeof(struct lib_ring_buffer_backend_pages) +
-				sizeof(struct lib_ring_buffer_backend_page)
+				sizeof(struct lttng_kernel_ring_buffer_backend_pages) +
+				sizeof(struct lttng_kernel_ring_buffer_backend_page)
 				* num_pages_per_subbuf,
 				1 << INTERNODE_CACHE_SHIFT),
 				GFP_KERNEL | __GFP_NOWARN,
@@ -110,7 +110,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 
 	/* Allocate write-side subbuffer table */
 	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
-				sizeof(struct lib_ring_buffer_backend_subbuffer)
+				sizeof(struct lttng_kernel_ring_buffer_backend_subbuffer)
 				* num_subbuf,
 				1 << INTERNODE_CACHE_SHIFT),
 				GFP_KERNEL | __GFP_NOWARN,
@@ -130,7 +130,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 
 	/* Allocate subbuffer packet counter table */
 	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
-				sizeof(struct lib_ring_buffer_backend_counts)
+				sizeof(struct lttng_kernel_ring_buffer_backend_counts)
 				* num_subbuf,
 				1 << INTERNODE_CACHE_SHIFT),
 				GFP_KERNEL | __GFP_NOWARN,
@@ -179,12 +179,12 @@ not_enough_pages:
 	return -ENOMEM;
 }
 
-int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_create(struct lttng_kernel_ring_buffer_backend *bufb,
 				   struct channel_backend *chanb, int cpu)
 {
-	const struct lib_ring_buffer_config *config = &chanb->config;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
 
-	bufb->chan = container_of(chanb, struct channel, backend);
+	bufb->chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
 	bufb->cpu = cpu;
 
 	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
@@ -192,7 +192,7 @@ int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
 					       chanb->extra_reader_sb);
 }
 
-void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
+void lib_ring_buffer_backend_free(struct lttng_kernel_ring_buffer_backend *bufb)
 {
 	struct channel_backend *chanb = &bufb->chan->backend;
 	unsigned long i, j, num_subbuf_alloc;
@@ -212,10 +212,10 @@ void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
 	bufb->allocated = 0;
 }
 
-void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
+void lib_ring_buffer_backend_reset(struct lttng_kernel_ring_buffer_backend *bufb)
 {
 	struct channel_backend *chanb = &bufb->chan->backend;
-	const struct lib_ring_buffer_config *config = &chanb->config;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
 	unsigned long num_subbuf_alloc;
 	unsigned int i;
 
@@ -248,8 +248,8 @@ void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
  */
 void channel_backend_reset(struct channel_backend *chanb)
 {
-	struct channel *chan = container_of(chanb, struct channel, backend);
-	const struct lib_ring_buffer_config *config = &chanb->config;
+	struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
 
 	/*
 	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
@@ -259,7 +259,7 @@ void channel_backend_reset(struct channel_backend *chanb)
 	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
 }
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 
 /*
  * No need to implement a "dead" callback to do a buffer switch here,
@@ -273,8 +273,8 @@ int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
 {
 	struct channel_backend *chanb = container_of(node,
 			struct channel_backend, cpuhp_prepare);
-	const struct lib_ring_buffer_config *config = &chanb->config;
-	struct lib_ring_buffer *buf;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+	struct lttng_kernel_ring_buffer *buf;
 	int ret;
 
 	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
@@ -291,7 +291,7 @@
 }
 EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
 
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -311,8 +311,8 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 	unsigned int cpu = (unsigned long)hcpu;
 	struct channel_backend *chanb = container_of(nb, struct channel_backend,
 						     cpu_hp_notifier);
-	const struct lib_ring_buffer_config *config = &chanb->config;
-	struct lib_ring_buffer *buf;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+	struct lttng_kernel_ring_buffer *buf;
 	int ret;
 
 	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
@@ -341,7 +341,7 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 
 #endif
 
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 
 /**
  * channel_backend_init - initialize a channel backend
@@ -363,10 +363,10 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
  */
 int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
-			 const struct lib_ring_buffer_config *config,
+			 const struct lttng_kernel_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
 {
-	struct channel *chan = container_of(chanb, struct channel, backend);
+	struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
 	unsigned int i;
 	int ret;
 
@@ -415,17 +415,17 @@ int channel_backend_init(struct channel_backend *chanb,
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
 		/* Allocating the buffer per-cpu structures */
-		chanb->buf = alloc_percpu(struct lib_ring_buffer);
+		chanb->buf = alloc_percpu(struct lttng_kernel_ring_buffer);
 		if (!chanb->buf)
 			goto free_cpumask;
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
 		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
 			&chanb->cpuhp_prepare.node);
 		if (ret)
 			goto free_bufs;
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 		{
 			/*
@@ -445,14 +445,14 @@ int channel_backend_init(struct channel_backend *chanb,
 			chanb->cpu_hp_notifier.priority = 5;
 			register_hotcpu_notifier(&chanb->cpu_hp_notifier);
 
-			get_online_cpus();
+			lttng_cpus_read_lock();
 			for_each_online_cpu(i) {
 				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
 							 chanb, i);
 				if (ret)
 					goto free_bufs;	/* cpu hotplug locked */
 			}
-			put_online_cpus();
+			lttng_cpus_read_unlock();
 #else
 			for_each_possible_cpu(i) {
 				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
@@ -462,9 +462,9 @@ int channel_backend_init(struct channel_backend *chanb,
 			}
 #endif
 		}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 	} else {
-		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
+		chanb->buf = kzalloc(sizeof(struct lttng_kernel_ring_buffer), GFP_KERNEL);
 		if (!chanb->buf)
 			goto free_cpumask;
 		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
@@ -477,20 +477,20 @@ free_bufs:
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 		/*
 		 * Teardown of lttng_rb_hp_prepare instance
 		 * on "add" error is handled within cpu hotplug,
 		 * no teardown to do from the caller.
 		 */
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 #ifdef CONFIG_HOTPLUG_CPU
-		put_online_cpus();
+		lttng_cpus_read_unlock();
 		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
 #endif
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 		for_each_possible_cpu(i) {
-			struct lib_ring_buffer *buf =
+			struct lttng_kernel_ring_buffer *buf =
 				per_cpu_ptr(chanb->buf, i);
 
 			if (!buf->backend.allocated)
@@ -514,18 +514,18 @@ free_cpumask:
  */
 void channel_backend_unregister_notifiers(struct channel_backend *chanb)
 {
-	const struct lib_ring_buffer_config *config = &chanb->config;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 		int ret;
 
 		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
 				&chanb->cpuhp_prepare.node);
 		WARN_ON(ret);
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 	}
 }
 
@@ -537,12 +537,12 @@ void channel_backend_unregister_notifiers(struct channel_backend *chanb)
  */
 void channel_backend_free(struct channel_backend *chanb)
 {
-	const struct lib_ring_buffer_config *config = &chanb->config;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
 	unsigned int i;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
 		for_each_possible_cpu(i) {
-			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
+			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
 
 			if (!buf->backend.allocated)
 				continue;
@@ -551,7 +551,7 @@ void channel_backend_free(struct channel_backend *chanb)
 		free_cpumask_var(chanb->cpumask);
 		free_percpu(chanb->buf);
 	} else {
-		struct lib_ring_buffer *buf = chanb->buf;
+		struct lttng_kernel_ring_buffer *buf = chanb->buf;
 
 		CHAN_WARN_ON(chanb, !buf->backend.allocated);
 		lib_ring_buffer_free(buf);
@@ -565,21 +565,17 @@
  * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
- * @pagecpy : page size copied so far
 */
-void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
-			    const void *src, size_t len, size_t pagecpy)
+void _lib_ring_buffer_write(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
+			    const void *src, size_t len)
 {
 	struct channel_backend *chanb = &bufb->chan->backend;
-	const struct lib_ring_buffer_config *config = &chanb->config;
-	size_t sbidx, index;
-	struct lib_ring_buffer_backend_pages *rpages;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+	size_t sbidx, index, bytes_left_in_page;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
 	unsigned long sb_bindex, id;
 
 	do {
-		len -= pagecpy;
-		src += pagecpy;
-		offset += pagecpy;
 		sbidx = offset >> chanb->subbuf_size_order;
 		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
@@ -589,7 +585,7 @@ void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
 		 */
 		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
 
-		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
 		id = bufb->buf_wsb[sbidx].id;
 		sb_bindex = subbuffer_id_get_index(config, id);
 		rpages = bufb->array[sb_bindex];
@@ -598,33 +594,31 @@ void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
 		lib_ring_buffer_do_copy(config,
 					rpages->p[index].virt
 						+ (offset & ~PAGE_MASK),
-					src, pagecpy);
-	} while (unlikely(len != pagecpy));
+					src, bytes_left_in_page);
+		len -= bytes_left_in_page;
+		src += bytes_left_in_page;
+		offset += bytes_left_in_page;
+	} while (unlikely(len));
 }
 EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
 
-
 /**
  * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
  * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
- * @pagecpy : page size copied so far
 */
-void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
-			     size_t offset,
-			     int c, size_t len, size_t pagecpy)
+void _lib_ring_buffer_memset(struct lttng_kernel_ring_buffer_backend *bufb,
+			     size_t offset, int c, size_t len)
 {
 	struct channel_backend *chanb = &bufb->chan->backend;
-	const struct lib_ring_buffer_config *config = &chanb->config;
-	size_t sbidx, index;
-	struct lib_ring_buffer_backend_pages *rpages;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+	size_t sbidx, index, bytes_left_in_page;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
 	unsigned long sb_bindex, id;
 
 	do {
-		len -= pagecpy;
-		offset += pagecpy;
 		sbidx = offset >> chanb->subbuf_size_order;
 		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
@@ -634,7 +628,7 @@ void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
 		 */
 		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
 
-		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
 		id = bufb->buf_wsb[sbidx].id;
 		sb_bindex = subbuffer_id_get_index(config, id);
 		rpages = bufb->array[sb_bindex];
@@ -642,8 +636,10 @@ void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
 			     && subbuffer_id_is_noref(config, id));
 		lib_ring_buffer_do_memset(rpages->p[index].virt
 						+ (offset & ~PAGE_MASK),
-					c, pagecpy);
-	} while (unlikely(len != pagecpy));
+					c, bytes_left_in_page);
+		len -= bytes_left_in_page;
+		offset += bytes_left_in_page;
+	} while (unlikely(len));
 }
 EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
 
@@ -653,26 +649,20 @@ EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
- * @pagecpy : page size copied so far
 * @pad : character to use for padding
 */
-void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
-			     size_t offset, const char *src, size_t len,
-			     size_t pagecpy, int pad)
+void _lib_ring_buffer_strcpy(struct lttng_kernel_ring_buffer_backend *bufb,
+			     size_t offset, const char *src, size_t len, int pad)
 {
 	struct channel_backend *chanb = &bufb->chan->backend;
-	const struct lib_ring_buffer_config *config = &chanb->config;
-	size_t sbidx, index;
-	struct lib_ring_buffer_backend_pages *rpages;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+	size_t sbidx, index, bytes_left_in_page;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
 	unsigned long sb_bindex, id;
-	int src_terminated = 0;
+	bool src_terminated = false;
 
 	CHAN_WARN_ON(chanb, !len);
-	offset += pagecpy;
 	do {
-		len -= pagecpy;
-		if (!src_terminated)
-			src += pagecpy;
 		sbidx = offset >> chanb->subbuf_size_order;
 		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
@@ -682,7 +672,7 @@ void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
 		 */
 		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
 
-		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
 		id = bufb->buf_wsb[sbidx].id;
 		sb_bindex = subbuffer_id_get_index(config, id);
 		rpages = bufb->array[sb_bindex];
@@ -692,8 +682,8 @@ void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
 		if (likely(!src_terminated)) {
 			size_t count, to_copy;
 
-			to_copy = pagecpy;
-			if (pagecpy == len)
+			to_copy = bytes_left_in_page;
+			if (bytes_left_in_page == len)
 				to_copy--;	/* Final '\0' */
 			count = lib_ring_buffer_do_strcpy(config,
 					rpages->p[index].virt
@@ -705,7 +695,7 @@ void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
 				size_t pad_len = to_copy - count;
 
 				/* Next pages will have padding */
-				src_terminated = 1;
+				src_terminated = true;
 				lib_ring_buffer_do_memset(rpages->p[index].virt
 						+ (offset & ~PAGE_MASK),
 					pad, pad_len);
@@ -714,49 +704,126 @@ void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
 		} else {
 			size_t pad_len;
 
-			pad_len = pagecpy;
-			if (pagecpy == len)
+			pad_len = bytes_left_in_page;
+			if (bytes_left_in_page == len)
 				pad_len--;	/* Final '\0' */
 			lib_ring_buffer_do_memset(rpages->p[index].virt
 					+ (offset & ~PAGE_MASK),
 				pad, pad_len);
 			offset += pad_len;
 		}
-	} while (unlikely(len != pagecpy));
+		len -= bytes_left_in_page;
+		if (!src_terminated)
+			src += bytes_left_in_page;
+	} while (unlikely(len));
+
 	/* Ending '\0' */
 	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
 			'\0', 1);
 }
 EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
 
+/**
+ * _lib_ring_buffer_pstrcpy - write to a buffer backend P-string
+ * @bufb : buffer backend
+ * @src : source pointer to copy from
+ * @len : length of data to copy
+ * @pad : character to use for padding
+ *
+ * This function copies up to @len bytes of data from a source pointer
+ * to a Pascal String into the buffer backend. If a terminating '\0'
+ * character is found in @src before @len characters are copied, pad the
+ * buffer with @pad characters (e.g. '\0').
+ *
+ * The length of the pascal strings in the ring buffer is explicit: it
+ * is either the array or sequence length.
+ */
+void _lib_ring_buffer_pstrcpy(struct lttng_kernel_ring_buffer_backend *bufb,
+		size_t offset, const char *src, size_t len, int pad)
+{
+	struct channel_backend *chanb = &bufb->chan->backend;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+	size_t sbidx, index, bytes_left_in_page;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
+	unsigned long sb_bindex, id;
+	bool src_terminated = false;
+
+	CHAN_WARN_ON(chanb, !len);
+	do {
+		sbidx = offset >> chanb->subbuf_size_order;
+		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+		/*
+		 * Underlying layer should never ask for writes across
+		 * subbuffers.
+		 */
+		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+		id = bufb->buf_wsb[sbidx].id;
+		sb_bindex = subbuffer_id_get_index(config, id);
+		rpages = bufb->array[sb_bindex];
+		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+			     && subbuffer_id_is_noref(config, id));
+
+		if (likely(!src_terminated)) {
+			size_t count, to_copy;
+
+			to_copy = bytes_left_in_page;
+			count = lib_ring_buffer_do_strcpy(config,
+					rpages->p[index].virt
+						+ (offset & ~PAGE_MASK),
+					src, to_copy);
+			offset += count;
+			/* Padding */
+			if (unlikely(count < to_copy)) {
+				size_t pad_len = to_copy - count;
+
+				/* Next pages will have padding */
+				src_terminated = true;
+				lib_ring_buffer_do_memset(rpages->p[index].virt
+						+ (offset & ~PAGE_MASK),
+					pad, pad_len);
+				offset += pad_len;
+			}
+		} else {
+			size_t pad_len;
+
+			pad_len = bytes_left_in_page;
+			lib_ring_buffer_do_memset(rpages->p[index].virt
+					+ (offset & ~PAGE_MASK),
+				pad, pad_len);
+			offset += pad_len;
+		}
+		len -= bytes_left_in_page;
+		if (!src_terminated)
+			src += bytes_left_in_page;
+	} while (unlikely(len));
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_pstrcpy);
+
 /**
  * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
  * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
- * @pagecpy : page size copied so far
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
-void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
-				      size_t offset,
-				      const void __user *src, size_t len,
-				      size_t pagecpy)
+void _lib_ring_buffer_copy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
+		size_t offset, const void __user *src, size_t len)
 {
 	struct channel_backend *chanb = &bufb->chan->backend;
-	const struct lib_ring_buffer_config *config = &chanb->config;
-	size_t sbidx, index;
-	struct lib_ring_buffer_backend_pages *rpages;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+	size_t sbidx, index, bytes_left_in_page;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
 	unsigned long sb_bindex, id;
 	int ret;
 
 	do {
-		len -= pagecpy;
-		src += pagecpy;
-		offset += pagecpy;
 		sbidx = offset >> chanb->subbuf_size_order;
 		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
@@ -766,7 +833,7 @@ void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bu
 		 */
 		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
 
-		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
 		id = bufb->buf_wsb[sbidx].id;
 		sb_bindex = subbuffer_id_get_index(config, id);
 		rpages = bufb->array[sb_bindex];
@@ -774,13 +841,16 @@ void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bu
 			     && subbuffer_id_is_noref(config, id));
 		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
 							+ (offset & ~PAGE_MASK),
-							src, pagecpy) != 0;
+							src, bytes_left_in_page) != 0;
 		if (ret > 0) {
 			/* Copy failed. */
-			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
+			_lib_ring_buffer_memset(bufb, offset, 0, len);
 			break; /* stop copy */
 		}
-	} while (unlikely(len != pagecpy));
+		len -= bytes_left_in_page;
+		src += bytes_left_in_page;
+		offset += bytes_left_in_page;
+	} while (unlikely(len));
 }
 EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
 
@@ -790,29 +860,23 @@ EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
- * @pagecpy : page size copied so far
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
-void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
-		size_t offset, const char __user *src, size_t len,
-		size_t pagecpy, int pad)
+void _lib_ring_buffer_strcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
+		size_t offset, const char __user *src, size_t len, int pad)
 {
 	struct channel_backend *chanb = &bufb->chan->backend;
-	const struct lib_ring_buffer_config *config = &chanb->config;
-	size_t sbidx, index;
-	struct lib_ring_buffer_backend_pages *rpages;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+	size_t sbidx, index, bytes_left_in_page;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
 	unsigned long sb_bindex, id;
-	int src_terminated = 0;
+	bool src_terminated = false;
 
-	offset += pagecpy;
 	do {
-		len -= pagecpy;
-		if (!src_terminated)
-			src += pagecpy;
 		sbidx = offset >> chanb->subbuf_size_order;
 		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
@@ -822,7 +886,7 @@ void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *
 		 */
 		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
 
-		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
 		id = bufb->buf_wsb[sbidx].id;
 		sb_bindex = subbuffer_id_get_index(config, id);
 		rpages = bufb->array[sb_bindex];
@@ -832,8 +896,8 @@ void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *
 		if (likely(!src_terminated)) {
 			size_t count, to_copy;
 
-			to_copy = pagecpy;
-			if (pagecpy == len)
+			to_copy = bytes_left_in_page;
+			if (bytes_left_in_page == len)
 				to_copy--;	/* Final '\0' */
 			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
 					rpages->p[index].virt
@@ -845,7 +909,7 @@ void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *
 				size_t pad_len = to_copy - count;
 
 				/* Next pages will have padding */
-				src_terminated = 1;
+				src_terminated = true;
 				lib_ring_buffer_do_memset(rpages->p[index].virt
 						+ (offset & ~PAGE_MASK),
 					pad, pad_len);
@@ -854,21 +918,108 @@ void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *
 		} else {
 			size_t pad_len;
 
-			pad_len = pagecpy;
-			if (pagecpy == len)
+			pad_len = bytes_left_in_page;
+			if (bytes_left_in_page == len)
 				pad_len--;	/* Final '\0' */
 			lib_ring_buffer_do_memset(rpages->p[index].virt
 					+ (offset & ~PAGE_MASK),
 				pad, pad_len);
 			offset += pad_len;
 		}
-	} while (unlikely(len != pagecpy));
+		len -= bytes_left_in_page;
+		if (!src_terminated)
+			src += bytes_left_in_page;
+	} while (unlikely(len));
+
 	/* Ending '\0' */
 	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
 			'\0', 1);
 }
 EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
 
+/**
+ * _lib_ring_buffer_pstrcpy_from_user_inatomic - write userspace string to a buffer backend P-string
+ * @bufb : buffer backend
+ * @src : source pointer to copy from
+ * @len : length of data to copy
+ * @pad : character to use for padding
+ *
+ * This function copies up to @len bytes of data from a source pointer
+ * to a Pascal String into the buffer backend. If a terminating '\0'
+ * character is found in @src before @len characters are copied, pad the
+ * buffer with @pad characters (e.g. '\0').
+ *
+ * The length of the pascal strings in the ring buffer is explicit: it
+ * is either the array or sequence length.
+ *
+ * This function deals with userspace pointers, it should never be called
+ * directly without having the src pointer checked with access_ok()
+ * previously.
+ */
+void _lib_ring_buffer_pstrcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
+		size_t offset, const char __user *src, size_t len, int pad)
+{
+	struct channel_backend *chanb = &bufb->chan->backend;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+	size_t sbidx, index, bytes_left_in_page;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
+	unsigned long sb_bindex, id;
+	bool src_terminated = false;
+
+	CHAN_WARN_ON(chanb, !len);
+	do {
+		sbidx = offset >> chanb->subbuf_size_order;
+		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+		/*
+		 * Underlying layer should never ask for writes across
+		 * subbuffers.
+		 */
+		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+		id = bufb->buf_wsb[sbidx].id;
+		sb_bindex = subbuffer_id_get_index(config, id);
+		rpages = bufb->array[sb_bindex];
+		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+			     && subbuffer_id_is_noref(config, id));
+
+		if (likely(!src_terminated)) {
+			size_t count, to_copy;
+
+			to_copy = bytes_left_in_page;
+			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
+					rpages->p[index].virt
+						+ (offset & ~PAGE_MASK),
+					src, to_copy);
+			offset += count;
+			/* Padding */
+			if (unlikely(count < to_copy)) {
+				size_t pad_len = to_copy - count;
+
+				/* Next pages will have padding */
+				src_terminated = true;
+				lib_ring_buffer_do_memset(rpages->p[index].virt
+						+ (offset & ~PAGE_MASK),
+					pad, pad_len);
+				offset += pad_len;
+			}
+		} else {
+			size_t pad_len;
+
+			pad_len = bytes_left_in_page;
+			lib_ring_buffer_do_memset(rpages->p[index].virt
+					+ (offset & ~PAGE_MASK),
+				pad, pad_len);
+			offset += pad_len;
+		}
+		len -= bytes_left_in_page;
+		if (!src_terminated)
+			src += bytes_left_in_page;
+	} while (unlikely(len));
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_pstrcpy_from_user_inatomic);
+
 /**
  * lib_ring_buffer_read - read data from ring_buffer_buffer.
  * @bufb : buffer backend
@@ -879,13 +1030,13 @@ EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
-size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
+size_t lib_ring_buffer_read(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
 			    void *dest, size_t len)
 {
 	struct channel_backend *chanb = &bufb->chan->backend;
-	const struct lib_ring_buffer_config *config = &chanb->config;
-	size_t index, pagecpy, orig_len;
-	struct lib_ring_buffer_backend_pages *rpages;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+	size_t index, bytes_left_in_page, orig_len;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
 	unsigned long sb_bindex, id;
 
 	orig_len = len;
@@ -894,19 +1045,19 @@ size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
 	if (unlikely(!len))
 		return 0;
 	for (;;) {
-		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
 		id = bufb->buf_rsb.id;
 		sb_bindex = subbuffer_id_get_index(config, id);
 		rpages = bufb->array[sb_bindex];
 		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
 			     && subbuffer_id_is_noref(config, id));
 		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
-		       pagecpy);
-		len -= pagecpy;
+		       bytes_left_in_page);
+		len -= bytes_left_in_page;
 		if (likely(!len))
 			break;
-		dest += pagecpy;
-		offset += pagecpy;
+		dest += bytes_left_in_page;
+		offset += bytes_left_in_page;
 		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
 		/*
 		 * Underlying layer should never ask for reads across
@@ -930,14 +1081,14 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
 * function.
 * Returns -EFAULT on error, 0 if ok.
 */
-int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
+int __lib_ring_buffer_copy_to_user(struct lttng_kernel_ring_buffer_backend *bufb,
 				   size_t offset, void __user *dest, size_t len)
 {
 	struct channel_backend *chanb = &bufb->chan->backend;
-	const struct lib_ring_buffer_config *config = &chanb->config;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
 	size_t index;
-	ssize_t pagecpy;
-	struct lib_ring_buffer_backend_pages *rpages;
+	ssize_t bytes_left_in_page;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
 	unsigned long sb_bindex, id;
 
 	offset &= chanb->buf_size - 1;
@@ -945,7 +1096,7 @@ int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
 	if (unlikely(!len))
 		return 0;
 	for (;;) {
-		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
 		id = bufb->buf_rsb.id;
 		sb_bindex = subbuffer_id_get_index(config, id);
 		rpages = bufb->array[sb_bindex];
@@ -953,13 +1104,13 @@ int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
 			     && subbuffer_id_is_noref(config, id));
 		if (__copy_to_user(dest,
 			       rpages->p[index].virt + (offset & ~PAGE_MASK),
-			       pagecpy))
+			       bytes_left_in_page))
 			return -EFAULT;
-		len -= pagecpy;
+		len -= bytes_left_in_page;
 		if (likely(!len))
 			break;
-		dest += pagecpy;
-		offset += pagecpy;
+		dest += bytes_left_in_page;
+		offset += bytes_left_in_page;
 		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
 		/*
 		 * Underlying layer should never ask for reads across
@@ -982,15 +1133,15 @@ EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
-int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
+int lib_ring_buffer_read_cstr(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
 			      void *dest, size_t len)
 {
 	struct channel_backend *chanb = &bufb->chan->backend;
-	const struct lib_ring_buffer_config *config = &chanb->config;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
 	size_t index;
-	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
+	ssize_t bytes_left_in_page, pagelen, strpagelen, orig_offset;
 	char *str;
-	struct lib_ring_buffer_backend_pages *rpages;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
 	unsigned long sb_bindex, id;
 
 	offset &= chanb->buf_size - 1;
@@ -1008,12 +1159,12 @@ int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offse
 		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
 		strpagelen = strnlen(str, pagelen);
 		if (len) {
-			pagecpy = min_t(size_t, len, strpagelen);
+			bytes_left_in_page = min_t(size_t, len, strpagelen);
 			if (dest) {
-				memcpy(dest, str, pagecpy);
-				dest += pagecpy;
+				memcpy(dest, str, bytes_left_in_page);
+				dest += bytes_left_in_page;
 			}
-			len -= pagecpy;
+			len -= bytes_left_in_page;
 		}
 		offset += strpagelen;
 		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
@@ -1040,13 +1191,13 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
-unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
+unsigned long *lib_ring_buffer_read_get_pfn(struct lttng_kernel_ring_buffer_backend *bufb,
 					    size_t offset, void ***virt)
 {
 	size_t index;
-	struct lib_ring_buffer_backend_pages *rpages;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
 	struct channel_backend *chanb = &bufb->chan->backend;
-	const struct lib_ring_buffer_config *config = &chanb->config;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
 	unsigned long sb_bindex, id;
 
 	offset &= chanb->buf_size - 1;
@@ -1072,13 +1223,13 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
-void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
+void *lib_ring_buffer_read_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
 					  size_t offset)
 {
 	size_t index;
-	struct lib_ring_buffer_backend_pages *rpages;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
 	struct channel_backend *chanb = &bufb->chan->backend;
-	const struct lib_ring_buffer_config *config = &chanb->config;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
 	unsigned long sb_bindex, id;
 
 	offset &= chanb->buf_size - 1;
@@ -1102,13 +1253,13 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
-void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
+void *lib_ring_buffer_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
 				     size_t offset)
 {
 	size_t sbidx, index;
-	struct lib_ring_buffer_backend_pages *rpages;
+	struct lttng_kernel_ring_buffer_backend_pages *rpages;
 	struct channel_backend *chanb = &bufb->chan->backend;
-	const struct lib_ring_buffer_config *config = &chanb->config;
+	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
 	unsigned long sb_bindex, id;
 
 	offset &= chanb->buf_size - 1;
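Apart from the struct lib_ring_buffer* to struct lttng_kernel_ring_buffer* renames, the recurring functional change in this patch is the restructured copy loop: each writer now computes bytes_left_in_page and advances len, src and offset at the tail of the loop body, so the first iteration is no different from the rest and callers no longer prime the loop with a pagecpy argument. The following userspace sketch of that pattern is illustrative only, not LTTng code; it assumes a fixed 4096-byte PAGE_SIZE and a flat backing array standing in for the mapped subbuffer pages.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static char backing[4 * PAGE_SIZE];	/* stand-in for the subbuffer pages */

static size_t min_size(size_t a, size_t b)
{
	return a < b ? a : b;
}

static void chunked_write(size_t offset, const void *src, size_t len)
{
	do {
		/* Never copy past the end of the current page. */
		size_t bytes_left_in_page = min_size(len,
				PAGE_SIZE - (offset & ~PAGE_MASK));

		memcpy(backing + offset, src, bytes_left_in_page);
		/*
		 * Cursor updates at the loop tail, as in the patched
		 * _lib_ring_buffer_write() loop above.
		 */
		len -= bytes_left_in_page;
		src = (const char *)src + bytes_left_in_page;
		offset += bytes_left_in_page;
	} while (len);
}

int main(void)
{
	char msg[2 * PAGE_SIZE];

	memset(msg, 'x', sizeof(msg));
	/* Starts 10 bytes before a page boundary: the copy spans 3 pages. */
	chunked_write(PAGE_SIZE - 10, msg, sizeof(msg));
	printf("%c %c\n", backing[PAGE_SIZE - 10], backing[3 * PAGE_SIZE - 11]);
	return 0;
}

Moving the cursor updates to the tail also makes the loop condition a plain while (unlikely(len)) instead of the old len != pagecpy comparison, which is what lets the patch drop the pagecpy parameter from every _lib_ring_buffer_* write path at once.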
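The newly added _lib_ring_buffer_pstrcpy and _lib_ring_buffer_pstrcpy_from_user_inatomic differ from the strcpy variants in one respect: no terminating '\0' is written, because a Pascal-style string carries its length explicitly in the event field (the array or sequence length). A userspace sketch of the same observable semantics, minus the page-by-page splitting; the helper name is hypothetical and this is not LTTng code:

#include <stdio.h>
#include <string.h>

/* Copy up to len bytes of src, padding the tail once src terminates. */
static void pstrcpy_sketch(char *dst, const char *src, size_t len, int pad)
{
	size_t n = strnlen(src, len);	/* bytes available before '\0', capped at len */

	memcpy(dst, src, n);
	memset(dst + n, pad, len - n);	/* pad the tail; no terminating '\0' is added */
}

int main(void)
{
	char field[8];

	pstrcpy_sketch(field, "abc", sizeof(field), '\0');
	/* field now holds 'a' 'b' 'c' plus five pad bytes, and no extra NUL. */
	printf("%zu bytes used of %zu\n", strnlen(field, sizeof(field)), sizeof(field));
	return 0;
}

The contrast with _lib_ring_buffer_strcpy is visible in the diff: the strcpy variant reserves a byte for the terminator (to_copy--; /* Final '\0' */) and ends with an explicit "Ending '\0'" memset, while the pstrcpy variant copies or pads the full len.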
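The get_online_cpus()/put_online_cpus() pairs are replaced with lttng_cpus_read_lock()/lttng_cpus_read_unlock() because the older API was deprecated upstream and eventually removed in favour of cpus_read_lock()/cpus_read_unlock(). The wrapper presumably resolves to whichever primitive the running kernel provides; the following is an assumed shape of that compat shim (the real definition lives in the LTTng wrapper headers, and the 4.13 cutoff used here is an assumption), not a copy of it:

#include <linux/version.h>
#include <linux/cpu.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0))
/* cpus_read_lock()/cpus_read_unlock() are available on this kernel. */
static inline void lttng_cpus_read_lock(void)
{
	cpus_read_lock();
}

static inline void lttng_cpus_read_unlock(void)
{
	cpus_read_unlock();
}
#else
/* Fall back to the legacy hotplug lock names on older kernels. */
static inline void lttng_cpus_read_lock(void)
{
	get_online_cpus();
}

static inline void lttng_cpus_read_unlock(void)
{
	put_online_cpus();
}
#endif

Keeping a single wrapper name at every call site means only the shim needs a kernel-version check, instead of sprinkling version conditionals through channel_backend_init() and the error paths shown above.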