/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
 *
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need an extra subbuffer for the reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
					 * num_subbuf_alloc,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	vfree(pages);
	return 0;

free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	return -ENOMEM;
}
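
/*
 * Illustrative sizing note (editorial, hypothetical numbers): for a 64 kB
 * buffer split into 4 sub-buffers on a 4 kB-page system, num_pages is
 * 64 kB >> PAGE_SHIFT = 16 and num_pages_per_subbuf is
 * 16 >> get_count_order(4) = 4. With extra_reader_sb set, 4 more pages are
 * allocated, so 20 pages back 5 sub-buffers, the extra one being the
 * reader-owned sub-buffer used for flight-recorder style consumption.
 */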

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	lttng_kvfree(bufb->buf_wsb);
	lttng_kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		lttng_kvfree(bufb->array[i]);
	}
	lttng_kvfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}
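
/*
 * Editorial note: subbuffer_id() (see backend_internal.h) packs three fields
 * into a single unsigned long: the sub-buffer offset count, a "noref" flag,
 * and the index into bufb->array. The (config, 0, 1, i) initializations
 * above therefore mean "offset 0, noref set, backing-pages element i"; in
 * overwrite mode the reader later exchanges a write-side id with the
 * reader-owned buf_rsb.id to flip sub-buffer ownership.
 */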

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * switch timer CPU DEAD callback.
 * We don't free buffers when CPUs go away, because it would make trace
 * data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
		  "ring_buffer_cpu_hp_callback: cpu %d "
		  "buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
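
/*
 * Editorial note (assumption): lttng_rb_hp_prepare is a dynamic cpuhp state
 * assumed to be registered once at module init, along these lines (the
 * state name and the shim callback are illustrative, not from this file):
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN,
 *			"lttng:rb:prepare", lttng_rb_cpuhp_prepare_shim, NULL);
 *	if (ret < 0)
 *		return ret;
 *	lttng_rb_hp_prepare = ret;
 *
 * where the hypothetical shim would container_of() the hlist node back to
 * its struct lttng_cpuhp_node and call lttng_cpuhp_rb_backend_prepare().
 */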

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number (passed as a void pointer)
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			  "ring_buffer_cpu_hp_callback: cpu %d "
			  "buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer CPU
		 * DEAD callback. */
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is larger than a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
			(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		{
			/*
			 * In case of non-hotplug cpu, if the ring-buffer is allocated
			 * in early initcall, it will not be notified of secondary cpus.
			 * In that off case, we need to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		/*
		 * Teardown of lttng_rb_hp_prepare instance
		 * on "add" error is handled within cpu hotplug,
		 * no teardown to do from the caller.
		 */
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}
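
/*
 * Illustrative call sequence (editorial sketch, not from this file): a
 * hypothetical client embedding a channel_backend would initialize and tear
 * it down roughly as follows; my_client_config and my_priv are made-up names.
 */
#if 0
	ret = channel_backend_init(&chan->backend, "my_chan",
				   &my_client_config, my_priv,
				   8 * PAGE_SIZE,	/* subbuf_size */
				   4);			/* num_subbuf */
	if (ret)
		return ret;
	/* ... tracing ... */
	channel_backend_unregister_notifiers(&chan->backend);
	channel_backend_free(&chan->backend);
#endif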

/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}

/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied within the current page
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
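
/*
 * Editorial note: the inline fast path in backend.h copies the first
 * @pagecpy bytes (up to the end of the current page) and only calls the
 * slow path above when the record crosses a page boundary; each loop
 * iteration then consumes the previous chunk and copies
 * min(remaining, bytes to next page boundary). E.g. a 5000-byte write
 * starting 100 bytes before a page boundary (4 kB pages) is copied in
 * chunks of 100, 4096 and 804 bytes.
 */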

/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : number of bytes already copied within the current page
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
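
/*
 * Editorial note: besides writing client-visible padding, this slow path is
 * also used by _lib_ring_buffer_copy_from_user_inatomic() below to zero-fill
 * the remainder of a record when a userspace read faults mid-copy.
 */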

/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied within the current page
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
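
/*
 * Editorial note: the routine above writes exactly @len bytes into the
 * buffer: the source bytes up to (not including) the terminating '\0', then
 * @pad for the remainder, with a single '\0' forced as the final byte, so
 * readers always see a fixed-size, null-terminated field.
 */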

/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied within the current page
 *
 * This function deals with userspace pointers; it should never be called
 * directly without the src pointer having been checked with access_ok()
 * first.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
				      size_t offset,
				      const void __user *src, size_t len,
				      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
				&& subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed: zero-fill the remainder. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);

/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied within the current page
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers; it should never be called
 * directly without the src pointer having been checked with access_ok()
 * first.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
				&& subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
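
/*
 * Illustrative read-side usage (editorial sketch; the frontend calls shown
 * are assumptions based on frontend.h, not on this file):
 *
 *	if (!lib_ring_buffer_get_subbuf(buf, consumed)) {
 *		lib_ring_buffer_read(&buf->backend, read_offset, dest, len);
 *		lib_ring_buffer_put_subbuf(buf);
 *	}
 */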

/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
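
/*
 * Editorial note (assumption): exposing the pfn rather than a struct page
 * pointer lets output paths that remap buffer pages (e.g. mmap or splice
 * consumers) translate back with pfn_to_page() only where needed.
 */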

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);
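
/*
 * Editorial note: unlike lib_ring_buffer_read_offset_address() above, which
 * resolves through the reader-owned buf_rsb.id, this variant resolves
 * through the write-side table buf_wsb[sbidx].id, so it returns the page
 * currently owned by the writer for that sub-buffer.
 */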