/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf;	/* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			     cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
						* num_subbuf_alloc,
					  1 << INTERNODE_CACHE_SHIFT),
					  GFP_KERNEL | __GFP_NOWARN,
					  cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
					    GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	vfree(pages);
	return 0;

free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	return -ENOMEM;
}
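
/*
 * Illustration (added; not part of the original source): a worked example of
 * the allocation geometry above, under assumed parameters. With size = 64 kB,
 * num_subbuf = 4 and 4 kB pages, num_pages = 65536 >> 12 = 16 and
 * num_pages_per_subbuf = 16 >> get_count_order(4) = 4; an extra reader
 * sub-buffer adds 4 more pages, so 20 pages back 5 sub-buffers.
 */
#if 0	/* Example sketch only, kept out of the build. */
static void example_backend_geometry(void)
{
	size_t size = 65536, num_subbuf = 4;
	unsigned long num_pages = size >> PAGE_SHIFT;		/* 16 on 4 kB pages */
	unsigned long num_pages_per_subbuf =
		num_pages >> get_count_order(num_subbuf);	/* 4 */

	/* Overwrite-mode channels keep one extra sub-buffer for the reader. */
	num_pages += num_pages_per_subbuf;			/* 20 */
	printk(KERN_DEBUG "%lu pages, %lu per sub-buffer\n",
	       num_pages, num_pages_per_subbuf);
}
#endif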

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	lttng_kvfree(bufb->buf_wsb);
	lttng_kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		lttng_kvfree(bufb->array[i]);
	}
	lttng_kvfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * switch timer CPU DEAD callback.
 * We don't free buffers when CPUs go away, because it would make trace
 * data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
		       "ring_buffer_cpu_hp_callback: cpu %d "
		       "buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 *	lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 *	@nb: notifier block
 *	@action: hotplug action to take
 *	@hcpu: CPU number
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			       "ring_buffer_cpu_hp_callback: cpu %d "
			       "buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer
		 * CPU DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is larger than a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;

	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		{
			/*
			 * In case of non-hotplug cpu, if the ring-buffer is allocated
			 * in early initcall, it will not be notified of secondary cpus.
			 * In that unlikely case, we need to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		/*
		 * Teardown of lttng_rb_hp_prepare instance
		 * on "add" error is handled within cpu hotplug,
		 * no teardown to do from the caller.
		 */
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}
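
/*
 * Illustration (added; not part of the original source): the geometry rules
 * enforced above, restated as a standalone check. Both subbuf_size and
 * num_subbuf must be nonzero powers of two, subbuf_size must be at least
 * PAGE_SIZE, and overwrite mode needs a second sub-buffer for the reader to
 * exchange. The power-of-two test uses the usual x & (x - 1) trick.
 */
#if 0	/* Example sketch only, kept out of the build. */
static int example_check_geometry(size_t subbuf_size, size_t num_subbuf,
				  int overwrite_mode)
{
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;	/* not a power of two */
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;	/* not a power of two */
	if (overwrite_mode && num_subbuf < 2)
		return -EINVAL;
	return 0;	/* e.g. 4 sub-buffers of 16 kB each: valid */
}
#endif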

/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}

/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
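
/*
 * Illustration (added; not part of the original source): how the loop above
 * splits a buffer offset, assuming 16 kB sub-buffers (subbuf_size_order = 14)
 * and 4 kB pages. For offset = 0x5234: sbidx = 0x5234 >> 14 = 1,
 * index = (0x5234 & 0x3fff) >> 12 = 1, and the byte lands at 0x234 within
 * that page, so a copy crossing a page boundary is split into pagecpy-sized
 * chunks.
 */
#if 0	/* Example sketch only, kept out of the build. */
static void example_offset_decompose(void)
{
	size_t offset = 0x5234;
	size_t subbuf_size = 16384;		/* order 14 */
	size_t sbidx = offset >> 14;		/* 1 */
	size_t index = (offset & (subbuf_size - 1)) >> PAGE_SHIFT;	/* 1 on 4 kB pages */
	size_t in_page = offset & ~PAGE_MASK;	/* 0x234 */

	printk(KERN_DEBUG "sbidx=%zu page=%zu off=0x%zx\n",
	       sbidx, index, in_page);
}
#endif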

/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
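
/*
 * Illustration (added; not part of the original source): besides padding,
 * _lib_ring_buffer_memset() is the fallback used by the user-copy path
 * further below to zero-fill a record whose source faulted, so readers never
 * observe stale buffer bytes. A hypothetical direct use clearing an 8-byte
 * reservation at a known offset:
 */
#if 0	/* Example sketch only, kept out of the build. */
static void example_clear_record(struct lib_ring_buffer_backend *bufb,
				 size_t offset)
{
	/* (bufb, offset, byte value, length, bytes already handled) */
	_lib_ring_buffer_memset(bufb, offset, 0, 8, 0);
}
#endif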

/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
				  '\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
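
/*
 * Illustration (added; not part of the original source): the string-write
 * contract above. Under the assumption that the fast path already copied
 * 2 bytes of "hi" up to a page boundary, the slow path pads the record with
 * the pad character and terminates it, so a len = 6 record reads
 * 'h' 'i' '#' '#' '#' '\0' and consumes exactly len bytes regardless of
 * strlen(src).
 */
#if 0	/* Example sketch only, kept out of the build. */
static void example_strcpy_padding(struct lib_ring_buffer_backend *bufb)
{
	size_t offset = PAGE_SIZE - 2;	/* record starts 2 bytes before a page boundary */

	/* 2 bytes were already written by the (hypothetical) fast path. */
	_lib_ring_buffer_strcpy(bufb, offset, "hi", 6, 2, '#');
}
#endif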

/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
					      size_t offset,
					      const void __user *src, size_t len,
					      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
								 + (offset & ~PAGE_MASK),
								 src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed: zero-fill the remainder of the record. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break;	/* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
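
/*
 * Illustration (added; not part of the original source): the access_ok()
 * contract stated above, sketched with the pre-5.0 three-argument form of
 * access_ok() that matches this file's era. A hypothetical caller validates
 * the user range once, then may call the inatomic helper; on fault the
 * record is zero-filled rather than left with stale data.
 */
#if 0	/* Example sketch only, kept out of the build. */
static void example_copy_from_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset,
				   const void __user *usrc, size_t len)
{
	if (!access_ok(VERIFY_READ, usrc, len))
		return;		/* reject before entering the copy path */
	_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, usrc, len, 0);
}
#endif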

/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
				  '\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
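
/*
 * Illustration (added; not part of the original source): a hypothetical
 * consumer copying a whole record out of the read-side sub-buffer. The
 * caller is expected to bracket the copy with get_subbuf()/put_subbuf() so
 * the writer cannot reclaim the sub-buffer mid-read; the return value is
 * always the requested length, copied page by page.
 */
#if 0	/* Example sketch only, kept out of the build. */
static size_t example_read_record(struct lib_ring_buffer_backend *bufb,
				  size_t offset, void *dst, size_t len)
{
	size_t copied;

	/* get_subbuf() done by the caller... */
	copied = lib_ring_buffer_read(bufb, offset, dst, len);
	/* ...put_subbuf() once dst has been consumed. */
	return copied;
}
#endif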

/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
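
/*
 * Illustration (added; not part of the original source): the user-space
 * mirror of lib_ring_buffer_read(). A hypothetical consumer validates the
 * destination with the era-appropriate three-argument access_ok() and must
 * handle -EFAULT, unlike the kernel-side read which cannot fail.
 */
#if 0	/* Example sketch only, kept out of the build. */
static int example_read_to_user(struct lib_ring_buffer_backend *bufb,
				size_t offset, void __user *udst, size_t len)
{
	if (!access_ok(VERIFY_WRITE, udst, len))
		return -EFAULT;
	return __lib_ring_buffer_copy_to_user(bufb, offset, udst, len);
}
#endif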

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
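
/*
 * Illustration (added; not part of the original source): reading a string
 * whose stored length is unknown. The return value is the full length of
 * the string found in the buffer, so a result >= the destination size
 * signals truncation (in which case dest is not '\0'-terminated).
 */
#if 0	/* Example sketch only, kept out of the build. */
static void example_read_cstr(struct lib_ring_buffer_backend *bufb,
			      size_t offset)
{
	char name[32];
	int strlen_in_buf;

	strlen_in_buf = lib_ring_buffer_read_cstr(bufb, offset, name,
						  sizeof(name));
	if (strlen_in_buf >= (int)sizeof(name))
		printk(KERN_DEBUG "string truncated; %zu unterminated bytes copied\n",
		       sizeof(name));
}
#endif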

/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);
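
/*
 * Illustration (added; not part of the original source): the two lookup
 * helpers above differ only in the sub-buffer table they consult.
 * lib_ring_buffer_read_offset_address() resolves through the read-side
 * entry (buf_rsb), typically to parse a sub-buffer header the reader has
 * acquired, while lib_ring_buffer_offset_address() resolves through the
 * write-side table (buf_wsb), typically to initialize a header being
 * written. The header layout below is hypothetical.
 */
#if 0	/* Example sketch only, kept out of the build. */
struct example_subbuf_header {
	u64 begin_tsc;	/* hypothetical field */
};

static void example_peek_header(struct lib_ring_buffer_backend *bufb,
				size_t read_offset)
{
	struct example_subbuf_header *hdr;

	/* Safe: a header is never on a page boundary (see comment above). */
	hdr = lib_ring_buffer_read_offset_address(bufb, read_offset);
	printk(KERN_DEBUG "begin tsc: %llu\n",
	       (unsigned long long)hdr->begin_tsc);
}
#endif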