/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/cpu.h>
#include <linux/mm.h>	/* si_mem_available() */
#include <linux/vmalloc.h>

#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	long i;
	num_pages = size >> PAGE_SHIFT;

	/*
	 * Verify that the number of pages requested for that buffer is smaller
	 * than the number of available pages on the system. si_mem_available()
	 * returns an _estimate_ of the number of available pages.
	 */
	if (num_pages > si_mem_available())
		goto not_enough_pages;

	/*
	 * Set the current user thread as the first target of the OOM killer.
	 * If the estimate received by si_mem_available() was off, and we do
	 * end up running out of memory because of this buffer allocation, we
	 * want to kill the offending app first.
	 */
	set_current_oom_origin();
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}
	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;
	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
					 * num_subbuf_alloc,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;
	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;
	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}
	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;
	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}
	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	clear_current_oom_origin();
	vfree(pages);
	return 0;
free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	clear_current_oom_origin();
not_enough_pages:
	return -ENOMEM;
}
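
/*
 * Example (illustrative sketch, not part of the original source): for a
 * hypothetical 1 MiB buffer split into 4 sub-buffers on a system with
 * 4 kiB pages, the decomposition above works out as:
 *
 *	num_pages            = 1048576 >> PAGE_SHIFT;	   // 256 pages
 *	num_pages_per_subbuf = 256 >> get_count_order(4);  // 64 pages
 *
 * With extra_reader_sb set (overwrite mode), one extra sub-buffer worth of
 * pages (64 here) is allocated so the reader can own a sub-buffer while
 * the writer keeps producing into the others.
 */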
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	lttng_kvfree(bufb->buf_wsb);
	lttng_kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		lttng_kvfree(bufb->array[i]);
	}
	lttng_kvfree(bufb->array);
	bufb->allocated = 0;
}
void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}
/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * switch timer CPU DEAD callback.
 * We don't free buffers when CPUs go away, because it would make trace
 * data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
			"ring_buffer_cpu_hp_callback: cpu %d "
			"buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
				"ring_buffer_cpu_hp_callback: cpu %d "
				"buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer CPU
		 * DEAD callback. */
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;
	/* Check that the subbuffer size is larger than a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;

	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;
	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		{
			/*
			 * In case of non-hotplug cpu, if the ring-buffer is allocated
			 * in early initcall, it will not be notified of secondary cpus.
			 * In that off case, we need to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;
free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		/*
		 * Teardown of lttng_rb_hp_prepare instance
		 * on "add" error is handled within cpu hotplug,
		 * no teardown to do from the caller.
		 */
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}
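
/*
 * Usage sketch (illustrative, with hypothetical client values): a client
 * would initialize a backend with power-of-2 geometry, e.g.
 *
 *	// 8 sub-buffers of 32 kiB each: 256 kiB of buffer space per cpu
 *	ret = channel_backend_init(&chan->backend, "my_chan", &client_config,
 *				   priv, 32768, 8);
 *	if (ret)
 *		return ret;
 *
 * Both 32768 and 8 pass the nonzero power-of-2 test used above,
 * (x & (x - 1)) == 0, and 32768 >= PAGE_SIZE on common configurations.
 */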
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}
/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}
/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
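
/*
 * Example (illustrative): with 4 kiB pages, a 100-byte record starting at
 * offset 4090 within the buffer is split by the loop above into
 *
 *	pagecpy = min(100, 4096 - 4090) =  6 bytes	// tail of page n
 *	pagecpy = min( 94, 4096)        = 94 bytes	// head of page n+1
 *
 * Each iteration copies at most up to the next page boundary; the loop
 * exits once the remaining length fits entirely within one page.
 */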
/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			size_t offset, const char *src, size_t len,
			size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
				      size_t offset,
				      const void __user *src, size_t len,
				      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
				&& subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
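
/*
 * Illustrative caller-side guard (sketch; on kernels >= 5.0 access_ok()
 * drops the type argument, older kernels use access_ok(VERIFY_READ, ...)):
 *
 *	if (unlikely(!access_ok(src, len)))
 *		return -EFAULT;
 *	// ... after record space reservation ...
 *	_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
 *
 * In LTTng this check is typically done by the inline fast-path wrapper
 * before falling back to this slow path.
 */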
/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
				&& subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
/**
 * lib_ring_buffer_read - read data from ring_buffer_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
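
/*
 * Usage sketch (illustrative; argument values are hypothetical): a reader
 * brackets reads with sub-buffer ownership, per the comment above:
 *
 *	if (!lib_ring_buffer_get_subbuf(buf, consumed)) {
 *		lib_ring_buffer_read(&buf->backend, read_offset, dest, len);
 *		lib_ring_buffer_put_subbuf(buf);
 *	}
 *
 * get_subbuf() returning 0 is meant to guarantee the read-side sub-buffer
 * stays valid (noref) until put_subbuf() is called.
 */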
/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
			       rpages->p[index].virt + (offset & ~PAGE_MASK),
			       pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);
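
/*
 * Example (illustrative): with 4 kiB pages and 32 kiB sub-buffers,
 * offset 40000 decomposes above as
 *
 *	sbidx = 40000 >> 15                = 1     // second sub-buffer
 *	index = (40000 & 32767) >> 12      = 1     // second page within it
 *	offset & ~PAGE_MASK = 40000 & 4095 = 3136  // byte within the page
 *
 * so the returned address points 3136 bytes into the second page of the
 * second sub-buffer's backing pages.
 */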