/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>

#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = kmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
					 * num_subbuf_alloc,
				   1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
					    GFP_KERNEL | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			kzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = kzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = kzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	kfree(pages);
	return 0;

free_wsb:
	kfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		kfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	kfree(bufb->array);
array_error:
	kfree(pages);
pages_error:
	return -ENOMEM;
}
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	kfree(bufb->buf_wsb);
	kfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		kfree(bufb->array[i]);
	}
	kfree(bufb->array);
	bufb->allocated = 0;
}
void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}
/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
#ifdef CONFIG_HOTPLUG_CPU
/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			  "ring_buffer_cpu_hp_callback: cpu %d "
			  "buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer CPU
		 * DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}
#endif
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (at least PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;

	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
			(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

		/*
		 * In case of non-hotplug cpu, if the ring-buffer is allocated
		 * in early initcall, it will not be notified of secondary cpus.
		 * In that case, we need to allocate for all possible cpus.
		 */
#ifdef CONFIG_HOTPLUG_CPU
		/*
		 * buf->backend.allocated test takes care of concurrent CPU
		 * hotplug.
		 * Priority higher than frontend, so we create the ring buffer
		 * before we start the timer.
		 */
		chanb->cpu_hp_notifier.notifier_call =
				lib_ring_buffer_cpu_hp_callback;
		chanb->cpu_hp_notifier.priority = 5;
		register_hotcpu_notifier(&chanb->cpu_hp_notifier);

		get_online_cpus();
		for_each_online_cpu(i) {
			ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
						     chanb, i);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
		put_online_cpus();
#else
		for_each_possible_cpu(i) {
			ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
						     chanb, i);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
#endif
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(channel_backend_init);
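/*
 * A minimal caller-side sketch of the initialization above. The function
 * name, channel name and client_config object are assumptions for
 * illustration only; the geometry must satisfy the checks enforced above
 * (power-of-two sizes, sub-buffers of at least PAGE_SIZE, at least two
 * sub-buffers in overwrite mode).
 *
 *	static int client_create_channel(struct channel *chan, void *priv)
 *	{
 *		// 16 sub-buffers of 32 kB each: 512 kB of buffer per CPU.
 *		return channel_backend_init(&chan->backend, "client_chan",
 *					    &client_config, priv,
 *					    32768, 16);
 *	}
 */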
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
}
/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}
/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
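/*
 * Worked example of the page-splitting loop above (values assumed for
 * illustration): entering with offset = 4090, len = 100, pagecpy = 0 and
 * 4 kB pages.
 *
 *   iteration 1: pagecpy = min(100, 4096 - 4090) = 6  -> copy 6 bytes;
 *                len != pagecpy, so loop with len = 94, offset = 4096
 *   iteration 2: pagecpy = min(94, 4096) = 94          -> copy 94 bytes;
 *                len == pagecpy, loop exits
 *
 * Each iteration stays within one backend page; crossing into the next page
 * re-resolves the write-side sub-buffer id and backend pages descriptor.
 */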
/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
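/*
 * Example of the padding semantics above (string and sizes assumed for
 * illustration): copying src = "hi" with len = 8 and pad = '#' lays out
 *
 *   'h' 'i' '#' '#' '#' '#' '#' '\0'
 *
 * i.e. at most len - 1 bytes come from src, the remainder of the reserved
 * space is filled with the pad character, and the final byte is always the
 * terminating '\0' written after the loop.
 */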
/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok() first.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
					      size_t offset,
					      const void __user *src, size_t len,
					      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed: zero-fill the remainder. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
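/*
 * Callers are expected to validate the user pointer before reaching this
 * path; a minimal sketch (the surrounding probe code and the three-argument
 * access_ok() signature of this kernel era are assumptions for
 * illustration):
 *
 *	if (!access_ok(VERIFY_READ, usrc, len))
 *		return;		// or record zeroes / skip the field
 *	_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, usrc, len, 0);
 *
 * If the in-atomic copy still faults (e.g. the page is not resident), the
 * function above zero-fills the remaining bytes instead of faulting pages in.
 */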
/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok() first.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
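/*
 * Reader-side usage sketch. The exact frontend get/put call names and the
 * consumed-position handling are simplified assumptions for illustration;
 * the point is that reads are bracketed by get_subbuf/put_subbuf so the
 * sub-buffer cannot be reclaimed while it is copied out.
 * lib_ring_buffer_read() masks the offset with buf_size - 1 itself.
 *
 *	if (lib_ring_buffer_get_subbuf(buf, consumed))
 *		return;		// no sub-buffer ready for reading
 *	lib_ring_buffer_read(&buf->backend, consumed, dest, len);
 *	lib_ring_buffer_put_subbuf(buf);
 */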
/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
			       rpages->p[index].virt + (offset & ~PAGE_MASK),
			       pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);