Fix: handle large number of pages or subbuffers per buffer
lib/ringbuffer/ring_buffer_backend.c

/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: buffer backend structure
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

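	/*
	 * The temporary array of struct page pointers covers every page of
	 * the buffer. For buffers with a large number of pages this can
	 * exceed what a single physically contiguous kmalloc can reliably
	 * provide, hence the vmalloc_node() allocation (this is the point of
	 * the "handle large number of pages or subbuffers per buffer" fix).
	 */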
	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
					 * num_subbuf_alloc,
				   1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

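	/*
	 * Buffer pages are allocated one at a time (order 0), so no
	 * high-order physically contiguous allocation is needed even for
	 * very large buffers.
	 */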
	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			kzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = kzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = kzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

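	/*
	 * Both the kernel virtual address and the page frame number of each
	 * page are recorded below: the virtual address is what the copy and
	 * read routines use, while the pfn is used by
	 * lib_ring_buffer_read_get_pfn() and when freeing the pages.
	 */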
	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	vfree(pages);
	return 0;

free_wsb:
	kfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		kfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	kfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	return -ENOMEM;
}

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	kfree(bufb->buf_wsb);
	kfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		kfree(bufb->array[i]);
	}
	kfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			  "ring_buffer_cpu_hp_callback: cpu %d "
			  "buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer CPU
		 * DEAD callback. */
		break;
	}
	return NOTIFY_OK;
}
#endif

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is larger than a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

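	/*
	 * subbuffer_id_check_index() rejects subbuffer counts that cannot be
	 * encoded within a subbuffer id, which bounds how many subbuffers a
	 * single buffer may have.
	 */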
	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

		/*
		 * In case of non-hotplug cpu, if the ring-buffer is allocated
		 * in early initcall, it will not be notified of secondary cpus.
		 * In that off case, we need to allocate for all possible cpus.
		 */
#ifdef CONFIG_HOTPLUG_CPU
		/*
		 * buf->backend.allocated test takes care of concurrent CPU
		 * hotplug.
		 * Priority higher than frontend, so we create the ring buffer
		 * before we start the timer.
		 */
		chanb->cpu_hp_notifier.notifier_call =
				lib_ring_buffer_cpu_hp_callback;
		chanb->cpu_hp_notifier.priority = 5;
		register_hotcpu_notifier(&chanb->cpu_hp_notifier);

		get_online_cpus();
		for_each_online_cpu(i) {
			ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
						     chanb, i);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
		put_online_cpus();
#else
		for_each_possible_cpu(i) {
			ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
						     chanb, i);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
#endif
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}

/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
}

/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

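	/*
	 * Slow path: @pagecpy bytes have already been copied before this
	 * function is called. Each iteration below copies at most up to the
	 * end of the current backend page, then moves to the next page of
	 * the same subbuffer.
	 */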
	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);

/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);

/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);

/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
				      size_t offset,
				      const void __user *src, size_t len,
				      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed: zero-fill the remainder of the record. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);

/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);

/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
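		/* A '\0' was found within this page: the string ends here. */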
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns a pointer to the page frame number (unsigned long).
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);