/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>

#include "../../wrapper/vmalloc.h"	/* for wrapper_vmalloc_sync_all() */
#include "../../wrapper/ringbuffer/config.h"
#include "../../wrapper/ringbuffer/backend.h"
#include "../../wrapper/ringbuffer/frontend.h"

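/*
 * Layout built by lib_ring_buffer_backend_allocate() below (derived from
 * the code; sizes are examples): all backing pages are allocated into one
 * flat array, then handed out in groups of num_pages_per_subbuf to each
 * sub-buffer. In overwrite mode an extra sub-buffer is allocated so the
 * reader can exclusively hold one sub-buffer while the writer keeps
 * producing into the others, e.g. with num_subbuf = 2 and 2 pages per
 * sub-buffer:
 *
 *	pages:       [ p0 p1 ][ p2 p3 ][ p4 p5 ]
 *	sub-buffers:    sb0      sb1     reader   (extra_reader_sb)
 *
 * The buf_wsb[] write-side table and the buf_rsb read-side entry map
 * logical sub-buffer positions to these backing sub-buffers through
 * subbuffer_id(), which packs the sub-buffer index together with
 * bookkeeping flags (see the ring buffer backend headers).
 */
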
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: buffer backend
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	void **virt;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = kmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	virt = kmalloc_node(ALIGN(sizeof(*virt) * num_pages,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!virt))
		goto virt_error;

	bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
					 * num_subbuf_alloc,
				   1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
					    GFP_KERNEL | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
		virt[i] = page_address(pages[i]);
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			kzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = kzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = virt[page_idx];
			bufb->array[i]->p[j].page = pages[page_idx];
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	kfree(virt);
	kfree(pages);
	return 0;

free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		kfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	kfree(bufb->array);
array_error:
	kfree(virt);
virt_error:
	kfree(pages);
pages_error:
	return -ENOMEM;
}

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	kfree(bufb->buf_wsb);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(bufb->array[i]->p[j].page);
		kfree(bufb->array[i]);
	}
	kfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

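/*
 * CPU hotplug support for per-cpu channels: the callback below creates a
 * CPU's buffer at CPU_UP_PREPARE time, before the frontend starts its
 * switch timer on that CPU (channel_backend_init() registers it with a
 * priority of 5, above the frontend's). Nothing is torn down on CPU_DEAD:
 * the final buffer switch happens when tracing is stopped, or in the
 * switch timer's own CPU_DEAD callback.
 */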
#ifdef CONFIG_HOTPLUG_CPU
/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
					      unsigned long action,
					      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			       "ring_buffer_cpu_hp_callback: cpu %d "
			       "buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by the switch
		 * timer's CPU_DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}
#endif

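/*
 * A minimal usage sketch (hypothetical client, names assumed): a ring
 * buffer client embeds struct channel_backend in its channel structure and
 * calls channel_backend_init() with power-of-2 geometry, e.g.:
 *
 *	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
 *	ret = channel_backend_init(&chan->backend, "my_chan", &client_config,
 *				   priv, 2 * PAGE_SIZE, 4);
 *	if (ret)
 *		goto error;	// nothing to free on init failure
 *
 * With client_config.alloc == RING_BUFFER_ALLOC_PER_CPU, this creates an
 * 8 * PAGE_SIZE buffer per cpu (4 sub-buffers of 2 * PAGE_SIZE each).
 */
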
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	if (!(subbuf_size && num_subbuf))
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	CHAN_WARN_ON(chanb, subbuf_size < PAGE_SIZE);

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * powers of 2.
	 */
	CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
	CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	chanb->config = config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;

		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

		/*
		 * When CPU hotplug is unavailable, a ring buffer allocated
		 * from an early initcall is never notified of secondary CPUs
		 * coming online, so we must allocate for all possible CPUs
		 * up front.
		 */
#ifdef CONFIG_HOTPLUG_CPU
		/*
		 * buf->backend.allocated test takes care of concurrent CPU
		 * hotplug.
		 * Priority higher than frontend, so we create the ring buffer
		 * before we start the timer.
		 */
		chanb->cpu_hp_notifier.notifier_call =
				lib_ring_buffer_cpu_hp_callback;
		chanb->cpu_hp_notifier.priority = 5;
		register_hotcpu_notifier(&chanb->cpu_hp_notifier);

		get_online_cpus();
		for_each_online_cpu(i) {
			ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
						     chanb, i);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
		put_online_cpus();
#else
		for_each_possible_cpu(i) {
			ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
						     chanb, i);
			if (ret)
				goto free_bufs;
		}
#endif
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
#endif
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}

/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds the CPU hotplug lock.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
}

/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroy all channel buffers and free the channel backend.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

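/*
 * The copy helpers below (_lib_ring_buffer_write, lib_ring_buffer_read,
 * __lib_ring_buffer_copy_to_user) all split a linear buffer offset into a
 * page index and an intra-page offset, copying at most up to the end of
 * the current page per iteration:
 *
 *	pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
 *
 * Worked example (assuming PAGE_SIZE = 4096): offset = 4000, len = 200.
 * The first iteration copies 4096 - 4000 = 96 bytes; the second starts at
 * the next page (offset = 4096) and copies the remaining 104 bytes.
 */
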
/**
 * _lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied in the current page
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, ssize_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);

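/*
 * Read-side note: unlike the write path above, which indexes buf_wsb[] by
 * sub-buffer, the read helpers below always resolve through buf_rsb, the
 * single sub-buffer currently owned by the reader. get_subbuf() in the
 * frontend hands that sub-buffer to the reader (in overwrite mode by
 * exchanging IDs with a write-side entry), which is why these helpers must
 * be called between get_subbuf and put_subbuf.
 */
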
/**
 * lib_ring_buffer_read - read data from the ring buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	size_t index;
	ssize_t pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);

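/*
 * __lib_ring_buffer_copy_to_user below uses __copy_to_user(), the variant
 * of copy_to_user() that skips the access_ok() range check, hence the
 * requirement that the caller validate @dest with access_ok() first.
 */
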
/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

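/*
 * lib_ring_buffer_read_cstr below walks the string page by page: strnlen()
 * is bounded by the bytes left in the current page, and the loop only
 * continues when the string runs to the very end of the page
 * (strpagelen == pagelen), i.e. no NUL was found yet. Copying into @dest
 * stops once @len bytes have been used; the terminating NUL is written
 * only if space remains, and the returned length is that of the source
 * string regardless of truncation.
 */
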
/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length.
 * Should be protected by get_subbuf/put_subbuf.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

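/*
 * lib_ring_buffer_read_get_page below exposes the backing struct page of a
 * read-side buffer location, so that page-granular output paths (such as a
 * splice implementation; the use case is an assumption, callers live
 * outside this file) can reference the page directly instead of copying.
 */
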
/**
 * lib_ring_buffer_read_get_page - Get a whole page to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns a pointer to the page struct pointer.
 */
struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].page;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);

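/*
 * The two address-lookup helpers below differ only in which sub-buffer
 * table they consult: lib_ring_buffer_read_offset_address resolves through
 * the reader-owned buf_rsb (read side), while lib_ring_buffer_offset_address
 * resolves through buf_wsb[] (write side). Both return a kernel virtual
 * address that is only valid within the current page.
 */
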
/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);