/*
 * ring_buffer_iterator.c
 *
 * Ring buffer and channel iterators. Get each event of a channel in order. Uses
 * a prio heap for per-cpu buffers, giving an O(log(NR_CPUS)) algorithmic
 * complexity for the "get next event" operation.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include "../../wrapper/ringbuffer/iterator.h"
#include "../../wrapper/file.h"
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>

/*
 * Safety factor taking into account internal kernel interrupt latency.
 * Assuming a worst-case latency of 250 ms.
 */
#define MAX_SYSTEM_LATENCY	250

/*
 * Maximum delta expected between trace clocks, in nanoseconds. At most
 * 1 jiffy delta.
 */
#define MAX_CLOCK_DELTA		(jiffies_to_usecs(1) * 1000)

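/*
 * For example, with HZ=250 one jiffy is 4 ms, so MAX_CLOCK_DELTA is
 * jiffies_to_usecs(1) * 1000 = 4000 * 1000 = 4,000,000 ns. The unit is
 * nanoseconds, matching the timestamp deltas it is compared against in
 * channel_get_next_record().
 */
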
/**
 * lib_ring_buffer_get_next_record - Get the next record in a buffer.
 * @chan: channel
 * @buf: buffer
 *
 * Returns the size of the event read, -EAGAIN if buffer is empty, -ENODATA if
 * buffer is empty and finalized. The buffer must already be opened for reading.
 */
ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
					struct lib_ring_buffer *buf)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer_iter *iter = &buf->iter;
	int ret;

restart:
	switch (iter->state) {
	case ITER_GET_SUBBUF:
		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (ret && !ACCESS_ONCE(buf->finalized)
		    && config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
			/*
			 * Use "pull" scheme for global buffers. The reader
			 * itself flushes the buffer to "pull" data not visible
			 * to readers yet. Flush current subbuffer and re-try.
			 *
			 * Per-CPU buffers rather use a "push" scheme because
			 * the IPI needed to flush all CPUs' buffers is too
			 * costly. In the "push" scheme, the reader waits for
			 * the writer periodic timer to flush the
			 * buffers (keeping track of a quiescent state
			 * timestamp). Therefore, the writer "pushes" data out
			 * of the buffers rather than letting the reader "pull"
			 * data from the buffer.
			 */
			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
			ret = lib_ring_buffer_get_next_subbuf(buf);
		}
		if (ret)
			return ret;
		iter->consumed = buf->cons_snapshot;
		iter->data_size = lib_ring_buffer_get_read_data_size(config, buf);
		iter->read_offset = iter->consumed;
		/* skip header */
		iter->read_offset += config->cb.subbuffer_header_size();
		iter->state = ITER_TEST_RECORD;
		goto restart;
	case ITER_TEST_RECORD:
		if (iter->read_offset - iter->consumed >= iter->data_size) {
			iter->state = ITER_PUT_SUBBUF;
		} else {
			CHAN_WARN_ON(chan, !config->cb.record_get);
			config->cb.record_get(config, chan, buf,
					      iter->read_offset,
					      &iter->header_len,
					      &iter->payload_len,
					      &iter->timestamp);
			iter->read_offset += iter->header_len;
			subbuffer_consume_record(config, &buf->backend);
			iter->state = ITER_NEXT_RECORD;
			return iter->payload_len;
		}
		goto restart;
	case ITER_NEXT_RECORD:
		iter->read_offset += iter->payload_len;
		iter->state = ITER_TEST_RECORD;
		goto restart;
	case ITER_PUT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		iter->state = ITER_GET_SUBBUF;
		goto restart;
	default:
		CHAN_WARN_ON(chan, 1);	/* Should not happen */
		return -EPERM;
	}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);

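/*
 * Illustrative sketch: draining all records from a single buffer that is
 * already opened for reading. process_record() is a hypothetical consumer;
 * the error conventions are the ones documented above. On -EAGAIN a real
 * consumer would typically wait on the buffer's read_wait queue and retry,
 * as channel_ring_buffer_file_read() below does. On success, the payload is
 * len bytes starting at buf->iter.read_offset.
 *
 *	for (;;) {
 *		ssize_t len = lib_ring_buffer_get_next_record(chan, buf);
 *
 *		if (len == -EAGAIN || len == -ENODATA)
 *			break;
 *		CHAN_WARN_ON(chan, len < 0);
 *		process_record(buf, buf->iter.read_offset, len);
 *	}
 */
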
static int buf_is_higher(void *a, void *b)
{
	struct lib_ring_buffer *bufa = a;
	struct lib_ring_buffer *bufb = b;

	/* Consider lowest timestamps to be at the top of the heap */
	return (bufa->iter.timestamp < bufb->iter.timestamp);
}

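/*
 * Because buf_is_higher() inverts the comparison, lttng_heap_maximum()
 * returns the buffer holding the oldest record. Worked example (hypothetical
 * timestamps): insert buffers A (ts 30), B (ts 10) and C (ts 20) with
 * lttng_heap_insert(); lttng_heap_maximum() yields B. Once B's next record
 * is read and B is re-inserted with lttng_heap_replace_max() at ts 40, the
 * maximum becomes C. This is what keeps channel-wide iteration in timestamp
 * order.
 */
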
static
void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
					   struct channel *chan)
{
	struct lttng_ptr_heap *heap = &chan->iter.heap;
	struct lib_ring_buffer *buf, *tmp;
	ssize_t len;

	list_for_each_entry_safe(buf, tmp, &chan->iter.empty_head,
				 iter.empty_node) {
		len = lib_ring_buffer_get_next_record(chan, buf);

		/*
		 * Deal with -EAGAIN and -ENODATA.
		 * len >= 0 means record contains data.
		 * -EBUSY should never happen, because we support only one
		 * reader.
		 */
		switch (len) {
		case -EAGAIN:
			/* Keep node in empty list */
			break;
		case -ENODATA:
			/*
			 * Buffer is finalized. Don't add to list of empty
			 * buffers, because it has no more data to provide,
			 * ever.
			 */
			list_del(&buf->iter.empty_node);
			break;
		case -EBUSY:
			CHAN_WARN_ON(chan, 1);
			break;
		default:
			/*
			 * Insert buffer into the heap, remove from empty
			 * buffer list.
			 */
			CHAN_WARN_ON(chan, len < 0);
			list_del(&buf->iter.empty_node);
			CHAN_WARN_ON(chan, lttng_heap_insert(heap, buf));
		}
	}
}

static
void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
				 struct channel *chan)
{
	u64 timestamp_qs;
	unsigned long wait_msecs;

	/*
	 * No need to wait if no empty buffers are present.
	 */
	if (list_empty(&chan->iter.empty_head))
		return;

	timestamp_qs = config->cb.ring_buffer_clock_read(chan);
	/*
	 * We need to consider previously empty buffers.
	 * Do a "get next record" on each of them. Add them to the heap if
	 * they have data. If at least one of them doesn't have data, we need
	 * to wait for switch_timer_interval + MAX_SYSTEM_LATENCY (so we are
	 * sure the buffers have been switched either by the timer or idle
	 * entry) and check them again, adding them if they have data.
	 */
	lib_ring_buffer_get_empty_buf_records(config, chan);

	/*
	 * No need to wait if no empty buffers are present.
	 */
	if (list_empty(&chan->iter.empty_head))
		return;

	/*
	 * We need to wait for the buffer switch timer to run. If the
	 * CPU is idle, idle entry performed the switch.
	 * TODO: we could optimize further by skipping the sleep if all
	 * empty buffers belong to idle or offline cpus.
	 */
	wait_msecs = jiffies_to_msecs(chan->switch_timer_interval);
	wait_msecs += MAX_SYSTEM_LATENCY;
	msleep(wait_msecs);
	lib_ring_buffer_get_empty_buf_records(config, chan);
	/*
	 * Any buffer still in the empty list here cannot possibly
	 * contain an event with a timestamp prior to "timestamp_qs".
	 * The new quiescent state timestamp is the one we grabbed
	 * before waiting for buffer data. It is therefore safe to
	 * ignore empty buffers up to last_qs timestamp for fusion
	 * merge.
	 */
	chan->iter.last_qs = timestamp_qs;
}

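/*
 * Timing example for the wait above (hypothetical configuration): with a
 * switch_timer_interval equivalent to 200 ms, lib_ring_buffer_wait_for_qs()
 * sleeps jiffies_to_msecs(chan->switch_timer_interval) + MAX_SYSTEM_LATENCY
 * = 200 + 250 = 450 ms, by which point every buffer is expected to have been
 * switched either by the timer or by idle entry, so the second
 * lib_ring_buffer_get_empty_buf_records() pass sees any pending data.
 */
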
/**
 * channel_get_next_record - Get the next record in a channel.
 * @chan: channel
 * @ret_buf: the buffer in which the event is located (output)
 *
 * Returns the size of the new current event, -EAGAIN if all buffers are empty,
 * -ENODATA if all buffers are empty and finalized. The channel must already be
 * opened for reading.
 */
ssize_t channel_get_next_record(struct channel *chan,
				struct lib_ring_buffer **ret_buf)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf;
	struct lttng_ptr_heap *heap;
	ssize_t len;

	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
		*ret_buf = channel_get_ring_buffer(config, chan, 0);
		return lib_ring_buffer_get_next_record(chan, *ret_buf);
	}

	heap = &chan->iter.heap;

	/*
	 * Get next record for topmost buffer.
	 */
	buf = lttng_heap_maximum(heap);
	if (buf) {
		len = lib_ring_buffer_get_next_record(chan, buf);
		/*
		 * Deal with -EAGAIN and -ENODATA.
		 * len >= 0 means record contains data.
		 */
		switch (len) {
		case -EAGAIN:
			buf->iter.timestamp = 0;
			list_add(&buf->iter.empty_node, &chan->iter.empty_head);
			/* Remove topmost buffer from the heap */
			CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
			break;
		case -ENODATA:
			/*
			 * Buffer is finalized. Remove buffer from heap and
			 * don't add to list of empty buffers, because it has
			 * no more data to provide, ever.
			 */
			CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
			break;
		case -EBUSY:
			CHAN_WARN_ON(chan, 1);
			break;
		default:
			/*
			 * Reinsert buffer into the heap. Note that heap can be
			 * partially empty, so we need to use
			 * lttng_heap_replace_max().
			 */
			CHAN_WARN_ON(chan, len < 0);
			CHAN_WARN_ON(chan, lttng_heap_replace_max(heap, buf) != buf);
			break;
		}
	}

	buf = lttng_heap_maximum(heap);
	if (!buf || buf->iter.timestamp > chan->iter.last_qs) {
		/*
		 * Deal with buffers previously showing no data.
		 * Add buffers containing data to the heap, update
		 * last_qs.
		 */
		lib_ring_buffer_wait_for_qs(config, chan);
	}

	*ret_buf = buf = lttng_heap_maximum(heap);
	if (buf) {
		/*
		 * If this warning triggers, you probably need to check your
		 * system interrupt latency. Typical causes: too much printk()
		 * output going to a serial console with interrupts off.
		 * Allow for MAX_CLOCK_DELTA ns timestamp delta going backward.
		 * Observed on SMP KVM setups with trace_clock().
		 */
		if (chan->iter.last_timestamp
		    > (buf->iter.timestamp + MAX_CLOCK_DELTA)) {
			printk(KERN_WARNING "ring_buffer: timestamps going "
			       "backward. Last time %llu ns, cpu %d, "
			       "current time %llu ns, cpu %d, "
			       "delta %llu ns.\n",
			       chan->iter.last_timestamp, chan->iter.last_cpu,
			       buf->iter.timestamp, buf->backend.cpu,
			       chan->iter.last_timestamp - buf->iter.timestamp);
			CHAN_WARN_ON(chan, 1);
		}
		chan->iter.last_timestamp = buf->iter.timestamp;
		chan->iter.last_cpu = buf->backend.cpu;
		return buf->iter.payload_len;
	} else {
		/* Heap is empty */
		if (list_empty(&chan->iter.empty_head))
			return -ENODATA;	/* All buffers finalized */
		else
			return -EAGAIN;		/* Temporarily empty */
	}
}
EXPORT_SYMBOL_GPL(channel_get_next_record);

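/*
 * Illustrative sketch: a fusion-merge consumer using channel_get_next_record()
 * to walk a per-CPU channel in global timestamp order. process_record() is a
 * hypothetical callback; on -EAGAIN a real consumer would sleep on
 * chan->read_wait and retry, as channel_ring_buffer_file_read() below does.
 *
 *	struct lib_ring_buffer *buf;
 *	ssize_t len;
 *
 *	for (;;) {
 *		len = channel_get_next_record(chan, &buf);
 *		if (len == -EAGAIN || len == -ENODATA)
 *			break;
 *		CHAN_WARN_ON(chan, len < 0);
 *		process_record(buf, buf->iter.read_offset, len);
 *	}
 */
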
static
void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer *buf)
{
	if (buf->iter.allocated)
		return;

	buf->iter.allocated = 1;
	if (chan->iter.read_open && !buf->iter.read_open) {
		CHAN_WARN_ON(chan, lib_ring_buffer_open_read(buf) != 0);
		buf->iter.read_open = 1;
	}

	/* Add to list of buffers without any current record */
	if (chan->backend.config.alloc == RING_BUFFER_ALLOC_PER_CPU)
		list_add(&buf->iter.empty_node, &chan->iter.empty_head);
}

#ifdef CONFIG_HOTPLUG_CPU
static
int channel_iterator_cpu_hotplug(struct notifier_block *nb,
				 unsigned long action,
				 void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel *chan = container_of(nb, struct channel,
					    hp_iter_notifier);
	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (!chan->hp_iter_enable)
		return NOTIFY_DONE;

	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		lib_ring_buffer_iterator_init(chan, buf);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
#endif

int channel_iterator_init(struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		int cpu, ret;

		INIT_LIST_HEAD(&chan->iter.empty_head);
		ret = lttng_heap_init(&chan->iter.heap,
				      num_possible_cpus(),
				      GFP_KERNEL, buf_is_higher);
		if (ret)
			return ret;
		/*
		 * Without CPU hotplug support, a ring buffer allocated from
		 * an early initcall will never be notified of secondary CPUs
		 * coming online, so in that case we need to initialize
		 * iterators for all possible CPUs up front.
		 */
#ifdef CONFIG_HOTPLUG_CPU
		chan->hp_iter_notifier.notifier_call =
			channel_iterator_cpu_hotplug;
		chan->hp_iter_notifier.priority = 10;
		register_cpu_notifier(&chan->hp_iter_notifier);
		get_online_cpus();
		for_each_online_cpu(cpu) {
			buf = per_cpu_ptr(chan->backend.buf, cpu);
			lib_ring_buffer_iterator_init(chan, buf);
		}
		chan->hp_iter_enable = 1;
		put_online_cpus();
#else
		for_each_possible_cpu(cpu) {
			buf = per_cpu_ptr(chan->backend.buf, cpu);
			lib_ring_buffer_iterator_init(chan, buf);
		}
#endif
	} else {
		buf = channel_get_ring_buffer(config, chan, 0);
		lib_ring_buffer_iterator_init(chan, buf);
	}
	return 0;
}

void channel_iterator_unregister_notifiers(struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		chan->hp_iter_enable = 0;
		unregister_cpu_notifier(&chan->hp_iter_notifier);
	}
}

void channel_iterator_free(struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		lttng_heap_free(&chan->iter.heap);
}

int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
	return lib_ring_buffer_open_read(buf);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_open);

/*
 * Note: Iterators must not be mixed with other types of outputs, because an
 * iterator can leave the buffer in "GET" state, which is not consistent with
 * other types of output (mmap, splice, raw data read).
 */
void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);

int channel_iterator_open(struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf;
	int ret = 0, cpu;

	CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		get_online_cpus();
		/* Allow CPU hotplug to keep track of opened reader */
		chan->iter.read_open = 1;
		for_each_channel_cpu(cpu, chan) {
			buf = channel_get_ring_buffer(config, chan, cpu);
			ret = lib_ring_buffer_iterator_open(buf);
			if (ret)
				goto error;
			buf->iter.read_open = 1;
		}
		put_online_cpus();
	} else {
		buf = channel_get_ring_buffer(config, chan, 0);
		ret = lib_ring_buffer_iterator_open(buf);
	}
	return ret;
error:
	/* Error should always happen on CPU 0, hence no close is required. */
	CHAN_WARN_ON(chan, cpu != 0);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(channel_iterator_open);

void channel_iterator_release(struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf;
	int cpu;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		get_online_cpus();
		for_each_channel_cpu(cpu, chan) {
			buf = channel_get_ring_buffer(config, chan, cpu);
			if (buf->iter.read_open) {
				lib_ring_buffer_iterator_release(buf);
				buf->iter.read_open = 0;
			}
		}
		chan->iter.read_open = 0;
		put_online_cpus();
	} else {
		buf = channel_get_ring_buffer(config, chan, 0);
		lib_ring_buffer_iterator_release(buf);
	}
}
EXPORT_SYMBOL_GPL(channel_iterator_release);

void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;

	if (buf->iter.state != ITER_GET_SUBBUF)
		lib_ring_buffer_put_next_subbuf(buf);
	buf->iter.state = ITER_GET_SUBBUF;
	/* Remove from heap (if present). */
	if (lttng_heap_cherrypick(&chan->iter.heap, buf))
		list_add(&buf->iter.empty_node, &chan->iter.empty_head);
	buf->iter.timestamp = 0;
	buf->iter.header_len = 0;
	buf->iter.payload_len = 0;
	buf->iter.consumed = 0;
	buf->iter.read_offset = 0;
	buf->iter.data_size = 0;
	/* Don't reset allocated and read_open */
}

void channel_iterator_reset(struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf;
	int cpu;

	/* Empty heap, put into empty_head */
	while ((buf = lttng_heap_remove(&chan->iter.heap)) != NULL)
		list_add(&buf->iter.empty_node, &chan->iter.empty_head);

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(config, chan, cpu);
		lib_ring_buffer_iterator_reset(buf);
	}
	/* Don't reset read_open */
	chan->iter.last_qs = 0;
	chan->iter.last_timestamp = 0;
	chan->iter.last_cpu = 0;
	chan->iter.len_left = 0;
}

/*
 * Ring buffer payload extraction read() implementation.
 */
static
ssize_t channel_ring_buffer_file_read(struct file *filp,
				      char __user *user_buf,
				      size_t count,
				      loff_t *ppos,
				      struct channel *chan,
				      struct lib_ring_buffer *buf,
				      int fusionmerge)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	size_t read_count = 0, read_offset;
	ssize_t len;

	might_sleep();
	if (!access_ok(VERIFY_WRITE, user_buf, count))
		return -EFAULT;

	/* Finish copy of previous record */
	if (*ppos != 0) {
		if (read_count < count) {
			len = chan->iter.len_left;
			read_offset = *ppos;
			if (config->alloc == RING_BUFFER_ALLOC_PER_CPU
			    && fusionmerge)
				buf = lttng_heap_maximum(&chan->iter.heap);
			CHAN_WARN_ON(chan, !buf);
			goto skip_get_next;
		}
	}

	while (read_count < count) {
		size_t copy_len, space_left;

		if (fusionmerge)
			len = channel_get_next_record(chan, &buf);
		else
			len = lib_ring_buffer_get_next_record(chan, buf);
len_test:
		if (len < 0) {
			/*
			 * Check if buffer is finalized (end of file).
			 */
			if (len == -ENODATA) {
				/* A read_count of 0 signals end of file. */
				goto nodata;
			}
			if (filp->f_flags & O_NONBLOCK) {
				if (!read_count)
					read_count = -EAGAIN;
				goto nodata;
			} else {
				int error;

				/*
				 * No data available at the moment, return what
				 * we got.
				 */
				if (read_count)
					goto nodata;

				/*
				 * Wait for the returned len to be >= 0 or
				 * -ENODATA.
				 */
				if (fusionmerge)
					error = wait_event_interruptible(
						chan->read_wait,
						((len = channel_get_next_record(chan,
							&buf)), len != -EAGAIN));
				else
					error = wait_event_interruptible(
						buf->read_wait,
						((len = lib_ring_buffer_get_next_record(
							chan, buf)), len != -EAGAIN));
				CHAN_WARN_ON(chan, len == -EBUSY);
				if (error) {
					read_count = error;
					goto nodata;
				}
				CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
				goto len_test;
			}
		}
		read_offset = buf->iter.read_offset;
skip_get_next:
		space_left = count - read_count;
		if (len <= space_left) {
			copy_len = len;
			chan->iter.len_left = 0;
			*ppos = 0;
		} else {
			copy_len = space_left;
			chan->iter.len_left = len - copy_len;
			*ppos = read_offset + copy_len;
		}
		if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
						   &user_buf[read_count],
						   copy_len)) {
			/*
			 * Leave the len_left and ppos values at their current
			 * state, as we currently have a valid event to read.
			 */
			return -EFAULT;
		}
		read_count += copy_len;
	}
	return read_count;

nodata:
	*ppos = 0;
	chan->iter.len_left = 0;
	return read_count;
}

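/*
 * Userspace view (sketch; the path and consume() are hypothetical, and the
 * actual path depends on where the channel file is exposed): read() packs
 * successive record payloads into the user buffer. A record that does not
 * fit is split: *ppos keeps the intra-record position so the next read()
 * resumes mid-record. A return of 0 means all buffers are finalized (end of
 * file); with O_NONBLOCK, -EAGAIN is returned instead of blocking.
 *
 *	int fd = open("/path/to/channel_payload_file", O_RDONLY);
 *	char data[4096];
 *	ssize_t ret;
 *
 *	while ((ret = read(fd, data, sizeof(data))) > 0)
 *		consume(data, ret);
 *	close(fd);
 */
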
/**
 * lib_ring_buffer_file_read - Read buffer record payload.
 * @filp: file structure pointer.
 * @user_buf: user buffer to read data into.
 * @count: number of bytes to read.
 * @ppos: file read position.
 *
 * Returns a negative value on error, or the number of bytes read on success.
 * ppos is used to save the position _within the current record_ between calls
 * to read().
 */
static
ssize_t lib_ring_buffer_file_read(struct file *filp,
				  char __user *user_buf,
				  size_t count,
				  loff_t *ppos)
{
	struct inode *inode = filp->lttng_f_dentry->d_inode;
	struct lib_ring_buffer *buf = inode->i_private;
	struct channel *chan = buf->backend.chan;

	return channel_ring_buffer_file_read(filp, user_buf, count, ppos,
					     chan, buf, 0);
}

/**
 * channel_file_read - Read channel record payload.
 * @filp: file structure pointer.
 * @user_buf: user buffer to read data into.
 * @count: number of bytes to read.
 * @ppos: file read position.
 *
 * Returns a negative value on error, or the number of bytes read on success.
 * ppos is used to save the position _within the current record_ between calls
 * to read().
 */
static
ssize_t channel_file_read(struct file *filp,
			  char __user *user_buf,
			  size_t count,
			  loff_t *ppos)
{
	struct inode *inode = filp->lttng_f_dentry->d_inode;
	struct channel *chan = inode->i_private;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		return channel_ring_buffer_file_read(filp, user_buf, count,
						     ppos, chan, NULL, 1);
	else {
		struct lib_ring_buffer *buf =
			channel_get_ring_buffer(config, chan, 0);

		return channel_ring_buffer_file_read(filp, user_buf, count,
						     ppos, chan, buf, 0);
	}
}

static
int lib_ring_buffer_file_open(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = inode->i_private;
	int ret;

	ret = lib_ring_buffer_iterator_open(buf);
	if (ret)
		return ret;

	file->private_data = buf;
	ret = nonseekable_open(inode, file);
	if (ret)
		goto release_iter;
	return 0;

release_iter:
	lib_ring_buffer_iterator_release(buf);
	return ret;
}

static
int lib_ring_buffer_file_release(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = inode->i_private;

	lib_ring_buffer_iterator_release(buf);
	return 0;
}

static
int channel_file_open(struct inode *inode, struct file *file)
{
	struct channel *chan = inode->i_private;
	int ret;

	ret = channel_iterator_open(chan);
	if (ret)
		return ret;

	file->private_data = chan;
	ret = nonseekable_open(inode, file);
	if (ret)
		goto release_iter;
	return 0;

release_iter:
	channel_iterator_release(chan);
	return ret;
}

static
int channel_file_release(struct inode *inode, struct file *file)
{
	struct channel *chan = inode->i_private;

	channel_iterator_release(chan);
	return 0;
}

const struct file_operations channel_payload_file_operations = {
	.owner = THIS_MODULE,
	.open = channel_file_open,
	.release = channel_file_release,
	.read = channel_file_read,
	.llseek = vfs_lib_ring_buffer_no_llseek,
};
EXPORT_SYMBOL_GPL(channel_payload_file_operations);

const struct file_operations lib_ring_buffer_payload_file_operations = {
	.owner = THIS_MODULE,
	.open = lib_ring_buffer_file_open,
	.release = lib_ring_buffer_file_release,
	.read = lib_ring_buffer_file_read,
	.llseek = vfs_lib_ring_buffer_no_llseek,
};
EXPORT_SYMBOL_GPL(lib_ring_buffer_payload_file_operations);
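
/*
 * Integration sketch (assumed, not shown in this file): the payload
 * file_operations can be handed to debugfs_create_file(), which stores the
 * data pointer in inode->i_private exactly as the open/read handlers above
 * expect. The file name and parent dentry are hypothetical.
 *
 *	struct dentry *dentry;
 *
 *	dentry = debugfs_create_file("ring_buffer_payload", S_IRUSR,
 *				     parent, buf,
 *				     &lib_ring_buffer_payload_file_operations);
 *	if (!dentry)
 *		return -ENOMEM;
 */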