/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
 *
 * ring_buffer_iterator.c
 *
 * Ring buffer and channel iterators. Get each event of a channel in order. Uses
 * a prio heap for per-cpu buffers, giving a O(log(NR_CPUS)) algorithmic
 * complexity for the "get next event" operation.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <wrapper/ringbuffer/iterator.h>
#include <wrapper/file.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>

/*
 * Safety factor taking into account internal kernel interrupt latency.
 * Assuming 250ms worse-case latency.
 */
#define MAX_SYSTEM_LATENCY	250

/*
 * Maximum delta expected between trace clocks. At most 1 jiffy delta.
 */
#define MAX_CLOCK_DELTA		(jiffies_to_usecs(1) * 1000)
30 * lib_ring_buffer_get_next_record - Get the next record in a buffer.
34 * Returns the size of the event read, -EAGAIN if buffer is empty, -ENODATA if
35 * buffer is empty and finalized. The buffer must already be opened for reading.
37 ssize_t
lib_ring_buffer_get_next_record(struct channel
*chan
,
38 struct lib_ring_buffer
*buf
)
40 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
41 struct lib_ring_buffer_iter
*iter
= &buf
->iter
;
45 switch (iter
->state
) {
47 ret
= lib_ring_buffer_get_next_subbuf(buf
);
48 if (ret
&& !READ_ONCE(buf
->finalized
)
49 && config
->alloc
== RING_BUFFER_ALLOC_GLOBAL
) {
51 * Use "pull" scheme for global buffers. The reader
52 * itself flushes the buffer to "pull" data not visible
53 * to readers yet. Flush current subbuffer and re-try.
55 * Per-CPU buffers rather use a "push" scheme because
56 * the IPI needed to flush all CPU's buffers is too
57 * costly. In the "push" scheme, the reader waits for
58 * the writer periodic timer to flush the
59 * buffers (keeping track of a quiescent state
60 * timestamp). Therefore, the writer "pushes" data out
61 * of the buffers rather than letting the reader "pull"
62 * data from the buffer.
64 lib_ring_buffer_switch_slow(buf
, SWITCH_ACTIVE
);
65 ret
= lib_ring_buffer_get_next_subbuf(buf
);
69 iter
->consumed
= buf
->cons_snapshot
;
70 iter
->data_size
= lib_ring_buffer_get_read_data_size(config
, buf
);
71 iter
->read_offset
= iter
->consumed
;
73 iter
->read_offset
+= config
->cb
.subbuffer_header_size();
74 iter
->state
= ITER_TEST_RECORD
;
76 case ITER_TEST_RECORD
:
77 if (iter
->read_offset
- iter
->consumed
>= iter
->data_size
) {
78 iter
->state
= ITER_PUT_SUBBUF
;
80 CHAN_WARN_ON(chan
, !config
->cb
.record_get
);
81 config
->cb
.record_get(config
, chan
, buf
,
86 iter
->read_offset
+= iter
->header_len
;
87 subbuffer_consume_record(config
, &buf
->backend
);
88 iter
->state
= ITER_NEXT_RECORD
;
89 return iter
->payload_len
;
92 case ITER_NEXT_RECORD
:
93 iter
->read_offset
+= iter
->payload_len
;
94 iter
->state
= ITER_TEST_RECORD
;
97 lib_ring_buffer_put_next_subbuf(buf
);
98 iter
->state
= ITER_GET_SUBBUF
;
101 CHAN_WARN_ON(chan
, 1); /* Should not happen */
105 EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record
);
107 static int buf_is_higher(void *a
, void *b
)
109 struct lib_ring_buffer
*bufa
= a
;
110 struct lib_ring_buffer
*bufb
= b
;
112 /* Consider lowest timestamps to be at the top of the heap */
113 return (bufa
->iter
.timestamp
< bufb
->iter
.timestamp
);
117 void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config
*config
,
118 struct channel
*chan
)
120 struct lttng_ptr_heap
*heap
= &chan
->iter
.heap
;
121 struct lib_ring_buffer
*buf
, *tmp
;
124 list_for_each_entry_safe(buf
, tmp
, &chan
->iter
.empty_head
,
126 len
= lib_ring_buffer_get_next_record(chan
, buf
);
129 * Deal with -EAGAIN and -ENODATA.
130 * len >= 0 means record contains data.
131 * -EBUSY should never happen, because we support only one
136 /* Keep node in empty list */
140 * Buffer is finalized. Don't add to list of empty
141 * buffer, because it has no more data to provide, ever.
143 list_del(&buf
->iter
.empty_node
);
146 CHAN_WARN_ON(chan
, 1);
150 * Insert buffer into the heap, remove from empty buffer
153 CHAN_WARN_ON(chan
, len
< 0);
154 list_del(&buf
->iter
.empty_node
);
155 CHAN_WARN_ON(chan
, lttng_heap_insert(heap
, buf
));
161 void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config
*config
,
162 struct channel
*chan
)
165 unsigned long wait_msecs
;
168 * No need to wait if no empty buffers are present.
170 if (list_empty(&chan
->iter
.empty_head
))
173 timestamp_qs
= config
->cb
.ring_buffer_clock_read(chan
);
175 * We need to consider previously empty buffers.
176 * Do a get next buf record on each of them. Add them to
177 * the heap if they have data. If at least one of them
178 * don't have data, we need to wait for
179 * switch_timer_interval + MAX_SYSTEM_LATENCY (so we are sure the
180 * buffers have been switched either by the timer or idle entry) and
181 * check them again, adding them if they have data.
183 lib_ring_buffer_get_empty_buf_records(config
, chan
);
186 * No need to wait if no empty buffers are present.
188 if (list_empty(&chan
->iter
.empty_head
))
192 * We need to wait for the buffer switch timer to run. If the
193 * CPU is idle, idle entry performed the switch.
194 * TODO: we could optimize further by skipping the sleep if all
195 * empty buffers belong to idle or offline cpus.
197 wait_msecs
= jiffies_to_msecs(chan
->switch_timer_interval
);
198 wait_msecs
+= MAX_SYSTEM_LATENCY
;
200 lib_ring_buffer_get_empty_buf_records(config
, chan
);
202 * Any buffer still in the empty list here cannot possibly
203 * contain an event with a timestamp prior to "timestamp_qs".
204 * The new quiescent state timestamp is the one we grabbed
205 * before waiting for buffer data. It is therefore safe to
206 * ignore empty buffers up to last_qs timestamp for fusion
209 chan
->iter
.last_qs
= timestamp_qs
;
213 * channel_get_next_record - Get the next record in a channel.
215 * @ret_buf: the buffer in which the event is located (output)
217 * Returns the size of new current event, -EAGAIN if all buffers are empty,
218 * -ENODATA if all buffers are empty and finalized. The channel must already be
219 * opened for reading.
222 ssize_t
channel_get_next_record(struct channel
*chan
,
223 struct lib_ring_buffer
**ret_buf
)
225 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
226 struct lib_ring_buffer
*buf
;
227 struct lttng_ptr_heap
*heap
;
230 if (config
->alloc
== RING_BUFFER_ALLOC_GLOBAL
) {
231 *ret_buf
= channel_get_ring_buffer(config
, chan
, 0);
232 return lib_ring_buffer_get_next_record(chan
, *ret_buf
);
235 heap
= &chan
->iter
.heap
;
238 * get next record for topmost buffer.
240 buf
= lttng_heap_maximum(heap
);
242 len
= lib_ring_buffer_get_next_record(chan
, buf
);
244 * Deal with -EAGAIN and -ENODATA.
245 * len >= 0 means record contains data.
249 buf
->iter
.timestamp
= 0;
250 list_add(&buf
->iter
.empty_node
, &chan
->iter
.empty_head
);
251 /* Remove topmost buffer from the heap */
252 CHAN_WARN_ON(chan
, lttng_heap_remove(heap
) != buf
);
256 * Buffer is finalized. Remove buffer from heap and
257 * don't add to list of empty buffer, because it has no
258 * more data to provide, ever.
260 CHAN_WARN_ON(chan
, lttng_heap_remove(heap
) != buf
);
263 CHAN_WARN_ON(chan
, 1);
267 * Reinsert buffer into the heap. Note that heap can be
268 * partially empty, so we need to use
269 * lttng_heap_replace_max().
271 CHAN_WARN_ON(chan
, len
< 0);
272 CHAN_WARN_ON(chan
, lttng_heap_replace_max(heap
, buf
) != buf
);
277 buf
= lttng_heap_maximum(heap
);
278 if (!buf
|| buf
->iter
.timestamp
> chan
->iter
.last_qs
) {
280 * Deal with buffers previously showing no data.
281 * Add buffers containing data to the heap, update
284 lib_ring_buffer_wait_for_qs(config
, chan
);
287 *ret_buf
= buf
= lttng_heap_maximum(heap
);
290 * If this warning triggers, you probably need to check your
291 * system interrupt latency. Typical causes: too many printk()
292 * output going to a serial console with interrupts off.
293 * Allow for MAX_CLOCK_DELTA ns timestamp delta going backward.
294 * Observed on SMP KVM setups with trace_clock().
296 if (chan
->iter
.last_timestamp
297 > (buf
->iter
.timestamp
+ MAX_CLOCK_DELTA
)) {
298 printk(KERN_WARNING
"ring_buffer: timestamps going "
299 "backward. Last time %llu ns, cpu %d, "
300 "current time %llu ns, cpu %d, "
302 chan
->iter
.last_timestamp
, chan
->iter
.last_cpu
,
303 buf
->iter
.timestamp
, buf
->backend
.cpu
,
304 chan
->iter
.last_timestamp
- buf
->iter
.timestamp
);
305 CHAN_WARN_ON(chan
, 1);
307 chan
->iter
.last_timestamp
= buf
->iter
.timestamp
;
308 chan
->iter
.last_cpu
= buf
->backend
.cpu
;
309 return buf
->iter
.payload_len
;
312 if (list_empty(&chan
->iter
.empty_head
))
313 return -ENODATA
; /* All buffers finalized */
315 return -EAGAIN
; /* Temporarily empty */
318 EXPORT_SYMBOL_GPL(channel_get_next_record
);
321 void lib_ring_buffer_iterator_init(struct channel
*chan
, struct lib_ring_buffer
*buf
)
323 if (buf
->iter
.allocated
)
326 buf
->iter
.allocated
= 1;
327 if (chan
->iter
.read_open
&& !buf
->iter
.read_open
) {
328 CHAN_WARN_ON(chan
, lib_ring_buffer_open_read(buf
) != 0);
329 buf
->iter
.read_open
= 1;
332 /* Add to list of buffers without any current record */
333 if (chan
->backend
.config
.alloc
== RING_BUFFER_ALLOC_PER_CPU
)
334 list_add(&buf
->iter
.empty_node
, &chan
->iter
.empty_head
);
337 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
339 int lttng_cpuhp_rb_iter_online(unsigned int cpu
,
340 struct lttng_cpuhp_node
*node
)
342 struct channel
*chan
= container_of(node
, struct channel
,
344 struct lib_ring_buffer
*buf
= per_cpu_ptr(chan
->backend
.buf
, cpu
);
345 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
347 CHAN_WARN_ON(chan
, config
->alloc
== RING_BUFFER_ALLOC_GLOBAL
);
349 lib_ring_buffer_iterator_init(chan
, buf
);
352 EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_iter_online
);
354 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
356 #ifdef CONFIG_HOTPLUG_CPU
358 int channel_iterator_cpu_hotplug(struct notifier_block
*nb
,
359 unsigned long action
,
362 unsigned int cpu
= (unsigned long)hcpu
;
363 struct channel
*chan
= container_of(nb
, struct channel
,
365 struct lib_ring_buffer
*buf
= per_cpu_ptr(chan
->backend
.buf
, cpu
);
366 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
368 if (!chan
->hp_iter_enable
)
371 CHAN_WARN_ON(chan
, config
->alloc
== RING_BUFFER_ALLOC_GLOBAL
);
374 case CPU_DOWN_FAILED
:
375 case CPU_DOWN_FAILED_FROZEN
:
377 case CPU_ONLINE_FROZEN
:
378 lib_ring_buffer_iterator_init(chan
, buf
);
386 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
388 int channel_iterator_init(struct channel
*chan
)
390 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
391 struct lib_ring_buffer
*buf
;
393 if (config
->alloc
== RING_BUFFER_ALLOC_PER_CPU
) {
396 INIT_LIST_HEAD(&chan
->iter
.empty_head
);
397 ret
= lttng_heap_init(&chan
->iter
.heap
,
399 GFP_KERNEL
, buf_is_higher
);
403 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
404 chan
->cpuhp_iter_online
.component
= LTTNG_RING_BUFFER_ITER
;
405 ret
= cpuhp_state_add_instance(lttng_rb_hp_online
,
406 &chan
->cpuhp_iter_online
.node
);
409 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
414 * In case of non-hotplug cpu, if the ring-buffer is allocated
415 * in early initcall, it will not be notified of secondary cpus.
416 * In that off case, we need to allocate for all possible cpus.
418 #ifdef CONFIG_HOTPLUG_CPU
419 chan
->hp_iter_notifier
.notifier_call
=
420 channel_iterator_cpu_hotplug
;
421 chan
->hp_iter_notifier
.priority
= 10;
422 register_cpu_notifier(&chan
->hp_iter_notifier
);
425 for_each_online_cpu(cpu
) {
426 buf
= per_cpu_ptr(chan
->backend
.buf
, cpu
);
427 lib_ring_buffer_iterator_init(chan
, buf
);
429 chan
->hp_iter_enable
= 1;
432 for_each_possible_cpu(cpu
) {
433 buf
= per_cpu_ptr(chan
->backend
.buf
, cpu
);
434 lib_ring_buffer_iterator_init(chan
, buf
);
438 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
440 buf
= channel_get_ring_buffer(config
, chan
, 0);
441 lib_ring_buffer_iterator_init(chan
, buf
);
446 void channel_iterator_unregister_notifiers(struct channel
*chan
)
448 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
450 if (config
->alloc
== RING_BUFFER_ALLOC_PER_CPU
) {
451 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
455 ret
= cpuhp_state_remove_instance(lttng_rb_hp_online
,
456 &chan
->cpuhp_iter_online
.node
);
459 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
460 chan
->hp_iter_enable
= 0;
461 unregister_cpu_notifier(&chan
->hp_iter_notifier
);
462 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
466 void channel_iterator_free(struct channel
*chan
)
468 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
470 if (config
->alloc
== RING_BUFFER_ALLOC_PER_CPU
)
471 lttng_heap_free(&chan
->iter
.heap
);
474 int lib_ring_buffer_iterator_open(struct lib_ring_buffer
*buf
)
476 struct channel
*chan
= buf
->backend
.chan
;
477 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
478 CHAN_WARN_ON(chan
, config
->output
!= RING_BUFFER_ITERATOR
);
479 return lib_ring_buffer_open_read(buf
);
481 EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_open
);
/*
 * Note: Iterators must not be mixed with other types of outputs, because an
 * iterator can leave the buffer in "GET" state, which is not consistent with
 * other types of output (mmap, splice, raw data read).
 */
void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
494 int channel_iterator_open(struct channel
*chan
)
496 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
497 struct lib_ring_buffer
*buf
;
500 CHAN_WARN_ON(chan
, config
->output
!= RING_BUFFER_ITERATOR
);
502 if (config
->alloc
== RING_BUFFER_ALLOC_PER_CPU
) {
504 /* Allow CPU hotplug to keep track of opened reader */
505 chan
->iter
.read_open
= 1;
506 for_each_channel_cpu(cpu
, chan
) {
507 buf
= channel_get_ring_buffer(config
, chan
, cpu
);
508 ret
= lib_ring_buffer_iterator_open(buf
);
511 buf
->iter
.read_open
= 1;
515 buf
= channel_get_ring_buffer(config
, chan
, 0);
516 ret
= lib_ring_buffer_iterator_open(buf
);
520 /* Error should always happen on CPU 0, hence no close is required. */
521 CHAN_WARN_ON(chan
, cpu
!= 0);
525 EXPORT_SYMBOL_GPL(channel_iterator_open
);
527 void channel_iterator_release(struct channel
*chan
)
529 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
530 struct lib_ring_buffer
*buf
;
533 if (config
->alloc
== RING_BUFFER_ALLOC_PER_CPU
) {
535 for_each_channel_cpu(cpu
, chan
) {
536 buf
= channel_get_ring_buffer(config
, chan
, cpu
);
537 if (buf
->iter
.read_open
) {
538 lib_ring_buffer_iterator_release(buf
);
539 buf
->iter
.read_open
= 0;
542 chan
->iter
.read_open
= 0;
545 buf
= channel_get_ring_buffer(config
, chan
, 0);
546 lib_ring_buffer_iterator_release(buf
);
549 EXPORT_SYMBOL_GPL(channel_iterator_release
);
551 void lib_ring_buffer_iterator_reset(struct lib_ring_buffer
*buf
)
553 struct channel
*chan
= buf
->backend
.chan
;
555 if (buf
->iter
.state
!= ITER_GET_SUBBUF
)
556 lib_ring_buffer_put_next_subbuf(buf
);
557 buf
->iter
.state
= ITER_GET_SUBBUF
;
558 /* Remove from heap (if present). */
559 if (lttng_heap_cherrypick(&chan
->iter
.heap
, buf
))
560 list_add(&buf
->iter
.empty_node
, &chan
->iter
.empty_head
);
561 buf
->iter
.timestamp
= 0;
562 buf
->iter
.header_len
= 0;
563 buf
->iter
.payload_len
= 0;
564 buf
->iter
.consumed
= 0;
565 buf
->iter
.read_offset
= 0;
566 buf
->iter
.data_size
= 0;
567 /* Don't reset allocated and read_open */
570 void channel_iterator_reset(struct channel
*chan
)
572 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
573 struct lib_ring_buffer
*buf
;
576 /* Empty heap, put into empty_head */
577 while ((buf
= lttng_heap_remove(&chan
->iter
.heap
)) != NULL
)
578 list_add(&buf
->iter
.empty_node
, &chan
->iter
.empty_head
);
580 for_each_channel_cpu(cpu
, chan
) {
581 buf
= channel_get_ring_buffer(config
, chan
, cpu
);
582 lib_ring_buffer_iterator_reset(buf
);
584 /* Don't reset read_open */
585 chan
->iter
.last_qs
= 0;
586 chan
->iter
.last_timestamp
= 0;
587 chan
->iter
.last_cpu
= 0;
588 chan
->iter
.len_left
= 0;
592 * Ring buffer payload extraction read() implementation.
595 ssize_t
channel_ring_buffer_file_read(struct file
*filp
,
596 char __user
*user_buf
,
599 struct channel
*chan
,
600 struct lib_ring_buffer
*buf
,
603 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
604 size_t read_count
= 0, read_offset
;
608 if (!access_ok(VERIFY_WRITE
, user_buf
, count
))
611 /* Finish copy of previous record */
613 if (read_count
< count
) {
614 len
= chan
->iter
.len_left
;
616 if (config
->alloc
== RING_BUFFER_ALLOC_PER_CPU
618 buf
= lttng_heap_maximum(&chan
->iter
.heap
);
619 CHAN_WARN_ON(chan
, !buf
);
624 while (read_count
< count
) {
625 size_t copy_len
, space_left
;
628 len
= channel_get_next_record(chan
, &buf
);
630 len
= lib_ring_buffer_get_next_record(chan
, buf
);
634 * Check if buffer is finalized (end of file).
636 if (len
== -ENODATA
) {
637 /* A 0 read_count will tell about end of file */
640 if (filp
->f_flags
& O_NONBLOCK
) {
642 read_count
= -EAGAIN
;
648 * No data available at the moment, return what
655 * Wait for returned len to be >= 0 or -ENODATA.
658 error
= wait_event_interruptible(
660 ((len
= channel_get_next_record(chan
,
661 &buf
)), len
!= -EAGAIN
));
663 error
= wait_event_interruptible(
665 ((len
= lib_ring_buffer_get_next_record(
666 chan
, buf
)), len
!= -EAGAIN
));
667 CHAN_WARN_ON(chan
, len
== -EBUSY
);
672 CHAN_WARN_ON(chan
, len
< 0 && len
!= -ENODATA
);
676 read_offset
= buf
->iter
.read_offset
;
678 space_left
= count
- read_count
;
679 if (len
<= space_left
) {
681 chan
->iter
.len_left
= 0;
684 copy_len
= space_left
;
685 chan
->iter
.len_left
= len
- copy_len
;
686 *ppos
= read_offset
+ copy_len
;
688 if (__lib_ring_buffer_copy_to_user(&buf
->backend
, read_offset
,
689 &user_buf
[read_count
],
692 * Leave the len_left and ppos values at their current
693 * state, as we currently have a valid event to read.
697 read_count
+= copy_len
;
703 chan
->iter
.len_left
= 0;
708 * lib_ring_buffer_file_read - Read buffer record payload.
709 * @filp: file structure pointer.
710 * @buffer: user buffer to read data into.
711 * @count: number of bytes to read.
712 * @ppos: file read position.
714 * Returns a negative value on error, or the number of bytes read on success.
715 * ppos is used to save the position _within the current record_ between calls
719 ssize_t
lib_ring_buffer_file_read(struct file
*filp
,
720 char __user
*user_buf
,
724 struct inode
*inode
= filp
->lttng_f_dentry
->d_inode
;
725 struct lib_ring_buffer
*buf
= inode
->i_private
;
726 struct channel
*chan
= buf
->backend
.chan
;
728 return channel_ring_buffer_file_read(filp
, user_buf
, count
, ppos
,
733 * channel_file_read - Read channel record payload.
734 * @filp: file structure pointer.
735 * @buffer: user buffer to read data into.
736 * @count: number of bytes to read.
737 * @ppos: file read position.
739 * Returns a negative value on error, or the number of bytes read on success.
740 * ppos is used to save the position _within the current record_ between calls
744 ssize_t
channel_file_read(struct file
*filp
,
745 char __user
*user_buf
,
749 struct inode
*inode
= filp
->lttng_f_dentry
->d_inode
;
750 struct channel
*chan
= inode
->i_private
;
751 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
753 if (config
->alloc
== RING_BUFFER_ALLOC_PER_CPU
)
754 return channel_ring_buffer_file_read(filp
, user_buf
, count
,
755 ppos
, chan
, NULL
, 1);
757 struct lib_ring_buffer
*buf
=
758 channel_get_ring_buffer(config
, chan
, 0);
759 return channel_ring_buffer_file_read(filp
, user_buf
, count
,
765 int lib_ring_buffer_file_open(struct inode
*inode
, struct file
*file
)
767 struct lib_ring_buffer
*buf
= inode
->i_private
;
770 ret
= lib_ring_buffer_iterator_open(buf
);
774 file
->private_data
= buf
;
775 ret
= nonseekable_open(inode
, file
);
781 lib_ring_buffer_iterator_release(buf
);
786 int lib_ring_buffer_file_release(struct inode
*inode
, struct file
*file
)
788 struct lib_ring_buffer
*buf
= inode
->i_private
;
790 lib_ring_buffer_iterator_release(buf
);
795 int channel_file_open(struct inode
*inode
, struct file
*file
)
797 struct channel
*chan
= inode
->i_private
;
800 ret
= channel_iterator_open(chan
);
804 file
->private_data
= chan
;
805 ret
= nonseekable_open(inode
, file
);
811 channel_iterator_release(chan
);
816 int channel_file_release(struct inode
*inode
, struct file
*file
)
818 struct channel
*chan
= inode
->i_private
;
820 channel_iterator_release(chan
);
824 const struct file_operations channel_payload_file_operations
= {
825 .owner
= THIS_MODULE
,
826 .open
= channel_file_open
,
827 .release
= channel_file_release
,
828 .read
= channel_file_read
,
829 .llseek
= vfs_lib_ring_buffer_no_llseek
,
831 EXPORT_SYMBOL_GPL(channel_payload_file_operations
);
833 const struct file_operations lib_ring_buffer_payload_file_operations
= {
834 .owner
= THIS_MODULE
,
835 .open
= lib_ring_buffer_file_open
,
836 .release
= lib_ring_buffer_file_release
,
837 .read
= lib_ring_buffer_file_read
,
838 .llseek
= vfs_lib_ring_buffer_no_llseek
,
840 EXPORT_SYMBOL_GPL(lib_ring_buffer_payload_file_operations
);