/*
 * ring_buffer_iterator.c
 *
 * Ring buffer and channel iterators. Get each event of a channel in order.
 * Uses a prio heap for per-cpu buffers, giving an O(log(NR_CPUS)) algorithmic
 * complexity for the "get next event" operation.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
#include <wrapper/ringbuffer/iterator.h>
#include <wrapper/file.h>
#include <wrapper/uaccess.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
/*
 * Safety factor taking into account internal kernel interrupt latency.
 * Assuming 250ms worst-case latency.
 */
#define MAX_SYSTEM_LATENCY	250

/*
 * Maximum delta expected between trace clocks. At most 1 jiffy delta.
 */
#define MAX_CLOCK_DELTA		(jiffies_to_usecs(1) * 1000)
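
/*
 * Worked example (illustrative): jiffies_to_usecs(1) is (1000000 / HZ) us,
 * so with HZ=250 one jiffy is 4000 us and MAX_CLOCK_DELTA evaluates to
 * 4000000 ns (4 ms). The factor of 1000 converts microseconds to the
 * nanosecond unit of the trace clock timestamps compared against it.
 */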
/**
 * lib_ring_buffer_get_next_record - Get the next record in a buffer.
 * @chan: channel
 * @buf: buffer
 *
 * Returns the size of the event read, -EAGAIN if buffer is empty, -ENODATA if
 * buffer is empty and finalized. The buffer must already be opened for
 * reading.
 */
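/*
 * Iterator state machine (summary added for clarity): ITER_GET_SUBBUF grabs
 * a sub-buffer for reading and positions read_offset past its header,
 * ITER_TEST_RECORD either extracts one record (returning its payload length)
 * or moves on when the sub-buffer is exhausted, ITER_NEXT_RECORD skips the
 * payload previously delivered, and ITER_PUT_SUBBUF releases the sub-buffer
 * before cycling back to ITER_GET_SUBBUF.
 */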
ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
					struct lib_ring_buffer *buf)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer_iter *iter = &buf->iter;
	int ret;

restart:
	switch (iter->state) {
	case ITER_GET_SUBBUF:
		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (ret && !READ_ONCE(buf->finalized)
		    && config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
			/*
			 * Use "pull" scheme for global buffers. The reader
			 * itself flushes the buffer to "pull" data not visible
			 * to readers yet. Flush current subbuffer and re-try.
			 *
			 * Per-CPU buffers rather use a "push" scheme because
			 * the IPI needed to flush all CPU's buffers is too
			 * costly. In the "push" scheme, the reader waits for
			 * the writer periodic timer to flush the
			 * buffers (keeping track of a quiescent state
			 * timestamp). Therefore, the writer "pushes" data out
			 * of the buffers rather than letting the reader "pull"
			 * data from the buffer.
			 */
			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
			ret = lib_ring_buffer_get_next_subbuf(buf);
		}
		if (ret)
			return ret;
		iter->consumed = buf->cons_snapshot;
		iter->data_size = lib_ring_buffer_get_read_data_size(config, buf);
		iter->read_offset = iter->consumed;
		/* skip header */
		iter->read_offset += config->cb.subbuffer_header_size();
		iter->state = ITER_TEST_RECORD;
		goto restart;
	case ITER_TEST_RECORD:
		if (iter->read_offset - iter->consumed >= iter->data_size) {
			iter->state = ITER_PUT_SUBBUF;
		} else {
			CHAN_WARN_ON(chan, !config->cb.record_get);
			config->cb.record_get(config, chan, buf,
					      iter->read_offset,
					      &iter->header_len,
					      &iter->payload_len,
					      &iter->timestamp);
			iter->read_offset += iter->header_len;
			subbuffer_consume_record(config, &buf->backend);
			iter->state = ITER_NEXT_RECORD;
			return iter->payload_len;
		}
		goto restart;
	case ITER_NEXT_RECORD:
		iter->read_offset += iter->payload_len;
		iter->state = ITER_TEST_RECORD;
		goto restart;
	case ITER_PUT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		iter->state = ITER_GET_SUBBUF;
		goto restart;
	default:
		CHAN_WARN_ON(chan, 1);	/* Should not happen */
		return -EPERM;
	}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
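
/*
 * Usage sketch (illustrative only; consume_payload() is a hypothetical
 * consumer): poll a single buffer until it is finalized, handling the two
 * documented error codes.
 *
 *	ssize_t len;
 *
 *	for (;;) {
 *		len = lib_ring_buffer_get_next_record(chan, buf);
 *		if (len == -ENODATA)
 *			break;
 *		if (len == -EAGAIN)
 *			continue;
 *		consume_payload(buf, buf->iter.read_offset, len);
 *	}
 */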
static int buf_is_higher(void *a, void *b)
{
	struct lib_ring_buffer *bufa = a;
	struct lib_ring_buffer *bufb = b;

	/* Consider lowest timestamps to be at the top of the heap */
	return (bufa->iter.timestamp < bufb->iter.timestamp);
}
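
/*
 * Note (added for clarity): lttng_heap keeps at its top the element for
 * which the comparator returns true against all others, so with
 * buf_is_higher() the buffer holding the oldest pending record is what
 * lttng_heap_maximum() returns. Merging the per-cpu streams in timestamp
 * order therefore costs O(log(NR_CPUS)) per extracted record, as stated
 * in the file header.
 */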
static
void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
					   struct channel *chan)
{
	struct lttng_ptr_heap *heap = &chan->iter.heap;
	struct lib_ring_buffer *buf, *tmp;
	ssize_t len;

	list_for_each_entry_safe(buf, tmp, &chan->iter.empty_head,
				 iter.empty_node) {
		len = lib_ring_buffer_get_next_record(chan, buf);

		/*
		 * Deal with -EAGAIN and -ENODATA.
		 * len >= 0 means record contains data.
		 * -EBUSY should never happen, because we support only one
		 * reader.
		 */
		switch (len) {
		case -EAGAIN:
			/* Keep node in empty list */
			break;
		case -ENODATA:
			/*
			 * Buffer is finalized. Don't add to list of empty
			 * buffer, because it has no more data to provide, ever.
			 */
			list_del(&buf->iter.empty_node);
			break;
		case -EBUSY:
			CHAN_WARN_ON(chan, 1);
			break;
		default:
			/*
			 * Insert buffer into the heap, remove from empty buffer
			 * list.
			 */
			CHAN_WARN_ON(chan, len < 0);
			list_del(&buf->iter.empty_node);
			CHAN_WARN_ON(chan, lttng_heap_insert(heap, buf));
		}
	}
}
static
void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
				 struct channel *chan)
{
	u64 timestamp_qs;
	unsigned long wait_msecs;

	/*
	 * No need to wait if no empty buffers are present.
	 */
	if (list_empty(&chan->iter.empty_head))
		return;

	timestamp_qs = config->cb.ring_buffer_clock_read(chan);
	/*
	 * We need to consider previously empty buffers.
	 * Do a "get next buf record" on each of them. Add them to
	 * the heap if they have data. If at least one of them
	 * doesn't have data, we need to wait for
	 * switch_timer_interval + MAX_SYSTEM_LATENCY (so we are sure the
	 * buffers have been switched either by the timer or idle entry) and
	 * check them again, adding them if they have data.
	 */
	lib_ring_buffer_get_empty_buf_records(config, chan);

	/*
	 * No need to wait if no empty buffers are present.
	 */
	if (list_empty(&chan->iter.empty_head))
		goto end;

	/*
	 * We need to wait for the buffer switch timer to run. If the
	 * CPU is idle, idle entry performed the switch.
	 * TODO: we could optimize further by skipping the sleep if all
	 * empty buffers belong to idle or offline cpus.
	 */
	wait_msecs = jiffies_to_msecs(chan->switch_timer_interval);
	wait_msecs += MAX_SYSTEM_LATENCY;
	msleep(wait_msecs);
	lib_ring_buffer_get_empty_buf_records(config, chan);
	/*
	 * Any buffer still in the empty list here cannot possibly
	 * contain an event with a timestamp prior to "timestamp_qs".
	 * The new quiescent state timestamp is the one we grabbed
	 * before waiting for buffer data. It is therefore safe to
	 * ignore empty buffers up to last_qs timestamp for fusion
	 * merge.
	 */
end:
	chan->iter.last_qs = timestamp_qs;
}
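
/*
 * Worked example (illustrative): with chan->switch_timer_interval set to
 * 50 jiffies at HZ=250, jiffies_to_msecs() yields 200 ms, so the reader
 * sleeps 200 + MAX_SYSTEM_LATENCY = 450 ms before re-scanning the empty
 * buffers for newly pushed data.
 */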
/**
 * channel_get_next_record - Get the next record in a channel.
 * @chan: channel
 * @ret_buf: the buffer in which the event is located (output)
 *
 * Returns the size of the new current event, -EAGAIN if all buffers are empty,
 * -ENODATA if all buffers are empty and finalized. The channel must already be
 * opened for reading.
 */
ssize_t channel_get_next_record(struct channel *chan,
				struct lib_ring_buffer **ret_buf)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf;
	struct lttng_ptr_heap *heap;
	ssize_t len;

	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
		*ret_buf = channel_get_ring_buffer(config, chan, 0);
		return lib_ring_buffer_get_next_record(chan, *ret_buf);
	}

	heap = &chan->iter.heap;

	/*
	 * Get next record for topmost buffer.
	 */
	buf = lttng_heap_maximum(heap);
	if (buf) {
		len = lib_ring_buffer_get_next_record(chan, buf);
		/*
		 * Deal with -EAGAIN and -ENODATA.
		 * len >= 0 means record contains data.
		 */
		switch (len) {
		case -EAGAIN:
			buf->iter.timestamp = 0;
			list_add(&buf->iter.empty_node, &chan->iter.empty_head);
			/* Remove topmost buffer from the heap */
			CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
			break;
		case -ENODATA:
			/*
			 * Buffer is finalized. Remove buffer from heap and
			 * don't add to list of empty buffer, because it has no
			 * more data to provide, ever.
			 */
			CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
			break;
		case -EBUSY:
			CHAN_WARN_ON(chan, 1);
			break;
		default:
			/*
			 * Reinsert buffer into the heap. Note that heap can be
			 * partially empty, so we need to use
			 * lttng_heap_replace_max().
			 */
			CHAN_WARN_ON(chan, len < 0);
			CHAN_WARN_ON(chan, lttng_heap_replace_max(heap, buf) != buf);
			break;
		}
	}

	buf = lttng_heap_maximum(heap);
	if (!buf || buf->iter.timestamp > chan->iter.last_qs) {
		/*
		 * Deal with buffers previously showing no data.
		 * Add buffers containing data to the heap, update
		 * last_qs.
		 */
		lib_ring_buffer_wait_for_qs(config, chan);
	}

	*ret_buf = buf = lttng_heap_maximum(heap);
	if (buf) {
		/*
		 * If this warning triggers, you probably need to check your
		 * system interrupt latency. Typical causes: too much printk()
		 * output going to a serial console with interrupts off.
		 * Allow for MAX_CLOCK_DELTA ns timestamp delta going backward.
		 * Observed on SMP KVM setups with trace_clock().
		 */
		if (chan->iter.last_timestamp
		    > (buf->iter.timestamp + MAX_CLOCK_DELTA)) {
			printk(KERN_WARNING "ring_buffer: timestamps going "
			       "backward. Last time %llu ns, cpu %d, "
			       "current time %llu ns, cpu %d, "
			       "delta %llu ns.\n",
			       chan->iter.last_timestamp, chan->iter.last_cpu,
			       buf->iter.timestamp, buf->backend.cpu,
			       chan->iter.last_timestamp - buf->iter.timestamp);
			CHAN_WARN_ON(chan, 1);
		}
		chan->iter.last_timestamp = buf->iter.timestamp;
		chan->iter.last_cpu = buf->backend.cpu;
		return buf->iter.payload_len;
	} else {
		/* Heap is empty */
		if (list_empty(&chan->iter.empty_head))
			return -ENODATA;	/* All buffers finalized */
		else
			return -EAGAIN;		/* Temporarily empty */
	}
}
EXPORT_SYMBOL_GPL(channel_get_next_record);
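
/*
 * Usage sketch (illustrative only; handle_event() is a hypothetical
 * consumer): drain a per-cpu channel in global timestamp order.
 *
 *	struct lib_ring_buffer *buf;
 *	ssize_t len;
 *
 *	for (;;) {
 *		len = channel_get_next_record(chan, &buf);
 *		if (len == -ENODATA)
 *			break;
 *		if (len == -EAGAIN)
 *			continue;
 *		handle_event(buf, buf->iter.read_offset, len);
 *	}
 */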
static
void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer *buf)
{
	if (buf->iter.allocated)
		return;

	buf->iter.allocated = 1;
	if (chan->iter.read_open && !buf->iter.read_open) {
		CHAN_WARN_ON(chan, lib_ring_buffer_open_read(buf) != 0);
		buf->iter.read_open = 1;
	}

	/* Add to list of buffers without any current record */
	if (chan->backend.config.alloc == RING_BUFFER_ALLOC_PER_CPU)
		list_add(&buf->iter.empty_node, &chan->iter.empty_head);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

int lttng_cpuhp_rb_iter_online(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel *chan = container_of(node, struct channel,
					    cpuhp_iter_online);
	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	lib_ring_buffer_iterator_init(chan, buf);
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_iter_online);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU
static
int channel_iterator_cpu_hotplug(struct notifier_block *nb,
				 unsigned long action,
				 void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel *chan = container_of(nb, struct channel,
					    hp_iter_notifier);
	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (!chan->hp_iter_enable)
		return NOTIFY_DONE;

	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		lib_ring_buffer_iterator_init(chan, buf);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
int channel_iterator_init(struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		int ret;

		INIT_LIST_HEAD(&chan->iter.empty_head);
		ret = lttng_heap_init(&chan->iter.heap,
				num_possible_cpus(),
				GFP_KERNEL, buf_is_higher);
		if (ret)
			return ret;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chan->cpuhp_iter_online.component = LTTNG_RING_BUFFER_ITER;
		ret = cpuhp_state_add_instance(lttng_rb_hp_online,
			&chan->cpuhp_iter_online.node);
		if (ret)
			return ret;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		{
			int cpu;

			/*
			 * In case of non-hotplug cpu, if the ring-buffer is allocated
			 * in early initcall, it will not be notified of secondary cpus.
			 * In that off case, we need to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			chan->hp_iter_notifier.notifier_call =
				channel_iterator_cpu_hotplug;
			chan->hp_iter_notifier.priority = 10;
			register_cpu_notifier(&chan->hp_iter_notifier);

			get_online_cpus();
			for_each_online_cpu(cpu) {
				buf = per_cpu_ptr(chan->backend.buf, cpu);
				lib_ring_buffer_iterator_init(chan, buf);
			}
			chan->hp_iter_enable = 1;
			put_online_cpus();
#else
			for_each_possible_cpu(cpu) {
				buf = per_cpu_ptr(chan->backend.buf, cpu);
				lib_ring_buffer_iterator_init(chan, buf);
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		buf = channel_get_ring_buffer(config, chan, 0);
		lib_ring_buffer_iterator_init(chan, buf);
	}
	return 0;
}
void channel_iterator_unregister_notifiers(struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		{
			int ret;

			ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
				&chan->cpuhp_iter_online.node);
			WARN_ON(ret);
		}
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		chan->hp_iter_enable = 0;
		unregister_cpu_notifier(&chan->hp_iter_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}
void channel_iterator_free(struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		lttng_heap_free(&chan->iter.heap);
}
int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
	return lib_ring_buffer_open_read(buf);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_open);
/*
 * Note: Iterators must not be mixed with other types of outputs, because an
 * iterator can leave the buffer in "GET" state, which is not consistent with
 * other types of output (mmap, splice, raw data read).
 */
void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
int channel_iterator_open(struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf;
	int ret = 0, cpu;

	CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		get_online_cpus();
		/* Allow CPU hotplug to keep track of opened reader */
		chan->iter.read_open = 1;
		for_each_channel_cpu(cpu, chan) {
			buf = channel_get_ring_buffer(config, chan, cpu);
			ret = lib_ring_buffer_iterator_open(buf);
			if (ret)
				goto error;
			buf->iter.read_open = 1;
		}
		put_online_cpus();
	} else {
		buf = channel_get_ring_buffer(config, chan, 0);
		ret = lib_ring_buffer_iterator_open(buf);
	}
	return ret;

error:
	/* Error should always happen on CPU 0, hence no close is required. */
	CHAN_WARN_ON(chan, cpu != 0);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(channel_iterator_open);
void channel_iterator_release(struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf;
	int cpu;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		get_online_cpus();
		for_each_channel_cpu(cpu, chan) {
			buf = channel_get_ring_buffer(config, chan, cpu);
			if (buf->iter.read_open) {
				lib_ring_buffer_iterator_release(buf);
				buf->iter.read_open = 0;
			}
		}
		chan->iter.read_open = 0;
		put_online_cpus();
	} else {
		buf = channel_get_ring_buffer(config, chan, 0);
		lib_ring_buffer_iterator_release(buf);
	}
}
EXPORT_SYMBOL_GPL(channel_iterator_release);
void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;

	if (buf->iter.state != ITER_GET_SUBBUF)
		lib_ring_buffer_put_next_subbuf(buf);
	buf->iter.state = ITER_GET_SUBBUF;
	/* Remove from heap (if present). */
	if (lttng_heap_cherrypick(&chan->iter.heap, buf))
		list_add(&buf->iter.empty_node, &chan->iter.empty_head);
	buf->iter.timestamp = 0;
	buf->iter.header_len = 0;
	buf->iter.payload_len = 0;
	buf->iter.consumed = 0;
	buf->iter.read_offset = 0;
	buf->iter.data_size = 0;
	/* Don't reset allocated and read_open */
}
void channel_iterator_reset(struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf;
	int cpu;

	/* Empty heap, put into empty_head */
	while ((buf = lttng_heap_remove(&chan->iter.heap)) != NULL)
		list_add(&buf->iter.empty_node, &chan->iter.empty_head);

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(config, chan, cpu);
		lib_ring_buffer_iterator_reset(buf);
	}
	/* Don't reset read_open */
	chan->iter.last_qs = 0;
	chan->iter.last_timestamp = 0;
	chan->iter.last_cpu = 0;
	chan->iter.len_left = 0;
}
/*
 * Ring buffer payload extraction read() implementation.
 */
static
ssize_t channel_ring_buffer_file_read(struct file *filp,
				      char __user *user_buf,
				      size_t count,
				      loff_t *ppos,
				      struct channel *chan,
				      struct lib_ring_buffer *buf,
				      int fusionmerge)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	size_t read_count = 0, read_offset;
	ssize_t len;

	might_sleep();
	if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
		return -EFAULT;

	/* Finish copy of previous record */
	if (*ppos != 0) {
		if (read_count < count) {
			len = chan->iter.len_left;
			read_offset = *ppos;
			if (config->alloc == RING_BUFFER_ALLOC_PER_CPU
			    && fusionmerge)
				buf = lttng_heap_maximum(&chan->iter.heap);
			CHAN_WARN_ON(chan, !buf);
			goto skip_get_next;
		}
	}

	while (read_count < count) {
		size_t copy_len, space_left;

		if (fusionmerge)
			len = channel_get_next_record(chan, &buf);
		else
			len = lib_ring_buffer_get_next_record(chan, buf);
len_test:
		if (len < 0) {
			/*
			 * Check if buffer is finalized (end of file).
			 */
			if (len == -ENODATA) {
				/* A 0 read_count will tell about end of file */
				goto nodata;
			}
			if (filp->f_flags & O_NONBLOCK) {
				if (!read_count)
					read_count = -EAGAIN;
				goto nodata;
			} else {
				int error;

				/*
				 * No data available at the moment, return what
				 * we got.
				 */
				if (read_count)
					goto nodata;

				/*
				 * Wait for returned len to be >= 0 or -ENODATA.
				 */
				if (fusionmerge)
					error = wait_event_interruptible(
						  chan->read_wait,
						  ((len = channel_get_next_record(chan,
								&buf)), len != -EAGAIN));
				else
					error = wait_event_interruptible(
						  buf->read_wait,
						  ((len = lib_ring_buffer_get_next_record(
								  chan, buf)), len != -EAGAIN));
				CHAN_WARN_ON(chan, len == -EBUSY);
				if (error) {
					read_count = error;
					goto nodata;
				}
				CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
				goto len_test;
			}
		}
		read_offset = buf->iter.read_offset;
skip_get_next:
		space_left = count - read_count;
		if (len <= space_left) {
			copy_len = len;
			chan->iter.len_left = 0;
			*ppos = 0;
		} else {
			copy_len = space_left;
			chan->iter.len_left = len - copy_len;
			*ppos = read_offset + copy_len;
		}
		if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
						   &user_buf[read_count],
						   copy_len)) {
			/*
			 * Leave the len_left and ppos values at their current
			 * state, as we currently have a valid event to read.
			 */
			return -EFAULT;
		}
		read_count += copy_len;
	}
	return read_count;

nodata:
	*ppos = 0;
	chan->iter.len_left = 0;
	return read_count;
}
/**
 * lib_ring_buffer_file_read - Read buffer record payload.
 * @filp: file structure pointer.
 * @user_buf: user buffer to read data into.
 * @count: number of bytes to read.
 * @ppos: file read position.
 *
 * Returns a negative value on error, or the number of bytes read on success.
 * ppos is used to save the position _within the current record_ between calls
 * to read().
 */
static
ssize_t lib_ring_buffer_file_read(struct file *filp,
				  char __user *user_buf,
				  size_t count,
				  loff_t *ppos)
{
	struct inode *inode = filp->lttng_f_dentry->d_inode;
	struct lib_ring_buffer *buf = inode->i_private;
	struct channel *chan = buf->backend.chan;

	return channel_ring_buffer_file_read(filp, user_buf, count, ppos,
					     chan, buf, 0);
}
/**
 * channel_file_read - Read channel record payload.
 * @filp: file structure pointer.
 * @user_buf: user buffer to read data into.
 * @count: number of bytes to read.
 * @ppos: file read position.
 *
 * Returns a negative value on error, or the number of bytes read on success.
 * ppos is used to save the position _within the current record_ between calls
 * to read().
 */
static
ssize_t channel_file_read(struct file *filp,
			  char __user *user_buf,
			  size_t count,
			  loff_t *ppos)
{
	struct inode *inode = filp->lttng_f_dentry->d_inode;
	struct channel *chan = inode->i_private;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		return channel_ring_buffer_file_read(filp, user_buf, count,
						     ppos, chan, NULL, 1);
	else {
		struct lib_ring_buffer *buf =
			channel_get_ring_buffer(config, chan, 0);

		return channel_ring_buffer_file_read(filp, user_buf, count,
						     ppos, chan, buf, 0);
	}
}
static
int lib_ring_buffer_file_open(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = inode->i_private;
	int ret;

	ret = lib_ring_buffer_iterator_open(buf);
	if (ret)
		return ret;

	file->private_data = buf;
	ret = nonseekable_open(inode, file);
	if (ret)
		goto release_iter;
	return 0;

release_iter:
	lib_ring_buffer_iterator_release(buf);
	return ret;
}
static
int lib_ring_buffer_file_release(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = inode->i_private;

	lib_ring_buffer_iterator_release(buf);
	return 0;
}
static
int channel_file_open(struct inode *inode, struct file *file)
{
	struct channel *chan = inode->i_private;
	int ret;

	ret = channel_iterator_open(chan);
	if (ret)
		return ret;

	file->private_data = chan;
	ret = nonseekable_open(inode, file);
	if (ret)
		goto release_iter;
	return 0;

release_iter:
	channel_iterator_release(chan);
	return ret;
}
static
int channel_file_release(struct inode *inode, struct file *file)
{
	struct channel *chan = inode->i_private;

	channel_iterator_release(chan);
	return 0;
}
const struct file_operations channel_payload_file_operations = {
	.owner = THIS_MODULE,
	.open = channel_file_open,
	.release = channel_file_release,
	.read = channel_file_read,
	.llseek = vfs_lib_ring_buffer_no_llseek,
};
EXPORT_SYMBOL_GPL(channel_payload_file_operations);
const struct file_operations lib_ring_buffer_payload_file_operations = {
	.owner = THIS_MODULE,
	.open = lib_ring_buffer_file_open,
	.release = lib_ring_buffer_file_release,
	.read = lib_ring_buffer_file_read,
	.llseek = vfs_lib_ring_buffer_no_llseek,
};
EXPORT_SYMBOL_GPL(lib_ring_buffer_payload_file_operations);
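
/*
 * Wiring sketch (illustrative; the actual file creation happens outside
 * this translation unit): both operation tables expect inode->i_private to
 * carry the channel or buffer pointer, which is what debugfs provides, e.g.:
 *
 *	dentry = debugfs_create_file("payload", 0400, parent, chan,
 *				     &channel_payload_file_operations);
 *
 * debugfs stores the data pointer in inode->i_private, which is exactly
 * where channel_file_open() expects to find the channel.
 */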