1 /* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
5 * Ring Buffer VFS file operations.
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 #include <linux/module.h>
12 #include <linux/compat.h>
14 #include <ringbuffer/backend.h>
15 #include <ringbuffer/frontend.h>
16 #include <ringbuffer/vfs.h>
17 #include <wrapper/poll.h>
18 #include <lttng/tracer.h>
20 static int put_ulong(unsigned long val
, unsigned long arg
)
22 return put_user(val
, (unsigned long __user
*)arg
);
26 static int compat_put_ulong(compat_ulong_t val
, unsigned long arg
)
28 return put_user(val
, (compat_ulong_t __user
*)compat_ptr(arg
));
/*
 * This is not used by anonymous file descriptors. This code is left
 * there if we ever want to implement an inode with open() operation.
 */
36 int lib_ring_buffer_open(struct inode
*inode
, struct file
*file
,
37 struct lttng_kernel_ring_buffer
*buf
)
44 ret
= lib_ring_buffer_open_read(buf
);
48 ret
= nonseekable_open(inode
, file
);
54 lib_ring_buffer_release_read(buf
);
57 EXPORT_SYMBOL_GPL(lib_ring_buffer_open
);
/**
 *	vfs_lib_ring_buffer_open - ring buffer open file operation
 *	@inode: opened inode
 *	@file: opened file
 *
 *	Open implementation. Makes sure only one open instance of a buffer is
 *	done at a given moment.
 */
68 int vfs_lib_ring_buffer_open(struct inode
*inode
, struct file
*file
)
70 struct lttng_kernel_ring_buffer
*buf
= inode
->i_private
;
72 file
->private_data
= buf
;
73 return lib_ring_buffer_open(inode
, file
, buf
);
/* Drop the read-side reference on @buf taken by lib_ring_buffer_open(). */
int lib_ring_buffer_release(struct inode *inode, struct file *file,
		struct lttng_kernel_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_release);
/**
 *	vfs_lib_ring_buffer_release - ring buffer release file operation
 *	@inode: opened inode
 *	@file: opened file
 *
 *	Release implementation.
 */
93 int vfs_lib_ring_buffer_release(struct inode
*inode
, struct file
*file
)
95 struct lttng_kernel_ring_buffer
*buf
= file
->private_data
;
97 return lib_ring_buffer_release(inode
, file
, buf
);
100 unsigned int lib_ring_buffer_poll(struct file
*filp
, poll_table
*wait
,
101 struct lttng_kernel_ring_buffer
*buf
)
103 unsigned int mask
= 0;
104 struct lttng_kernel_ring_buffer_channel
*chan
= buf
->backend
.chan
;
105 const struct lttng_kernel_ring_buffer_config
*config
= &chan
->backend
.config
;
106 int finalized
, disabled
;
108 if (filp
->f_mode
& FMODE_READ
) {
109 poll_wait_set_exclusive(wait
);
110 poll_wait(filp
, &buf
->read_wait
, wait
);
112 finalized
= lib_ring_buffer_is_finalized(config
, buf
);
113 disabled
= lib_ring_buffer_channel_is_disabled(chan
);
116 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
117 * finalized load before offsets loads.
119 WARN_ON(atomic_long_read(&buf
->active_readers
) != 1);
124 if (subbuf_trunc(lib_ring_buffer_get_offset(config
, buf
), chan
)
125 - subbuf_trunc(lib_ring_buffer_get_consumed(config
, buf
), chan
)
131 * The memory barriers
132 * __wait_event()/wake_up_interruptible() take
133 * care of "raw_spin_is_locked" memory ordering.
135 if (raw_spin_is_locked(&buf
->raw_tick_nohz_spinlock
))
141 if (subbuf_trunc(lib_ring_buffer_get_offset(config
, buf
),
143 - subbuf_trunc(lib_ring_buffer_get_consumed(config
, buf
),
145 >= chan
->backend
.buf_size
)
146 return POLLPRI
| POLLRDBAND
;
148 return POLLIN
| POLLRDNORM
;
153 EXPORT_SYMBOL_GPL(lib_ring_buffer_poll
);
/**
 *	vfs_lib_ring_buffer_poll - ring buffer poll file operation
 *	@filp: the file
 *	@wait: poll table
 *
 *	Poll implementation.
 */
163 unsigned int vfs_lib_ring_buffer_poll(struct file
*filp
, poll_table
*wait
)
165 struct lttng_kernel_ring_buffer
*buf
= filp
->private_data
;
167 return lib_ring_buffer_poll(filp
, wait
, buf
);
170 long lib_ring_buffer_ioctl(struct file
*filp
, unsigned int cmd
,
171 unsigned long arg
, struct lttng_kernel_ring_buffer
*buf
)
173 struct lttng_kernel_ring_buffer_channel
*chan
= buf
->backend
.chan
;
174 const struct lttng_kernel_ring_buffer_config
*config
= &chan
->backend
.config
;
176 if (lib_ring_buffer_channel_is_disabled(chan
))
180 case LTTNG_KERNEL_ABI_RING_BUFFER_SNAPSHOT
:
181 return lib_ring_buffer_snapshot(buf
, &buf
->cons_snapshot
,
182 &buf
->prod_snapshot
);
183 case LTTNG_KERNEL_ABI_RING_BUFFER_SNAPSHOT_SAMPLE_POSITIONS
:
184 return lib_ring_buffer_snapshot_sample_positions(buf
,
185 &buf
->cons_snapshot
, &buf
->prod_snapshot
);
186 case LTTNG_KERNEL_ABI_RING_BUFFER_SNAPSHOT_GET_CONSUMED
:
187 return put_ulong(buf
->cons_snapshot
, arg
);
188 case LTTNG_KERNEL_ABI_RING_BUFFER_SNAPSHOT_GET_PRODUCED
:
189 return put_ulong(buf
->prod_snapshot
, arg
);
190 case LTTNG_KERNEL_ABI_RING_BUFFER_GET_SUBBUF
:
192 unsigned long uconsume
;
195 ret
= get_user(uconsume
, (unsigned long __user
*) arg
);
197 return ret
; /* will return -EFAULT */
198 ret
= lib_ring_buffer_get_subbuf(buf
, uconsume
);
200 /* Set file position to zero at each successful "get" */
205 case LTTNG_KERNEL_ABI_RING_BUFFER_PUT_SUBBUF
:
206 lib_ring_buffer_put_subbuf(buf
);
209 case LTTNG_KERNEL_ABI_RING_BUFFER_GET_NEXT_SUBBUF
:
213 ret
= lib_ring_buffer_get_next_subbuf(buf
);
215 /* Set file position to zero at each successful "get" */
220 case LTTNG_KERNEL_ABI_RING_BUFFER_PUT_NEXT_SUBBUF
:
221 lib_ring_buffer_put_next_subbuf(buf
);
223 case LTTNG_KERNEL_ABI_RING_BUFFER_GET_SUBBUF_SIZE
:
224 return put_ulong(lib_ring_buffer_get_read_data_size(config
, buf
),
226 case LTTNG_KERNEL_ABI_RING_BUFFER_GET_PADDED_SUBBUF_SIZE
:
230 size
= lib_ring_buffer_get_read_data_size(config
, buf
);
231 size
= PAGE_ALIGN(size
);
232 return put_ulong(size
, arg
);
234 case LTTNG_KERNEL_ABI_RING_BUFFER_GET_MAX_SUBBUF_SIZE
:
235 return put_ulong(chan
->backend
.subbuf_size
, arg
);
236 case LTTNG_KERNEL_ABI_RING_BUFFER_GET_MMAP_LEN
:
238 unsigned long mmap_buf_len
;
240 if (config
->output
!= RING_BUFFER_MMAP
)
242 mmap_buf_len
= chan
->backend
.buf_size
;
243 if (chan
->backend
.extra_reader_sb
)
244 mmap_buf_len
+= chan
->backend
.subbuf_size
;
245 if (mmap_buf_len
> INT_MAX
)
247 return put_ulong(mmap_buf_len
, arg
);
249 case LTTNG_KERNEL_ABI_RING_BUFFER_GET_MMAP_READ_OFFSET
:
251 unsigned long sb_bindex
;
253 if (config
->output
!= RING_BUFFER_MMAP
)
255 sb_bindex
= subbuffer_id_get_index(config
,
256 buf
->backend
.buf_rsb
.id
);
257 return put_ulong(buf
->backend
.array
[sb_bindex
]->mmap_offset
,
260 case LTTNG_KERNEL_ABI_RING_BUFFER_FLUSH
:
261 lib_ring_buffer_switch_remote(buf
);
263 case LTTNG_KERNEL_ABI_RING_BUFFER_FLUSH_EMPTY
:
264 lib_ring_buffer_switch_remote_empty(buf
);
266 case LTTNG_KERNEL_ABI_RING_BUFFER_CLEAR
:
267 lib_ring_buffer_clear(buf
);
273 EXPORT_SYMBOL_GPL(lib_ring_buffer_ioctl
);
/**
 *	vfs_lib_ring_buffer_ioctl - control ring buffer reader synchronization
 *	@filp: the file
 *	@cmd: the command
 *	@arg: command arg
 *
 *	This ioctl implements commands necessary for producer/consumer
 *	and flight recorder reader interaction :
 *	LTTNG_KERNEL_ABI_RING_BUFFER_GET_NEXT_SUBBUF
 *		Get the next sub-buffer that can be read. It never blocks.
 *	LTTNG_KERNEL_ABI_RING_BUFFER_PUT_NEXT_SUBBUF
 *		Release the currently read sub-buffer.
 *	LTTNG_KERNEL_ABI_RING_BUFFER_GET_SUBBUF_SIZE
 *		returns the size of the current sub-buffer.
 *	LTTNG_KERNEL_ABI_RING_BUFFER_GET_MAX_SUBBUF_SIZE
 *		returns the maximum size for sub-buffers.
 *	LTTNG_KERNEL_ABI_RING_BUFFER_GET_NUM_SUBBUF
 *		returns the number of reader-visible sub-buffers in the per cpu
 *		channel (for mmap).
 *	LTTNG_KERNEL_ABI_RING_BUFFER_GET_MMAP_READ_OFFSET
 *		returns the offset of the subbuffer belonging to the reader.
 *		Should only be used for mmap clients.
 */
300 long vfs_lib_ring_buffer_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
302 struct lttng_kernel_ring_buffer
*buf
= filp
->private_data
;
304 return lib_ring_buffer_ioctl(filp
, cmd
, arg
, buf
);
308 long lib_ring_buffer_compat_ioctl(struct file
*filp
, unsigned int cmd
,
309 unsigned long arg
, struct lttng_kernel_ring_buffer
*buf
)
311 struct lttng_kernel_ring_buffer_channel
*chan
= buf
->backend
.chan
;
312 const struct lttng_kernel_ring_buffer_config
*config
= &chan
->backend
.config
;
314 if (lib_ring_buffer_channel_is_disabled(chan
))
318 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_SNAPSHOT
:
319 return lib_ring_buffer_snapshot(buf
, &buf
->cons_snapshot
,
320 &buf
->prod_snapshot
);
321 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_SNAPSHOT_SAMPLE_POSITIONS
:
322 return lib_ring_buffer_snapshot_sample_positions(buf
,
323 &buf
->cons_snapshot
, &buf
->prod_snapshot
);
324 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED
:
325 return compat_put_ulong(buf
->cons_snapshot
, arg
);
326 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED
:
327 return compat_put_ulong(buf
->prod_snapshot
, arg
);
328 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_GET_SUBBUF
:
331 unsigned long consume
;
334 ret
= get_user(uconsume
, (__u32 __user
*) arg
);
336 return ret
; /* will return -EFAULT */
337 consume
= buf
->cons_snapshot
;
338 consume
&= ~0xFFFFFFFFL
;
340 ret
= lib_ring_buffer_get_subbuf(buf
, consume
);
342 /* Set file position to zero at each successful "get" */
347 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_PUT_SUBBUF
:
348 lib_ring_buffer_put_subbuf(buf
);
351 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_GET_NEXT_SUBBUF
:
355 ret
= lib_ring_buffer_get_next_subbuf(buf
);
357 /* Set file position to zero at each successful "get" */
362 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF
:
363 lib_ring_buffer_put_next_subbuf(buf
);
365 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_GET_SUBBUF_SIZE
:
367 unsigned long data_size
;
369 data_size
= lib_ring_buffer_get_read_data_size(config
, buf
);
370 if (data_size
> UINT_MAX
)
372 return compat_put_ulong(data_size
, arg
);
374 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE
:
378 size
= lib_ring_buffer_get_read_data_size(config
, buf
);
379 size
= PAGE_ALIGN(size
);
382 return compat_put_ulong(size
, arg
);
384 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE
:
385 if (chan
->backend
.subbuf_size
> UINT_MAX
)
387 return compat_put_ulong(chan
->backend
.subbuf_size
, arg
);
388 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_GET_MMAP_LEN
:
390 unsigned long mmap_buf_len
;
392 if (config
->output
!= RING_BUFFER_MMAP
)
394 mmap_buf_len
= chan
->backend
.buf_size
;
395 if (chan
->backend
.extra_reader_sb
)
396 mmap_buf_len
+= chan
->backend
.subbuf_size
;
397 if (mmap_buf_len
> UINT_MAX
)
399 return compat_put_ulong(mmap_buf_len
, arg
);
401 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET
:
403 unsigned long sb_bindex
, read_offset
;
405 if (config
->output
!= RING_BUFFER_MMAP
)
407 sb_bindex
= subbuffer_id_get_index(config
,
408 buf
->backend
.buf_rsb
.id
);
409 read_offset
= buf
->backend
.array
[sb_bindex
]->mmap_offset
;
410 if (read_offset
> UINT_MAX
)
412 return compat_put_ulong(read_offset
, arg
);
414 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_FLUSH
:
415 lib_ring_buffer_switch_remote(buf
);
417 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_FLUSH_EMPTY
:
418 lib_ring_buffer_switch_remote_empty(buf
);
420 case LTTNG_KERNEL_ABI_RING_BUFFER_COMPAT_CLEAR
:
421 lib_ring_buffer_clear(buf
);
427 EXPORT_SYMBOL_GPL(lib_ring_buffer_compat_ioctl
);
430 long vfs_lib_ring_buffer_compat_ioctl(struct file
*filp
, unsigned int cmd
,
433 struct lttng_kernel_ring_buffer
*buf
= filp
->private_data
;
435 return lib_ring_buffer_compat_ioctl(filp
, cmd
, arg
, buf
);
439 const struct file_operations lib_ring_buffer_file_operations
= {
440 .owner
= THIS_MODULE
,
441 .open
= vfs_lib_ring_buffer_open
,
442 .release
= vfs_lib_ring_buffer_release
,
443 .poll
= vfs_lib_ring_buffer_poll
,
444 .splice_read
= vfs_lib_ring_buffer_splice_read
,
445 .mmap
= vfs_lib_ring_buffer_mmap
,
446 .unlocked_ioctl
= vfs_lib_ring_buffer_ioctl
,
447 .llseek
= vfs_lib_ring_buffer_no_llseek
,
449 .compat_ioctl
= vfs_lib_ring_buffer_compat_ioctl
,
452 EXPORT_SYMBOL_GPL(lib_ring_buffer_file_operations
);
454 MODULE_LICENSE("GPL and additional rights");
455 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
456 MODULE_DESCRIPTION("LTTng ring buffer library");
457 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION
) "."
458 __stringify(LTTNG_MODULES_MINOR_VERSION
) "."
459 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION
)
460 LTTNG_MODULES_EXTRAVERSION
);