/*
 * Ring Buffer VFS file operations.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/compat.h>

#include "../../wrapper/ringbuffer/backend.h"
#include "../../wrapper/ringbuffer/frontend.h"
#include "../../wrapper/ringbuffer/vfs.h"
#include "../../wrapper/poll.h"
#include "../../lttng-tracer.h"

static int put_ulong(unsigned long val, unsigned long arg)
{
	return put_user(val, (unsigned long __user *)arg);
}

#ifdef CONFIG_COMPAT
static int compat_put_ulong(compat_ulong_t val, unsigned long arg)
{
	return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
}
#endif

/*
 * This is not used by anonymous file descriptors. This code is left
 * there if we ever want to implement an inode with open() operation.
 */
int lib_ring_buffer_open(struct inode *inode, struct file *file,
		struct lib_ring_buffer *buf)
{
	int ret;

	if (!buf)
		return -EINVAL;

	ret = lib_ring_buffer_open_read(buf);
	if (ret)
		return ret;

	ret = nonseekable_open(inode, file);
	if (ret)
		goto release_read;
	return 0;

release_read:
	lib_ring_buffer_release_read(buf);
	return ret;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_open);

/**
 *	vfs_lib_ring_buffer_open - ring buffer open file operation
 *	@inode: opened inode
 *	@file: opened file
 *
 *	Open implementation. Makes sure only one open instance of a buffer is
 *	done at a given moment.
 */
int vfs_lib_ring_buffer_open(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = inode->i_private;

	file->private_data = buf;
	return lib_ring_buffer_open(inode, file, buf);
}

int lib_ring_buffer_release(struct inode *inode, struct file *file,
		struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_release);

/**
 *	vfs_lib_ring_buffer_release - ring buffer release file operation
 *	@inode: opened inode
 *	@file: opened file
 *
 *	Release implementation.
 */
int vfs_lib_ring_buffer_release(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = file->private_data;

	return lib_ring_buffer_release(inode, file, buf);
}

unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
		struct lib_ring_buffer *buf)
{
	unsigned int mask = 0;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	int finalized, disabled;

	if (filp->f_mode & FMODE_READ) {
		poll_wait_set_exclusive(wait);
		poll_wait(filp, &buf->read_wait, wait);

		finalized = lib_ring_buffer_is_finalized(config, buf);
		disabled = lib_ring_buffer_channel_is_disabled(chan);

		/*
		 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
		 * finalized load before offsets loads.
		 */
		WARN_ON(atomic_long_read(&buf->active_readers) != 1);
retry:
		if (disabled)
			return POLLERR;

		if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
		  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
		  == 0) {
			if (finalized)
				return POLLHUP;
			else {
				/*
				 * The memory barriers
				 * __wait_event()/wake_up_interruptible() take
				 * care of "raw_spin_is_locked" memory ordering.
				 */
				if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
					goto retry;
				else
					return 0;
			}
		} else {
			if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
					chan)
			  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
					chan)
			  >= chan->backend.buf_size)
				return POLLPRI | POLLRDBAND;
			else
				return POLLIN | POLLRDNORM;
		}
	}
	return mask;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_poll);

/**
 *	vfs_lib_ring_buffer_poll - ring buffer poll file operation
 *	@filp: the file
 *	@wait: poll table
 *
 *	Poll implementation.
 */
unsigned int vfs_lib_ring_buffer_poll(struct file *filp, poll_table *wait)
{
	struct lib_ring_buffer *buf = filp->private_data;

	return lib_ring_buffer_poll(filp, wait, buf);
}

long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg, struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_SNAPSHOT:
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
					&buf->prod_snapshot);
	case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
		return put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
		return put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_GET_SUBBUF:
	{
		unsigned long uconsume;
		long ret;

		ret = get_user(uconsume, (unsigned long __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		ret = lib_ring_buffer_get_subbuf(buf, uconsume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_GET_SUBBUF_SIZE:
		return put_ulong(lib_ring_buffer_get_read_data_size(config, buf),
				arg);
	case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		return put_ulong(size, arg);
	}
	case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
		return put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > INT_MAX)
			return -EFBIG;
		return put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		sb_bindex = subbuffer_id_get_index(config,
					buf->backend.buf_rsb.id);
		return put_ulong(buf->backend.array[sb_bindex]->mmap_offset,
				arg);
	}
	case RING_BUFFER_FLUSH:
		lib_ring_buffer_switch_remote(buf);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_ioctl);

/**
 *	vfs_lib_ring_buffer_ioctl - control ring buffer reader synchronization
 *	@filp: the file
 *	@cmd: the command
 *	@arg: command arg
 *
 *	This ioctl implements commands necessary for producer/consumer
 *	and flight recorder reader interaction :
 *	RING_BUFFER_GET_NEXT_SUBBUF
 *		Get the next sub-buffer that can be read. It never blocks.
 *	RING_BUFFER_PUT_NEXT_SUBBUF
 *		Release the currently read sub-buffer.
 *	RING_BUFFER_GET_SUBBUF_SIZE
 *		returns the size of the current sub-buffer.
 *	RING_BUFFER_GET_MAX_SUBBUF_SIZE
 *		returns the maximum size for sub-buffers.
 *	RING_BUFFER_GET_NUM_SUBBUF
 *		returns the number of reader-visible sub-buffers in the per cpu
 *		channel (for mmap).
 *	RING_BUFFER_GET_MMAP_READ_OFFSET
 *		returns the offset of the subbuffer belonging to the reader.
 *		Should only be used for mmap clients.
 */
long vfs_lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;

	return lib_ring_buffer_ioctl(filp, cmd, arg, buf);
}

#ifdef CONFIG_COMPAT
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg, struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_COMPAT_SNAPSHOT:
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
					&buf->prod_snapshot);
	case RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED:
		return compat_put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED:
		return compat_put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_COMPAT_GET_SUBBUF:
	{
		__u32 uconsume;
		unsigned long consume;
		long ret;

		ret = get_user(uconsume, (__u32 __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		consume = buf->cons_snapshot;
		consume &= ~0xFFFFFFFFL;
		consume |= uconsume;
		ret = lib_ring_buffer_get_subbuf(buf, consume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_COMPAT_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_COMPAT_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_COMPAT_GET_SUBBUF_SIZE:
	{
		unsigned long data_size;

		data_size = lib_ring_buffer_get_read_data_size(config, buf);
		if (data_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(data_size, arg);
	}
	case RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		if (size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(size, arg);
	}
	case RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE:
		if (chan->backend.subbuf_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_COMPAT_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex, read_offset;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		sb_bindex = subbuffer_id_get_index(config,
					buf->backend.buf_rsb.id);
		read_offset = buf->backend.array[sb_bindex]->mmap_offset;
		if (read_offset > UINT_MAX)
			return -EINVAL;
		return compat_put_ulong(read_offset, arg);
	}
	case RING_BUFFER_COMPAT_FLUSH:
		lib_ring_buffer_switch_remote(buf);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_compat_ioctl);

long vfs_lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;

	return lib_ring_buffer_compat_ioctl(filp, cmd, arg, buf);
}
#endif

const struct file_operations lib_ring_buffer_file_operations = {
	.owner = THIS_MODULE,
	.open = vfs_lib_ring_buffer_open,
	.release = vfs_lib_ring_buffer_release,
	.poll = vfs_lib_ring_buffer_poll,
	.splice_read = vfs_lib_ring_buffer_splice_read,
	.mmap = vfs_lib_ring_buffer_mmap,
	.unlocked_ioctl = vfs_lib_ring_buffer_ioctl,
	.llseek = vfs_lib_ring_buffer_no_llseek,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vfs_lib_ring_buffer_compat_ioctl,
#endif
};
EXPORT_SYMBOL_GPL(lib_ring_buffer_file_operations);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Ring Buffer Library VFS");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);