/*
 * ring_buffer_vfs.c
 *
 * Ring Buffer VFS file operations.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/compat.h>

#include "../../wrapper/ringbuffer/backend.h"
#include "../../wrapper/ringbuffer/frontend.h"
#include "../../wrapper/ringbuffer/vfs.h"
#include "../../wrapper/poll.h"
#include "../../lttng-tracer.h"

static int put_ulong(unsigned long val, unsigned long arg)
{
	return put_user(val, (unsigned long __user *)arg);
}

#ifdef CONFIG_COMPAT
static int compat_put_ulong(compat_ulong_t val, unsigned long arg)
{
	return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
}
#endif

/*
 * This is not used by anonymous file descriptors. This code is left
 * in place in case we ever want to implement an inode with an open()
 * operation.
 */
int lib_ring_buffer_open(struct inode *inode, struct file *file,
		struct lib_ring_buffer *buf)
{
	int ret;

	if (!buf)
		return -EINVAL;

	ret = lib_ring_buffer_open_read(buf);
	if (ret)
		return ret;

	ret = nonseekable_open(inode, file);
	if (ret)
		goto release_read;
	return 0;

release_read:
	lib_ring_buffer_release_read(buf);
	return ret;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_open);

/**
 * vfs_lib_ring_buffer_open - ring buffer open file operation
 * @inode: opened inode
 * @file: opened file
 *
 * Open implementation. Ensures that only one open instance of a buffer
 * exists at any given moment.
 */
static
int vfs_lib_ring_buffer_open(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = inode->i_private;

	file->private_data = buf;
	return lib_ring_buffer_open(inode, file, buf);
}

int lib_ring_buffer_release(struct inode *inode, struct file *file,
		struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_release);

/**
 * vfs_lib_ring_buffer_release - ring buffer release file operation
 * @inode: opened inode
 * @file: opened file
 *
 * Release implementation.
 */
static
int vfs_lib_ring_buffer_release(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = file->private_data;

	return lib_ring_buffer_release(inode, file, buf);
}

unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
		struct lib_ring_buffer *buf)
{
	unsigned int mask = 0;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	int finalized, disabled;

	if (filp->f_mode & FMODE_READ) {
		poll_wait_set_exclusive(wait);
		poll_wait(filp, &buf->read_wait, wait);

		finalized = lib_ring_buffer_is_finalized(config, buf);
		disabled = lib_ring_buffer_channel_is_disabled(chan);

		/*
		 * lib_ring_buffer_is_finalized() contains a smp_rmb()
		 * ordering the finalized load before the offset loads.
		 */
		WARN_ON(atomic_long_read(&buf->active_readers) != 1);
	retry:
		if (disabled)
			return POLLERR;

		if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
		    - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
		    == 0) {
			if (finalized)
				return POLLHUP;
			else {
				/*
				 * The memory barriers in
				 * __wait_event()/wake_up_interruptible()
				 * take care of the "raw_spin_is_locked"
				 * memory ordering.
				 */
				if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
					goto retry;
				else
					return 0;
			}
		} else {
			if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
					chan)
			    - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
					chan)
			    >= chan->backend.buf_size)
				return POLLPRI | POLLRDBAND;
			else
				return POLLIN | POLLRDNORM;
		}
	}
	return mask;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_poll);
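
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * a hypothetical user-space consumer holding the stream file descriptor
 * would typically map the poll mask computed above as follows. The name
 * "stream_fd" and the helper functions are placeholders, not real API.
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLHUP)
 *			stop_reading();		// finalized and empty
 *		else if (pfd.revents & (POLLPRI | POLLRDBAND))
 *			drain_full_buffer();	// buffer completely full
 *		else if (pfd.revents & (POLLIN | POLLRDNORM))
 *			read_subbuffer();	// at least one sub-buffer ready
 *	}
 *
 * A disabled channel shows up as POLLERR, matching the -EIO returned by
 * the ioctl path below.
 */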

/**
 * vfs_lib_ring_buffer_poll - ring buffer poll file operation
 * @filp: the file
 * @wait: poll table
 *
 * Poll implementation.
 */
static
unsigned int vfs_lib_ring_buffer_poll(struct file *filp, poll_table *wait)
{
	struct lib_ring_buffer *buf = filp->private_data;

	return lib_ring_buffer_poll(filp, wait, buf);
}

long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg, struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_SNAPSHOT:
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
					&buf->prod_snapshot);
	case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
		return put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
		return put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_GET_SUBBUF:
	{
		unsigned long uconsume;
		long ret;

		ret = get_user(uconsume, (unsigned long __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		ret = lib_ring_buffer_get_subbuf(buf, uconsume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_GET_SUBBUF_SIZE:
		return put_ulong(lib_ring_buffer_get_read_data_size(config, buf),
				arg);
	case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		return put_ulong(size, arg);
	}
	case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
		return put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > INT_MAX)
			return -EFBIG;
		return put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		sb_bindex = subbuffer_id_get_index(config,
					buf->backend.buf_rsb.id);
		return put_ulong(buf->backend.array[sb_bindex]->mmap_offset,
				arg);
	}
	case RING_BUFFER_FLUSH:
		lib_ring_buffer_switch_remote(buf);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_ioctl);

/**
 * vfs_lib_ring_buffer_ioctl - control ring buffer reader synchronization
 *
 * @filp: the file
 * @cmd: the command
 * @arg: command arg
 *
 * This ioctl implements commands necessary for producer/consumer
 * and flight recorder reader interaction:
 *	RING_BUFFER_GET_NEXT_SUBBUF
 *		Get the next sub-buffer that can be read. It never blocks.
 *	RING_BUFFER_PUT_NEXT_SUBBUF
 *		Release the currently read sub-buffer.
 *	RING_BUFFER_GET_SUBBUF_SIZE
 *		Returns the size of the current sub-buffer.
 *	RING_BUFFER_GET_MAX_SUBBUF_SIZE
 *		Returns the maximum size for sub-buffers.
 *	RING_BUFFER_GET_NUM_SUBBUF
 *		Returns the number of reader-visible sub-buffers in the
 *		per-cpu channel (for mmap).
 *	RING_BUFFER_GET_MMAP_READ_OFFSET
 *		Returns the offset of the sub-buffer belonging to the reader.
 *		Should only be used by mmap clients.
 */
static
long vfs_lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;

	return lib_ring_buffer_ioctl(filp, cmd, arg, buf);
}
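
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * the typical mmap consumer loop driven by the commands handled above.
 * The request names mirror the RING_BUFFER_* constants pulled in through
 * wrapper/ringbuffer/vfs.h; "stream_fd", "mapping" (obtained from a prior
 * mmap() of the stream fd) and consume() are assumed, user-space-side
 * names.
 *
 *	unsigned long len, off;
 *
 *	while (!ioctl(stream_fd, RING_BUFFER_GET_NEXT_SUBBUF)) {
 *		ioctl(stream_fd, RING_BUFFER_GET_SUBBUF_SIZE, &len);
 *		ioctl(stream_fd, RING_BUFFER_GET_MMAP_READ_OFFSET, &off);
 *		consume(mapping + off, len);	// read len bytes of data
 *		ioctl(stream_fd, RING_BUFFER_PUT_NEXT_SUBBUF);
 *	}
 *
 * The loop stops when GET_NEXT_SUBBUF fails, i.e. when no complete
 * sub-buffer is available for reading.
 */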

#ifdef CONFIG_COMPAT
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg, struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_COMPAT_SNAPSHOT:
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
					&buf->prod_snapshot);
	case RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED:
		return compat_put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED:
		return compat_put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_COMPAT_GET_SUBBUF:
	{
		__u32 uconsume;
		unsigned long consume;
		long ret;

		ret = get_user(uconsume, (__u32 __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		consume = buf->cons_snapshot;
		consume &= ~0xFFFFFFFFL;
		consume |= uconsume;
		ret = lib_ring_buffer_get_subbuf(buf, consume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_COMPAT_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_COMPAT_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_COMPAT_GET_SUBBUF_SIZE:
	{
		unsigned long data_size;

		data_size = lib_ring_buffer_get_read_data_size(config, buf);
		if (data_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(data_size, arg);
	}
	case RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		if (size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(size, arg);
	}
	case RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE:
		if (chan->backend.subbuf_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_COMPAT_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex, read_offset;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		sb_bindex = subbuffer_id_get_index(config,
					buf->backend.buf_rsb.id);
		read_offset = buf->backend.array[sb_bindex]->mmap_offset;
		if (read_offset > UINT_MAX)
			return -EINVAL;
		return compat_put_ulong(read_offset, arg);
	}
	case RING_BUFFER_COMPAT_FLUSH:
		lib_ring_buffer_switch_remote(buf);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_compat_ioctl);
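
/*
 * Note (editor's addition): in the compat GET_SUBBUF path above, a 32-bit
 * consumer can only pass the low 32 bits of the consumed position, so the
 * handler rebuilds the full value from the upper bits of the most recent
 * consumed snapshot. For example, assuming cons_snapshot == 0x123456000
 * and a user value of 0x23457000, the reconstructed position is
 * 0x123457000.
 */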

static
long vfs_lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;

	return lib_ring_buffer_compat_ioctl(filp, cmd, arg, buf);
}
#endif

const struct file_operations lib_ring_buffer_file_operations = {
	.owner = THIS_MODULE,
	.open = vfs_lib_ring_buffer_open,
	.release = vfs_lib_ring_buffer_release,
	.poll = vfs_lib_ring_buffer_poll,
	.splice_read = vfs_lib_ring_buffer_splice_read,
	.mmap = vfs_lib_ring_buffer_mmap,
	.unlocked_ioctl = vfs_lib_ring_buffer_ioctl,
	.llseek = vfs_lib_ring_buffer_no_llseek,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vfs_lib_ring_buffer_compat_ioctl,
#endif
};
EXPORT_SYMBOL_GPL(lib_ring_buffer_file_operations);
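
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * as the comment above lib_ring_buffer_open() notes, buffers are normally
 * exposed through anonymous file descriptors rather than a regular inode.
 * A client would typically wire the exported file operations to such a
 * descriptor along the following lines; the "[lttng_stream]" name and the
 * surrounding error handling are assumptions.
 *
 *	int fd;
 *
 *	fd = anon_inode_getfd("[lttng_stream]",
 *			&lib_ring_buffer_file_operations, buf, O_RDWR);
 *	if (fd < 0)
 *		return fd;
 */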

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Ring Buffer Library VFS");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);