/*
 * ring_buffer_abi.c
 *
 * Copyright (C) 2009-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer VFS file operations.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include "backend.h"
#include "frontend.h"
#include <ust/ring-buffer-abi.h>

static int put_ulong(unsigned long val, unsigned long arg)
{
	return put_user(val, (unsigned long __user *)arg);
}

#ifdef CONFIG_COMPAT
static int compat_put_ulong(compat_ulong_t val, unsigned long arg)
{
	return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
}
#endif

/**
 * lib_ring_buffer_open - ring buffer open file operation
 * @inode: opened inode
 * @file: opened file
 *
 * Open implementation. Makes sure only one open instance of a buffer is
 * done at a given moment.
 */
int lib_ring_buffer_open(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = inode->i_private;
	int ret;

	ret = lib_ring_buffer_open_read(buf);
	if (ret)
		return ret;

	file->private_data = buf;
	ret = nonseekable_open(inode, file);
	if (ret)
		goto release_read;
	return 0;

release_read:
	lib_ring_buffer_release_read(buf);
	return ret;
}

/**
 * lib_ring_buffer_release - ring buffer release file operation
 * @inode: opened inode
 * @file: opened file
 *
 * Release implementation.
 */
int lib_ring_buffer_release(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = file->private_data;

	lib_ring_buffer_release_read(buf);

	return 0;
}

/**
 * lib_ring_buffer_poll - ring buffer poll file operation
 * @filp: the file
 * @wait: poll table
 *
 * Poll implementation.
 */
unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask = 0;
	struct lib_ring_buffer *buf = filp->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;
	int finalized, disabled;

	if (filp->f_mode & FMODE_READ) {
		poll_wait_set_exclusive(wait);
		poll_wait(filp, &buf->read_wait, wait);

		finalized = lib_ring_buffer_is_finalized(config, buf);
		disabled = lib_ring_buffer_channel_is_disabled(chan);

		/*
		 * lib_ring_buffer_is_finalized() contains a smp_rmb()
		 * ordering the finalized load before the offset loads.
		 */
		WARN_ON(atomic_long_read(&buf->active_readers) != 1);
retry:
		if (disabled)
			return POLLERR;

		if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
		    - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
		    == 0) {
			if (finalized)
				return POLLHUP;
			else {
				/*
				 * The memory barriers in
				 * __wait_event()/wake_up_interruptible() take
				 * care of "raw_spin_is_locked" memory ordering.
				 */
				if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
					goto retry;
				else
					return 0;
			}
		} else {
			if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
					 chan)
			    - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
					   chan)
			    >= chan->backend.buf_size)
				return POLLPRI | POLLRDBAND;
			else
				return POLLIN | POLLRDNORM;
		}
	}
	return mask;
}

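/*
 * Usage sketch: a minimal way a userspace reader could wait on this
 * descriptor and act on the masks returned above. The descriptor `fd`
 * and the two handler functions are illustrative assumptions, not part
 * of this file's ABI.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *
 *	if (poll(&pfd, 1, -1) < 0)
 *		err(1, "poll");
 *	if (pfd.revents & (POLLERR | POLLHUP))
 *		return;			// channel disabled, or finalized and empty
 *	if (pfd.revents & POLLPRI)
 *		drain_all_subbuffers();	// producer is a full buffer ahead (hypothetical helper)
 *	else if (pfd.revents & POLLIN)
 *		read_one_subbuffer();	// at least one sub-buffer is readable (hypothetical helper)
 */
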
/**
 * lib_ring_buffer_ioctl - control ring buffer reader synchronization
 *
 * @filp: the file
 * @cmd: the command
 * @arg: command arg
 *
 * This ioctl implements commands necessary for producer/consumer
 * and flight recorder reader interaction:
 *	RING_BUFFER_GET_NEXT_SUBBUF
 *		Get the next sub-buffer that can be read. It never blocks.
 *	RING_BUFFER_PUT_NEXT_SUBBUF
 *		Release the currently read sub-buffer.
 *	RING_BUFFER_GET_SUBBUF_SIZE
 *		Returns the size of the current sub-buffer.
 *	RING_BUFFER_GET_MAX_SUBBUF_SIZE
 *		Returns the maximum size for sub-buffers.
 *	RING_BUFFER_GET_NUM_SUBBUF
 *		Returns the number of reader-visible sub-buffers in the per cpu
 *		channel (for mmap).
 *	RING_BUFFER_GET_MMAP_READ_OFFSET
 *		Returns the offset of the subbuffer belonging to the reader.
 *		Should only be used for mmap clients.
 *
 * A consumer-loop usage sketch follows the function body below.
 */
long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_SNAPSHOT:
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
						&buf->prod_snapshot);
	case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
		return put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
		return put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_GET_SUBBUF:
	{
		unsigned long uconsume;
		long ret;

		ret = get_user(uconsume, (unsigned long __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		ret = lib_ring_buffer_get_subbuf(buf, uconsume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_GET_SUBBUF_SIZE:
		return put_ulong(lib_ring_buffer_get_read_data_size(config, buf),
				 arg);
	case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		return put_ulong(size, arg);
	}
	case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
		return put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > INT_MAX)
			return -EFBIG;
		return put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		sb_bindex = subbuffer_id_get_index(config,
						   buf->backend.buf_rsb.id);
		return put_ulong(buf->backend.array[sb_bindex]->mmap_offset,
				 arg);
	}
	case RING_BUFFER_FLUSH:
		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}

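/*
 * Usage sketch: a typical flight-recorder consumer loop built on the
 * commands above, moving each reader-owned sub-buffer to a pipe with
 * splice(2). The descriptors `fd` (buffer file) and `pipe_fd`, and the
 * error handling, are illustrative assumptions; the ioctl names and
 * their ordering follow the command documentation above.
 *
 *	for (;;) {
 *		unsigned long padded_len;
 *
 *		if (ioctl(fd, RING_BUFFER_GET_NEXT_SUBBUF) < 0)
 *			break;	// nothing to read yet: go back to poll()
 *		ioctl(fd, RING_BUFFER_GET_PADDED_SUBBUF_SIZE, &padded_len);
 *		splice(fd, NULL, pipe_fd, NULL, padded_len, SPLICE_F_MOVE);
 *		ioctl(fd, RING_BUFFER_PUT_NEXT_SUBBUF);
 *	}
 */
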
#ifdef CONFIG_COMPAT
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_SNAPSHOT:
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
						&buf->prod_snapshot);
	case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
		return compat_put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
		return compat_put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_GET_SUBBUF:
	{
		__u32 uconsume;
		unsigned long consume;
		long ret;

		ret = get_user(uconsume, (__u32 __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		/*
		 * The 32-bit ABI only passes the low 32 bits of the consumed
		 * count; rebuild the full value from the upper bits of the
		 * last consumed snapshot.
		 */
		consume = buf->cons_snapshot;
		consume &= ~0xFFFFFFFFL;
		consume |= uconsume;
		ret = lib_ring_buffer_get_subbuf(buf, consume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_GET_SUBBUF_SIZE:
	{
		unsigned long data_size;

		data_size = lib_ring_buffer_get_read_data_size(config, buf);
		if (data_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(data_size, arg);
	}
	case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		if (size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(size, arg);
	}
	case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
		if (chan->backend.subbuf_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex, read_offset;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		sb_bindex = subbuffer_id_get_index(config,
						   buf->backend.buf_rsb.id);
		read_offset = buf->backend.array[sb_bindex]->mmap_offset;
		if (read_offset > UINT_MAX)
			return -EINVAL;
		return compat_put_ulong(read_offset, arg);
	}
	case RING_BUFFER_FLUSH:
		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

const struct file_operations lib_ring_buffer_file_operations = {
	.open = lib_ring_buffer_open,
	.release = lib_ring_buffer_release,
	.poll = lib_ring_buffer_poll,
	.splice_read = lib_ring_buffer_splice_read,
	.mmap = lib_ring_buffer_mmap,
	.unlocked_ioctl = lib_ring_buffer_ioctl,
	.llseek = lib_ring_buffer_no_llseek,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lib_ring_buffer_compat_ioctl,
#endif
};
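
/*
 * Usage sketch: reading through mmap instead of splice, as allowed when
 * the client config selects RING_BUFFER_MMAP output. The descriptor `fd`,
 * the `process_subbuffer()` helper and the lack of error handling are
 * illustrative assumptions; the ioctl names and the mmap length/offset
 * semantics come from the GET_MMAP_* command handling above.
 *
 *	unsigned long len, off, size, consumed;
 *	char *base;
 *
 *	ioctl(fd, RING_BUFFER_GET_MMAP_LEN, &len);
 *	base = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *	ioctl(fd, RING_BUFFER_SNAPSHOT);
 *	ioctl(fd, RING_BUFFER_SNAPSHOT_GET_CONSUMED, &consumed);
 *	if (!ioctl(fd, RING_BUFFER_GET_SUBBUF, &consumed)) {
 *		ioctl(fd, RING_BUFFER_GET_SUBBUF_SIZE, &size);
 *		ioctl(fd, RING_BUFFER_GET_MMAP_READ_OFFSET, &off);
 *		process_subbuffer(base + off, size);	// hypothetical helper
 *		ioctl(fd, RING_BUFFER_PUT_SUBBUF);
 *	}
 */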