/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
 * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
 * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Re-using code from kernel/relay.c, which is why it is licensed under
 * the GPL-2.0.
 */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/splice.h>

#include <lttng/kernel-version.h>

#include <ringbuffer/backend.h>
#include <ringbuffer/frontend.h>
#include <ringbuffer/vfs.h>
/*
 * Debug printout helper. Compiled out by default: the disabled variant
 * expands to an `if (0)' call so the printf-style format string and its
 * arguments are still type-checked by the compiler, at zero runtime cost.
 */
#if 0
#define printk_dbg(fmt, args...)	printk(fmt, args)
#else
#define printk_dbg(fmt, args...)				\
	do {							\
		/* do nothing but check printf format */	\
		if (0)						\
			printk(fmt, ## args);			\
	} while (0)
#endif
33 loff_t
vfs_lib_ring_buffer_no_llseek(struct file
*file
, loff_t offset
,
38 EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_no_llseek
);
41 * Release pages from the buffer so splice pipe_to_file can move them.
42 * Called after the pipe has been populated with buffer pages.
44 static void lib_ring_buffer_pipe_buf_release(struct pipe_inode_info
*pipe
,
45 struct pipe_buffer
*pbuf
)
47 __free_page(pbuf
->page
);
50 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,8,0))
51 static const struct pipe_buf_operations ring_buffer_pipe_buf_ops
= {
52 .release
= lib_ring_buffer_pipe_buf_release
,
53 .try_steal
= generic_pipe_buf_try_steal
,
54 .get
= generic_pipe_buf_get
56 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0))
57 static const struct pipe_buf_operations ring_buffer_pipe_buf_ops
= {
58 .confirm
= generic_pipe_buf_confirm
,
59 .release
= lib_ring_buffer_pipe_buf_release
,
60 .steal
= generic_pipe_buf_steal
,
61 .get
= generic_pipe_buf_get
64 static const struct pipe_buf_operations ring_buffer_pipe_buf_ops
= {
66 .confirm
= generic_pipe_buf_confirm
,
67 .release
= lib_ring_buffer_pipe_buf_release
,
68 .steal
= generic_pipe_buf_steal
,
69 .get
= generic_pipe_buf_get
74 * Page release operation after splice pipe_to_file ends.
76 static void lib_ring_buffer_page_release(struct splice_pipe_desc
*spd
,
79 __free_page(spd
->pages
[i
]);
83 * subbuf_splice_actor - splice up to one subbuf's worth of data
85 static int subbuf_splice_actor(struct file
*in
,
87 struct pipe_inode_info
*pipe
,
90 struct lttng_kernel_ring_buffer
*buf
)
92 struct lttng_kernel_ring_buffer_channel
*chan
= buf
->backend
.chan
;
93 const struct lttng_kernel_ring_buffer_config
*config
= &chan
->backend
.config
;
94 unsigned int poff
, subbuf_pages
, nr_pages
;
95 struct page
*pages
[PIPE_DEF_BUFFERS
];
96 struct partial_page partial
[PIPE_DEF_BUFFERS
];
97 struct splice_pipe_desc spd
= {
101 #if (LTTNG_LINUX_VERSION_CODE < LTTNG_KERNEL_VERSION(4,12,0))
104 .ops
= &ring_buffer_pipe_buf_ops
,
105 .spd_release
= lib_ring_buffer_page_release
,
107 unsigned long consumed_old
, roffset
;
108 unsigned long bytes_avail
;
111 * Check that a GET_SUBBUF ioctl has been done before.
113 WARN_ON(atomic_long_read(&buf
->active_readers
) != 1);
114 consumed_old
= lib_ring_buffer_get_consumed(config
, buf
);
115 consumed_old
+= *ppos
;
118 * Adjust read len, if longer than what is available.
119 * Max read size is 1 subbuffer due to get_subbuf/put_subbuf for
122 bytes_avail
= chan
->backend
.subbuf_size
;
123 WARN_ON(bytes_avail
> chan
->backend
.buf_size
);
124 len
= min_t(size_t, len
, bytes_avail
);
125 subbuf_pages
= bytes_avail
>> PAGE_SHIFT
;
126 nr_pages
= min_t(unsigned int, subbuf_pages
, PIPE_DEF_BUFFERS
);
127 roffset
= consumed_old
& PAGE_MASK
;
128 poff
= consumed_old
& ~PAGE_MASK
;
129 printk_dbg(KERN_DEBUG
"LTTng: SPLICE actor len %zu pos %zd write_pos %ld\n",
130 len
, (ssize_t
)*ppos
, lib_ring_buffer_get_offset(config
, buf
));
132 for (; spd
.nr_pages
< nr_pages
; spd
.nr_pages
++) {
133 unsigned int this_len
;
134 unsigned long *pfnp
, new_pfn
;
135 struct page
*new_page
;
140 printk_dbg(KERN_DEBUG
"LTTng: SPLICE actor loop len %zu roffset %ld\n",
144 * We have to replace the page we are moving into the splice
147 new_page
= alloc_pages_node(cpu_to_node(max(buf
->backend
.cpu
,
149 GFP_KERNEL
| __GFP_ZERO
, 0);
152 new_pfn
= page_to_pfn(new_page
);
153 this_len
= PAGE_SIZE
- poff
;
154 pfnp
= lib_ring_buffer_read_get_pfn(&buf
->backend
, roffset
, &virt
);
155 spd
.pages
[spd
.nr_pages
] = pfn_to_page(*pfnp
);
157 *virt
= page_address(new_page
);
158 spd
.partial
[spd
.nr_pages
].offset
= poff
;
159 spd
.partial
[spd
.nr_pages
].len
= this_len
;
162 roffset
+= PAGE_SIZE
;
169 return splice_to_pipe(pipe
, &spd
);
172 ssize_t
lib_ring_buffer_splice_read(struct file
*in
, loff_t
*ppos
,
173 struct pipe_inode_info
*pipe
, size_t len
,
175 struct lttng_kernel_ring_buffer
*buf
)
177 struct lttng_kernel_ring_buffer_channel
*chan
= buf
->backend
.chan
;
178 const struct lttng_kernel_ring_buffer_config
*config
= &chan
->backend
.config
;
182 if (config
->output
!= RING_BUFFER_SPLICE
)
186 * We require ppos and length to be page-aligned for performance reasons
187 * (no page copy). Size is known using the ioctl
188 * RING_BUFFER_GET_PADDED_SUBBUF_SIZE, which is page-size padded.
189 * We fail when the ppos or len passed is not page-sized, because splice
190 * is not allowed to copy more than the length passed as parameter (so
191 * the ABI does not let us silently copy more than requested to include
194 if (*ppos
!= PAGE_ALIGN(*ppos
) || len
!= PAGE_ALIGN(len
))
200 printk_dbg(KERN_DEBUG
"LTTng: SPLICE read len %zu pos %zd\n", len
,
202 while (len
&& !spliced
) {
203 ret
= subbuf_splice_actor(in
, ppos
, pipe
, len
, flags
, buf
);
204 printk_dbg(KERN_DEBUG
"LTTng: SPLICE read loop ret %d\n", ret
);
208 if (flags
& SPLICE_F_NONBLOCK
)
226 EXPORT_SYMBOL_GPL(lib_ring_buffer_splice_read
);
228 ssize_t
vfs_lib_ring_buffer_splice_read(struct file
*in
, loff_t
*ppos
,
229 struct pipe_inode_info
*pipe
, size_t len
,
232 struct lttng_kernel_ring_buffer
*buf
= in
->private_data
;
234 return lib_ring_buffer_splice_read(in
, ppos
, pipe
, len
, flags
, buf
);
236 EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_splice_read
);