*/
#include <ringbuffer/iterator.h>
+#include <wrapper/cpu.h>
#include <wrapper/file.h>
#include <wrapper/uaccess.h>
#include <linux/jiffies.h>
* Returns the size of the event read, -EAGAIN if buffer is empty, -ENODATA if
* buffer is empty and finalized. The buffer must already be opened for reading.
*/
-ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
- struct lib_ring_buffer *buf)
+ssize_t lib_ring_buffer_get_next_record(struct lttng_kernel_ring_buffer_channel *chan,
+ struct lttng_kernel_ring_buffer *buf)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer_iter *iter = &buf->iter;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_iter *iter = &buf->iter;
int ret;
restart:
switch (iter->state) {
case ITER_GET_SUBBUF:
ret = lib_ring_buffer_get_next_subbuf(buf);
- if (ret && !READ_ONCE(buf->finalized)
+ if (ret && !LTTNG_READ_ONCE(buf->finalized)
&& config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
/*
* Use "pull" scheme for global buffers. The reader
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
+/*
+ * lib_ring_buffer_put_current_record - release the record currently held
+ * by the buffer iterator and advance the read position past its payload.
+ *
+ * No-op when @buf is NULL or when the iterator is not holding a record
+ * (state != ITER_NEXT_RECORD).  After advancing, the iterator moves to
+ * ITER_TEST_RECORD; if the advance consumed the last record of the
+ * current sub-buffer, the sub-buffer is put back and the iterator
+ * returns to ITER_GET_SUBBUF so the next read fetches a fresh one.
+ */
+void lib_ring_buffer_put_current_record(struct lttng_kernel_ring_buffer *buf)
+{
+	struct lttng_kernel_ring_buffer_iter *iter;
+
+	if (!buf)
+		return;
+	iter = &buf->iter;
+	if (iter->state != ITER_NEXT_RECORD)
+		return;
+	iter->read_offset += iter->payload_len;
+	iter->state = ITER_TEST_RECORD;
+	/* Entire sub-buffer consumed: release it before the next fetch. */
+	if (iter->read_offset - iter->consumed >= iter->data_size) {
+		lib_ring_buffer_put_next_subbuf(buf);
+		iter->state = ITER_GET_SUBBUF;
+	}
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_put_current_record);
+
+/*
+ * Heap ordering callback for the per-channel buffer heap: nonzero when
+ * @a should sit above @b, i.e. the buffer with the lowest iterator
+ * timestamp reaches the top so records are merged in time order.
+ */
static int buf_is_higher(void *a, void *b)
{
-	struct lib_ring_buffer *bufa = a;
-	struct lib_ring_buffer *bufb = b;
+	struct lttng_kernel_ring_buffer *bufa = a;
+	struct lttng_kernel_ring_buffer *bufb = b;
	/* Consider lowest timestamps to be at the top of the heap */
	return (bufa->iter.timestamp < bufb->iter.timestamp);
}
static
-void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
- struct channel *chan)
+void lib_ring_buffer_get_empty_buf_records(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_channel *chan)
{
struct lttng_ptr_heap *heap = &chan->iter.heap;
- struct lib_ring_buffer *buf, *tmp;
+ struct lttng_kernel_ring_buffer *buf, *tmp;
ssize_t len;
list_for_each_entry_safe(buf, tmp, &chan->iter.empty_head,
}
static
-void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
- struct channel *chan)
+void lib_ring_buffer_wait_for_qs(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_channel *chan)
{
u64 timestamp_qs;
unsigned long wait_msecs;
* opened for reading.
*/
-ssize_t channel_get_next_record(struct channel *chan,
- struct lib_ring_buffer **ret_buf)
+ssize_t channel_get_next_record(struct lttng_kernel_ring_buffer_channel *chan,
+ struct lttng_kernel_ring_buffer **ret_buf)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
struct lttng_ptr_heap *heap;
ssize_t len;
EXPORT_SYMBOL_GPL(channel_get_next_record);
static
-void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer *buf)
+void lib_ring_buffer_iterator_init(struct lttng_kernel_ring_buffer_channel *chan, struct lttng_kernel_ring_buffer *buf)
{
if (buf->iter.allocated)
return;
list_add(&buf->iter.empty_node, &chan->iter.empty_head);
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
int lttng_cpuhp_rb_iter_online(unsigned int cpu,
struct lttng_cpuhp_node *node)
{
- struct channel *chan = container_of(node, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_iter_online);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_iter_online);
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
static
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct channel *chan = container_of(nb, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
hp_iter_notifier);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (!chan->hp_iter_enable)
return NOTIFY_DONE;
}
#endif
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
-int channel_iterator_init(struct channel *chan)
+int channel_iterator_init(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
int ret;
if (ret)
return ret;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
chan->cpuhp_iter_online.component = LTTNG_RING_BUFFER_ITER;
ret = cpuhp_state_add_instance(lttng_rb_hp_online,
&chan->cpuhp_iter_online.node);
if (ret)
return ret;
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
{
int cpu;
chan->hp_iter_notifier.priority = 10;
register_cpu_notifier(&chan->hp_iter_notifier);
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_online_cpu(cpu) {
buf = per_cpu_ptr(chan->backend.buf, cpu);
lib_ring_buffer_iterator_init(chan, buf);
}
chan->hp_iter_enable = 1;
- put_online_cpus();
+ lttng_cpus_read_unlock();
#else
for_each_possible_cpu(cpu) {
buf = per_cpu_ptr(chan->backend.buf, cpu);
}
#endif
}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
} else {
buf = channel_get_ring_buffer(config, chan, 0);
lib_ring_buffer_iterator_init(chan, buf);
return 0;
}
-void channel_iterator_unregister_notifiers(struct channel *chan)
+void channel_iterator_unregister_notifiers(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
{
int ret;
&chan->cpuhp_iter_online.node);
WARN_ON(ret);
}
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
chan->hp_iter_enable = 0;
unregister_cpu_notifier(&chan->hp_iter_notifier);
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
}
}
-void channel_iterator_free(struct channel *chan)
+void channel_iterator_free(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
lttng_heap_free(&chan->iter.heap);
}
-int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
+int lib_ring_buffer_iterator_open(struct lttng_kernel_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
return lib_ring_buffer_open_read(buf);
}
* iterator can leave the buffer in "GET" state, which is not consistent with
* other types of output (mmap, splice, raw data read).
*/
-void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf)
+void lib_ring_buffer_iterator_release(struct lttng_kernel_ring_buffer *buf)
{
lib_ring_buffer_release_read(buf);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
-int channel_iterator_open(struct channel *chan)
+int channel_iterator_open(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
int ret = 0, cpu;
CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- get_online_cpus();
+ lttng_cpus_read_lock();
/* Allow CPU hotplug to keep track of opened reader */
chan->iter.read_open = 1;
for_each_channel_cpu(cpu, chan) {
goto error;
buf->iter.read_open = 1;
}
- put_online_cpus();
+ lttng_cpus_read_unlock();
} else {
buf = channel_get_ring_buffer(config, chan, 0);
ret = lib_ring_buffer_iterator_open(buf);
error:
/* Error should always happen on CPU 0, hence no close is required. */
CHAN_WARN_ON(chan, cpu != 0);
- put_online_cpus();
+ lttng_cpus_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(channel_iterator_open);
-void channel_iterator_release(struct channel *chan)
+void channel_iterator_release(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
int cpu;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_channel_cpu(cpu, chan) {
buf = channel_get_ring_buffer(config, chan, cpu);
if (buf->iter.read_open) {
}
}
chan->iter.read_open = 0;
- put_online_cpus();
+ lttng_cpus_read_unlock();
} else {
buf = channel_get_ring_buffer(config, chan, 0);
lib_ring_buffer_iterator_release(buf);
}
EXPORT_SYMBOL_GPL(channel_iterator_release);
-void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
+void lib_ring_buffer_iterator_reset(struct lttng_kernel_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
if (buf->iter.state != ITER_GET_SUBBUF)
lib_ring_buffer_put_next_subbuf(buf);
/* Don't reset allocated and read_open */
}
-void channel_iterator_reset(struct channel *chan)
+void channel_iterator_reset(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
int cpu;
/* Empty heap, put into empty_head */
char __user *user_buf,
size_t count,
loff_t *ppos,
- struct channel *chan,
- struct lib_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan,
+ struct lttng_kernel_ring_buffer *buf,
int fusionmerge)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
size_t read_count = 0, read_offset;
ssize_t len;
return -EFAULT;
}
read_count += copy_len;
- };
- return read_count;
+ }
+ goto put_record;
nodata:
*ppos = 0;
chan->iter.len_left = 0;
+put_record:
+ lib_ring_buffer_put_current_record(buf);
return read_count;
}
loff_t *ppos)
{
struct inode *inode = filp->lttng_f_dentry->d_inode;
- struct lib_ring_buffer *buf = inode->i_private;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer *buf = inode->i_private;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
return channel_ring_buffer_file_read(filp, user_buf, count, ppos,
chan, buf, 0);
loff_t *ppos)
{
struct inode *inode = filp->lttng_f_dentry->d_inode;
- struct channel *chan = inode->i_private;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_channel *chan = inode->i_private;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
return channel_ring_buffer_file_read(filp, user_buf, count,
ppos, chan, NULL, 1);
else {
- struct lib_ring_buffer *buf =
+ struct lttng_kernel_ring_buffer *buf =
channel_get_ring_buffer(config, chan, 0);
return channel_ring_buffer_file_read(filp, user_buf, count,
ppos, chan, buf, 0);
static
int lib_ring_buffer_file_open(struct inode *inode, struct file *file)
{
- struct lib_ring_buffer *buf = inode->i_private;
+ struct lttng_kernel_ring_buffer *buf = inode->i_private;
int ret;
ret = lib_ring_buffer_iterator_open(buf);
static
int lib_ring_buffer_file_release(struct inode *inode, struct file *file)
{
- struct lib_ring_buffer *buf = inode->i_private;
+ struct lttng_kernel_ring_buffer *buf = inode->i_private;
lib_ring_buffer_iterator_release(buf);
return 0;
static
int channel_file_open(struct inode *inode, struct file *file)
{
- struct channel *chan = inode->i_private;
+ struct lttng_kernel_ring_buffer_channel *chan = inode->i_private;
int ret;
ret = channel_iterator_open(chan);
static
int channel_file_release(struct inode *inode, struct file *file)
{
- struct channel *chan = inode->i_private;
+ struct lttng_kernel_ring_buffer_channel *chan = inode->i_private;
channel_iterator_release(chan);
return 0;