u64 start_tsc; /* Channel creation TSC value */
void *priv; /* Client-specific information */
struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
- const struct lib_ring_buffer_config *config; /* Ring buffer configuration */
+ /*
+ * We need to copy the configuration because the client module
+ * that provides it can vanish before the last reference to this
+ * channel's streams is released.
+ */
+ struct lib_ring_buffer_config config; /* Ring buffer configuration */
cpumask_var_t cpumask; /* Allocated per-cpu buffers cpumask */
char name[NAME_MAX]; /* Channel name */
};
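/*
 * Illustrative sketch, not part of the patch: a minimal, self-contained
 * example of the ownership pattern adopted above.  The backend embeds a
 * copy of the caller's configuration, so the caller's storage (typically
 * a static object in a client module) can be freed or unloaded without
 * leaving a dangling pointer behind.  The struct and function names here
 * are hypothetical stand-ins, not the lib_ring_buffer API.
 */
struct cfg {
	int mode;
	int alloc;
};

struct backend {
	struct cfg config;	/* embedded copy, owned by the backend */
};

static void backend_init(struct backend *b, const struct cfg *caller_cfg)
{
	/* Copy by value; a memcpy() of sizeof(b->config) is equivalent. */
	b->config = *caller_cfg;
}

static int backend_mode(const struct backend *b)
{
	/* Readers take the address of the embedded copy, never the caller's. */
	const struct cfg *config = &b->config;

	return config->mode;
}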
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
struct channel_backend *chanb, int cpu)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
bufb->chan = container_of(chanb, struct channel, backend);
bufb->cpu = cpu;
void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned long num_subbuf_alloc;
unsigned int i;
void channel_backend_reset(struct channel_backend *chanb)
{
struct channel *chan = container_of(chanb, struct channel, backend);
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
/*
* Don't reset buf_size, subbuf_size, subbuf_size_order,
unsigned int cpu = (unsigned long)hcpu;
struct channel_backend *chanb = container_of(nb, struct channel_backend,
cpu_hp_notifier);
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
struct lib_ring_buffer *buf;
int ret;
(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
chanb->num_subbuf = num_subbuf;
strlcpy(chanb->name, name, NAME_MAX);
- chanb->config = config;
+ memcpy(&chanb->config, config, sizeof(chanb->config));
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
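/*
 * Illustrative sketch, not part of the patch: plain struct assignment is
 * an equivalent way to make the channel-owned copy performed by the
 * memcpy() above.  The helper name is hypothetical.
 */
static void channel_backend_copy_config(struct channel_backend *chanb,
					const struct lib_ring_buffer_config *config)
{
	chanb->config = *config;	/* same effect as the memcpy() call */
}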
*/
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
*/
void channel_backend_free(struct channel_backend *chanb)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned int i;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
const void *src, size_t len, ssize_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
int c, size_t len, ssize_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
ssize_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
void *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t index;
ssize_t pagecpy, orig_len;
struct lib_ring_buffer_backend_pages *rpages;
size_t offset, void __user *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t index;
ssize_t pagecpy;
struct lib_ring_buffer_backend_pages *rpages;
void *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t index;
ssize_t pagecpy, pagelen, strpagelen, orig_offset;
char *str;
size_t index;
struct lib_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
size_t index;
struct lib_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
{
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned int i;
/*
int lib_ring_buffer_create(struct lib_ring_buffer *buf,
struct channel_backend *chanb, int cpu)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
struct channel *chan = container_of(chanb, struct channel, backend);
void *priv = chanb->priv;
size_t subbuf_header_size;
{
struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
/*
* Only flush buffers periodically if readers are active.
static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
{
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (!chan->switch_timer_interval || buf->switch_timer_enabled)
return;
{
struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, !buf->backend.allocated);
static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
{
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
{
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
struct channel *chan = container_of(nb, struct channel,
cpu_hp_notifier);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (!chan->cpu_hp_enable)
return NOTIFY_DONE;
{
struct channel *chan = container_of(nb, struct channel,
tick_nohz_notifier);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
int cpu = smp_processor_id();
*/
static void channel_unregister_notifiers(struct channel *chan)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
int cpu;
channel_iterator_unregister_notifiers(chan);
void *channel_destroy(struct channel *chan)
{
int cpu;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
void *priv;
channel_unregister_notifiers(chan);
unsigned long *consumed, unsigned long *produced)
{
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, write_offset;
int finalized;
unsigned long consumed)
{
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret;
int finalized;
{
struct lib_ring_buffer_backend *bufb = &buf->backend;
struct channel *chan = bufb->chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long read_sb_bindex, consumed_idx, consumed;
CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
unsigned long cons_offset,
int cpu)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long cons_idx, commit_count, commit_count_sb;
cons_idx = subbuf_index(cons_offset, chan);
struct channel *chan,
void *priv, int cpu)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long write_offset, cons_offset;
/*
void lib_ring_buffer_print_errors(struct channel *chan,
struct lib_ring_buffer *buf, int cpu)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
void *priv = chan->backend.priv;
printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
struct switch_offsets *offsets,
u64 tsc)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old, chan);
unsigned long commit_count;
struct switch_offsets *offsets,
u64 tsc)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
unsigned long commit_count, padding_size, data_size;
struct switch_offsets *offsets,
u64 tsc)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long beginidx = subbuf_index(offsets->begin, chan);
unsigned long commit_count;
struct switch_offsets *offsets,
u64 tsc)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long endidx = subbuf_index(offsets->end - 1, chan);
unsigned long commit_count, padding_size, data_size;
struct switch_offsets *offsets,
u64 *tsc)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long off;
offsets->begin = v_read(config, &buf->offset);
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
{
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct switch_offsets offsets;
unsigned long oldidx;
u64 tsc;
struct switch_offsets *offsets,
struct lib_ring_buffer_ctx *ctx)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long reserve_commit_diff;
offsets->begin = v_read(config, &buf->offset);
int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
struct switch_offsets offsets;
int ret;
ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
struct lib_ring_buffer *buf)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer_iter *iter = &buf->iter;
int ret;
ssize_t channel_get_next_record(struct channel *chan,
struct lib_ring_buffer **ret_buf)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
struct lttng_ptr_heap *heap;
ssize_t len;
}
/* Add to list of buffers without any current record */
- if (chan->backend.config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ if (chan->backend.config.alloc == RING_BUFFER_ALLOC_PER_CPU)
list_add(&buf->iter.empty_node, &chan->iter.empty_head);
}
struct channel *chan = container_of(nb, struct channel,
hp_iter_notifier);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (!chan->hp_iter_enable)
return NOTIFY_DONE;
int channel_iterator_init(struct channel *chan)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
void channel_iterator_unregister_notifiers(struct channel *chan)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
chan->hp_iter_enable = 0;
void channel_iterator_free(struct channel *chan)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
lttng_heap_free(&chan->iter.heap);
int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
{
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
return lib_ring_buffer_open_read(buf);
}
int channel_iterator_open(struct channel *chan)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
int ret = 0, cpu;
void channel_iterator_release(struct channel *chan)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
int cpu;
void channel_iterator_reset(struct channel *chan)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
int cpu;
struct lib_ring_buffer *buf,
int fusionmerge)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
size_t read_count = 0, read_offset;
ssize_t len;
{
struct inode *inode = filp->f_dentry->d_inode;
struct channel *chan = inode->i_private;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
return channel_ring_buffer_file_read(filp, user_buf, count,
{
struct lib_ring_buffer *buf = vma->vm_private_data;
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
pgoff_t pgoff = vmf->pgoff;
struct page **page;
void **virt;
{
unsigned long length = vma->vm_end - vma->vm_start;
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long mmap_buf_len;
if (config->output != RING_BUFFER_MMAP)
{
struct lib_ring_buffer *buf = in->private_data;
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned int poff, subbuf_pages, nr_pages;
struct page *pages[PIPE_DEF_BUFFERS];
struct partial_page partial[PIPE_DEF_BUFFERS];
{
struct lib_ring_buffer *buf = in->private_data;
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
ssize_t spliced;
int ret;
unsigned int mask = 0;
struct lib_ring_buffer *buf = filp->private_data;
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
int finalized, disabled;
if (filp->f_mode & FMODE_READ) {
{
struct lib_ring_buffer *buf = filp->private_data;
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (lib_ring_buffer_channel_is_disabled(chan))
return -EIO;
{
struct lib_ring_buffer *buf = filp->private_data;
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (lib_ring_buffer_channel_is_disabled(chan))
return -EIO;