enum channel_type channel_type;
struct lttng_kernel_ctx *ctx;
- struct channel *rb_chan; /* Ring buffer channel */
+ struct lttng_kernel_ring_buffer_channel *rb_chan; /* Ring buffer channel */
unsigned int metadata_dumped:1;
struct list_head node; /* Channel list in session */
struct lttng_transport *transport;
struct lttng_kernel_channel_buffer_ops_private {
struct lttng_kernel_channel_buffer_ops *pub; /* Public channel buffer ops interface */
- struct channel *(*channel_create)(const char *name,
+ struct lttng_kernel_ring_buffer_channel *(*channel_create)(const char *name,
void *priv,
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval);
- void (*channel_destroy)(struct channel *chan);
- struct lib_ring_buffer *(*buffer_read_open)(struct channel *chan);
- int (*buffer_has_read_closed_stream)(struct channel *chan);
+ void (*channel_destroy)(struct lttng_kernel_ring_buffer_channel *chan);
+ struct lib_ring_buffer *(*buffer_read_open)(struct lttng_kernel_ring_buffer_channel *chan);
+ int (*buffer_has_read_closed_stream)(struct lttng_kernel_ring_buffer_channel *chan);
void (*buffer_read_close)(struct lib_ring_buffer *buf);
/*
* packet_avail_size returns the available size in the current
* packet. Note that the size returned is only a hint, since it
* may change due to concurrent writes.
*/
- size_t (*packet_avail_size)(struct channel *chan);
- wait_queue_head_t *(*get_writer_buf_wait_queue)(struct channel *chan, int cpu);
- wait_queue_head_t *(*get_hp_wait_queue)(struct channel *chan);
- int (*is_finalized)(struct channel *chan);
- int (*is_disabled)(struct channel *chan);
+ size_t (*packet_avail_size)(struct lttng_kernel_ring_buffer_channel *chan);
+ wait_queue_head_t *(*get_writer_buf_wait_queue)(struct lttng_kernel_ring_buffer_channel *chan, int cpu);
+ wait_queue_head_t *(*get_hp_wait_queue)(struct lttng_kernel_ring_buffer_channel *chan);
+ int (*is_finalized)(struct lttng_kernel_ring_buffer_channel *chan);
+ int (*is_disabled)(struct lttng_kernel_ring_buffer_channel *chan);
int (*timestamp_begin) (const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *bufb,
uint64_t *timestamp_begin);
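/*
 * Illustrative sketch (editorial addition, not part of the patch): the
 * packet_avail_size() value documented above is only a hint, so a caller
 * clamps its reservation length to it and still handles reservation
 * failure; the metadata output path later in this patch is expected to use
 * the callback this way. example_clamp_len() is a hypothetical helper for
 * the example.
 */
static size_t example_clamp_len(struct lttng_kernel_channel_buffer_ops_private *ops_priv,
		struct lttng_kernel_ring_buffer_channel *chan, size_t want)
{
	/* Clamp to the hint; concurrent writers may shrink it again before the reserve. */
	return min_t(size_t, want, ops_priv->packet_avail_size(chan));
}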
struct lttng_event_notifier_ht event_notifiers_ht; /* Hash table of event notifiers */
struct lttng_kernel_channel_buffer_ops *ops;
struct lttng_transport *transport;
- struct channel *chan; /* Ring buffer channel for event notifier group. */
+ struct lttng_kernel_ring_buffer_channel *chan; /* Ring buffer channel for event notifier group. */
struct lib_ring_buffer *buf; /* Ring buffer for event notifier group. */
wait_queue_head_t read_wait;
struct irq_work wakeup_pending; /* Pending wakeup irq work. */
void lttng_probes_exit(void);
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
- struct channel *chan, bool *coherent);
+ struct lttng_kernel_ring_buffer_channel *chan, bool *coherent);
int lttng_id_tracker_get_node_id(const struct lttng_id_hash_node *node);
int lttng_id_tracker_empty_set(struct lttng_kernel_id_tracker *lf);
/*
* Forward declaration of frontend-specific channel and ring_buffer.
*/
-struct channel;
+struct lttng_kernel_ring_buffer_channel;
struct lib_ring_buffer;
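/*
 * Editorial note (not part of the patch): only code that dereferences
 * channel members needs the full struct lttng_kernel_ring_buffer_channel
 * definition (renamed further below in this patch); everything else
 * compiles against this forward declaration and passes the channel around
 * as an opaque pointer, which is why the rename is mostly a mechanical
 * substitution of the type name at call sites.
 */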
struct lib_ring_buffer_backend {
struct lib_ring_buffer_backend_pages **array;
unsigned int num_pages_per_subbuf;
- struct channel *chan; /* Associated channel */
+ struct lttng_kernel_ring_buffer_channel *chan; /* Associated channel */
int cpu; /* This buffer's cpu. -1 if global. */
union v_atomic records_read; /* Number of records read */
unsigned int allocated:1; /* is buffer allocated ? */
#include <lttng/tracer-core.h>
struct lib_ring_buffer;
-struct channel;
+struct lttng_kernel_ring_buffer_channel;
struct lib_ring_buffer_config;
struct lttng_kernel_ring_buffer_ctx;
struct lttng_kernel_ring_buffer_ctx_private;
/* Mandatory callbacks */
/* A static inline version is also required for fast path */
- u64 (*ring_buffer_clock_read) (struct channel *chan);
+ u64 (*ring_buffer_clock_read) (struct lttng_kernel_ring_buffer_channel *chan);
size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
- struct channel *chan, size_t offset,
+ struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx);
* iterator.
*/
void (*record_get) (const struct lib_ring_buffer_config *config,
- struct channel *chan, struct lib_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan, struct lib_ring_buffer *buf,
size_t offset, size_t *header_len,
size_t *payload_len, u64 *timestamp);
};
struct lttng_kernel_ring_buffer_ctx_private {
/* input received by lib_ring_buffer_reserve(). */
- struct channel *chan; /* ring buffer channel */
+ struct lttng_kernel_ring_buffer_channel *chan; /* ring buffer channel */
/* output from lib_ring_buffer_reserve() */
int reserve_cpu; /* processor id updated by the reserve */
*/
extern
-struct channel *channel_create(const struct lib_ring_buffer_config *config,
+struct lttng_kernel_ring_buffer_channel *channel_create(const struct lib_ring_buffer_config *config,
const char *name, void *priv,
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
* channel.
*/
extern
-void *channel_destroy(struct channel *chan);
+void *channel_destroy(struct lttng_kernel_ring_buffer_channel *chan);
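/*
 * Lifecycle sketch (editorial; struct my_client_state is a hypothetical
 * client type): the pointer passed as priv to channel_create() is handed
 * back by channel_destroy(), so a client releases its own state only
 * after the channel is torn down.
 */
static void example_teardown(struct lttng_kernel_ring_buffer_channel *chan)
{
	struct my_client_state *state = channel_destroy(chan);

	kfree(state);	/* release whatever was passed as priv at create time */
}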
/* Buffer read operations */
extern struct lib_ring_buffer *channel_get_ring_buffer(
const struct lib_ring_buffer_config *config,
- struct channel *chan, int cpu);
+ struct lttng_kernel_ring_buffer_channel *chan, int cpu);
extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);
unsigned long consumed);
extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);
-void lib_ring_buffer_set_quiescent_channel(struct channel *chan);
-void lib_ring_buffer_clear_quiescent_channel(struct channel *chan);
+void lib_ring_buffer_set_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan);
+void lib_ring_buffer_clear_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan);
/*
* lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
buf->backend.chan));
}
-extern void channel_reset(struct channel *chan);
+extern void channel_reset(struct lttng_kernel_ring_buffer_channel *chan);
extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);
static inline
}
static inline
-int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
+int lib_ring_buffer_channel_is_finalized(const struct lttng_kernel_ring_buffer_channel *chan)
{
return chan->finalized;
}
static inline
-int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
+int lib_ring_buffer_channel_is_disabled(const struct lttng_kernel_ring_buffer_channel *chan)
{
return atomic_read(&chan->record_disabled);
}
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
- struct channel *chan = ctx->priv.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
struct lib_ring_buffer *buf = ctx->priv.buf;
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct channel *chan = ctx->priv.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
struct lib_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
const struct lttng_kernel_ring_buffer_ctx *ctx)
{
- struct channel *chan = ctx->priv.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
struct lib_ring_buffer *buf = ctx->priv.buf;
unsigned long offset_end = ctx->priv.buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
static inline
void channel_record_disable(const struct lib_ring_buffer_config *config,
- struct channel *chan)
+ struct lttng_kernel_ring_buffer_channel *chan)
{
atomic_inc(&chan->record_disabled);
}
static inline
void channel_record_enable(const struct lib_ring_buffer_config *config,
- struct channel *chan)
+ struct lttng_kernel_ring_buffer_channel *chan)
{
atomic_dec(&chan->record_disabled);
}
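/*
 * Illustrative sketch (editorial): record_disabled is a nesting counter,
 * so disable/enable calls must balance; reservation paths observe it via
 * lib_ring_buffer_channel_is_disabled() shown earlier and are expected to
 * refuse new records while it is non-zero.
 */
static void example_pause_records(const struct lib_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer_channel *chan)
{
	channel_record_disable(config, chan);
	/* ... no new records are accepted on this channel here ... */
	channel_record_enable(config, chan);
}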
/* buf_trunc mask selects only the buffer number. */
static inline
-unsigned long buf_trunc(unsigned long offset, struct channel *chan)
+unsigned long buf_trunc(unsigned long offset, struct lttng_kernel_ring_buffer_channel *chan)
{
return offset & ~(chan->backend.buf_size - 1);
/* Select the buffer number value (counter). */
static inline
-unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
+unsigned long buf_trunc_val(unsigned long offset, struct lttng_kernel_ring_buffer_channel *chan)
{
return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}
/* buf_offset mask selects only the offset within the current buffer. */
static inline
-unsigned long buf_offset(unsigned long offset, struct channel *chan)
+unsigned long buf_offset(unsigned long offset, struct lttng_kernel_ring_buffer_channel *chan)
{
return offset & (chan->backend.buf_size - 1);
}
/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
-unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
+unsigned long subbuf_offset(unsigned long offset, struct lttng_kernel_ring_buffer_channel *chan)
{
return offset & (chan->backend.subbuf_size - 1);
}
/* subbuf_trunc mask selects the subbuffer number. */
static inline
-unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
+unsigned long subbuf_trunc(unsigned long offset, struct lttng_kernel_ring_buffer_channel *chan)
{
return offset & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_align aligns the offset to the next subbuffer. */
static inline
-unsigned long subbuf_align(unsigned long offset, struct channel *chan)
+unsigned long subbuf_align(unsigned long offset, struct lttng_kernel_ring_buffer_channel *chan)
{
return (offset + chan->backend.subbuf_size)
& ~(chan->backend.subbuf_size - 1);
/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
-unsigned long subbuf_index(unsigned long offset, struct channel *chan)
+unsigned long subbuf_index(unsigned long offset, struct lttng_kernel_ring_buffer_channel *chan)
{
return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
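/*
 * Worked example (editorial) for the masks above, with subbuf_size = 4 kB
 * (subbuf_size_order = 12), 4 sub-buffers, buf_size = 16 kB, and a write
 * offset of 9300:
 *
 *   buf_trunc(9300, chan)     = 9300 & ~16383 = 0
 *   buf_offset(9300, chan)    = 9300 &  16383 = 9300
 *   subbuf_trunc(9300, chan)  = 9300 & ~4095  = 8192
 *   subbuf_offset(9300, chan) = 9300 &  4095  = 1108
 *   subbuf_align(9300, chan)  = (9300 + 4096) & ~4095 = 12288
 *   subbuf_index(9300, chan)  = 9300 >> 12    = 2
 *
 * All sizes are powers of two, which is what makes the mask arithmetic
 * valid.
 */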
extern
void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
static inline
void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
unsigned long offset)
{
unsigned long consumed_old, consumed_new;
*/
static inline
void lib_ring_buffer_clear_reader(struct lib_ring_buffer *buf,
- struct channel *chan)
+ struct lttng_kernel_ring_buffer_channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long offset, consumed_old, consumed_new;
static inline
int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
- struct channel *chan)
+ struct lttng_kernel_ring_buffer_channel *chan)
{
return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
static inline
int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
- struct channel *chan)
+ struct lttng_kernel_ring_buffer_channel *chan)
{
unsigned long offset, idx, commit_count;
static inline
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
static inline
void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
unsigned long buf_offset,
unsigned long commit_count,
struct commit_counters_hot *cc_hot)
};
/* channel: collection of per-cpu ring buffers. */
-struct channel {
+struct lttng_kernel_ring_buffer_channel {
atomic_t record_disabled;
unsigned long commit_count_mask; /*
* Commit count mask, removing
};
static inline
-void *channel_get_private(struct channel *chan)
+void *channel_get_private(struct lttng_kernel_ring_buffer_channel *chan)
{
return chan->backend.priv;
}
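/*
 * Usage sketch (editorial): clients recover their own state from the
 * channel private pointer, exactly as the client stream-id callback later
 * in this patch does.
 */
static void example_client_hook(struct lttng_kernel_ring_buffer_channel *chan)
{
	struct lttng_kernel_channel_buffer *lttng_chan = channel_get_private(chan);

	(void) lttng_chan;	/* e.g. read lttng_chan->priv->id */
}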
-void lib_ring_buffer_lost_event_too_big(struct channel *chan);
+void lib_ring_buffer_lost_event_too_big(struct lttng_kernel_ring_buffer_channel *chan);
/*
* Issue warnings and disable channels upon internal error.
*/
#define CHAN_WARN_ON(c, cond) \
({ \
- struct channel *__chan; \
+ struct lttng_kernel_ring_buffer_channel *__chan; \
int _____ret = unlikely(cond); \
if (_____ret) { \
if (__same_type(*(c), struct channel_backend)) \
__chan = container_of((void *) (c), \
- struct channel, \
+ struct lttng_kernel_ring_buffer_channel, \
backend); \
- else if (__same_type(*(c), struct channel)) \
+ else if (__same_type(*(c), struct lttng_kernel_ring_buffer_channel)) \
__chan = (void *) (c); \
else \
BUG_ON(1); \
* currently no data available, or -ENODATA if no data is available and buffer
* is finalized.
*/
-extern ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
+extern ssize_t lib_ring_buffer_get_next_record(struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf);
/*
* finalized.
* Returns the current buffer in ret_buf.
*/
-extern ssize_t channel_get_next_record(struct channel *chan,
+extern ssize_t channel_get_next_record(struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer **ret_buf);
/**
extern int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf);
extern void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf);
-extern int channel_iterator_open(struct channel *chan);
-extern void channel_iterator_release(struct channel *chan);
+extern int channel_iterator_open(struct lttng_kernel_ring_buffer_channel *chan);
+extern void channel_iterator_release(struct lttng_kernel_ring_buffer_channel *chan);
extern const struct file_operations channel_payload_file_operations;
extern const struct file_operations lib_ring_buffer_payload_file_operations;
/*
* Used internally.
*/
-int channel_iterator_init(struct channel *chan);
-void channel_iterator_unregister_notifiers(struct channel *chan);
-void channel_iterator_free(struct channel *chan);
-void channel_iterator_reset(struct channel *chan);
+int channel_iterator_init(struct lttng_kernel_ring_buffer_channel *chan);
+void channel_iterator_unregister_notifiers(struct lttng_kernel_ring_buffer_channel *chan);
+void channel_iterator_free(struct lttng_kernel_ring_buffer_channel *chan);
+void channel_iterator_reset(struct lttng_kernel_ring_buffer_channel *chan);
void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf);
#endif /* _LIB_RING_BUFFER_ITERATOR_H */
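/*
 * Read-side sketch (editorial): a consumer of the iterator interface opens
 * the channel iterator, then pulls records until the channel is finalized,
 * following the return conventions documented above (-EAGAIN: no data for
 * now, -ENODATA: empty and finalized). A real caller would sleep instead
 * of spinning on -EAGAIN.
 */
static void example_drain(struct lttng_kernel_ring_buffer_channel *chan)
{
	struct lib_ring_buffer *buf;
	ssize_t len;

	if (channel_iterator_open(chan))
		return;
	for (;;) {
		len = channel_get_next_record(chan, &buf);
		if (len == -ENODATA)
			break;		/* finalized and empty */
		if (len == -EAGAIN)
			continue;	/* wait for more data in real code */
		if (len < 0)
			break;		/* other error */
		/* consume len bytes of the current record through buf */
	}
	channel_iterator_release(chan);
}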
{
const struct lib_ring_buffer_config *config = &chanb->config;
- bufb->chan = container_of(chanb, struct channel, backend);
+ bufb->chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
bufb->cpu = cpu;
return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
*/
void channel_backend_reset(struct channel_backend *chanb)
{
- struct channel *chan = container_of(chanb, struct channel, backend);
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
const struct lib_ring_buffer_config *config = &chanb->config;
/*
const struct lib_ring_buffer_config *config,
void *priv, size_t subbuf_size, size_t num_subbuf)
{
- struct channel *chan = container_of(chanb, struct channel, backend);
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
unsigned int i;
int ret;
EXPORT_PER_CPU_SYMBOL(lib_ring_buffer_nesting);
static
-void lib_ring_buffer_print_errors(struct channel *chan,
+void lib_ring_buffer_print_errors(struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf, int cpu);
static
void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
static
int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
- struct channel *chan)
+ struct lttng_kernel_ring_buffer_channel *chan)
{
unsigned long consumed_old, consumed_idx, commit_count, write_offset;
*/
void lib_ring_buffer_free(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
irq_work_sync(&buf->wakeup_pending);
*/
void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned int i;
* be using the iterator concurrently with reset. The previous current iterator
* record is reset.
*/
-void channel_reset(struct channel *chan)
+void channel_reset(struct lttng_kernel_ring_buffer_channel *chan)
{
/*
* Reset iterators first. Will put the subbuffer if held for reading.
static void lib_ring_buffer_pending_wakeup_chan(struct irq_work *entry)
{
- struct channel *chan = container_of(entry, struct channel, wakeup_pending);
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(entry, struct lttng_kernel_ring_buffer_channel, wakeup_pending);
wake_up_interruptible(&chan->read_wait);
}
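/*
 * Editorial note: this container_of() pattern, recovering the enclosing
 * struct lttng_kernel_ring_buffer_channel from an embedded member
 * (irq_work, kref, cpuhp node, notifier_block), recurs throughout the
 * patch; only the second container_of() argument changes with the rename,
 * the member names stay the same.
 */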
struct channel_backend *chanb, int cpu)
{
const struct lib_ring_buffer_config *config = &chanb->config;
- struct channel *chan = container_of(chanb, struct channel, backend);
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
void *priv = chanb->priv;
size_t subbuf_header_size;
u64 tsc;
static void switch_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
{
struct lib_ring_buffer *buf = lttng_from_timer(buf, t, switch_timer);
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
/*
*/
static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned int flags = 0;
*/
static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
return;
static void read_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
{
struct lib_ring_buffer *buf = lttng_from_timer(buf, t, read_timer);
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, !buf->backend.allocated);
*/
static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned int flags = 0;
*/
static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
int lttng_cpuhp_rb_frontend_dead(unsigned int cpu,
struct lttng_cpuhp_node *node)
{
- struct channel *chan = container_of(node, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_prepare);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
const struct lib_ring_buffer_config *config = &chan->backend.config;
int lttng_cpuhp_rb_frontend_online(unsigned int cpu,
struct lttng_cpuhp_node *node)
{
- struct channel *chan = container_of(node, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_online);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
const struct lib_ring_buffer_config *config = &chan->backend.config;
int lttng_cpuhp_rb_frontend_offline(unsigned int cpu,
struct lttng_cpuhp_node *node)
{
- struct channel *chan = container_of(node, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_online);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
const struct lib_ring_buffer_config *config = &chan->backend.config;
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct channel *chan = container_of(nb, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
cpu_hp_notifier);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long val,
void *data)
{
- struct channel *chan = container_of(nb, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
tick_nohz_notifier);
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
/*
* Holds CPU hotplug.
*/
-static void channel_unregister_notifiers(struct channel *chan)
+static void channel_unregister_notifiers(struct lttng_kernel_ring_buffer_channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
buf->quiescent = false;
}
-void lib_ring_buffer_set_quiescent_channel(struct channel *chan)
+void lib_ring_buffer_set_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan)
{
int cpu;
const struct lib_ring_buffer_config *config = &chan->backend.config;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_set_quiescent_channel);
-void lib_ring_buffer_clear_quiescent_channel(struct channel *chan)
+void lib_ring_buffer_clear_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan)
{
int cpu;
const struct lib_ring_buffer_config *config = &chan->backend.config;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_clear_quiescent_channel);
-static void channel_free(struct channel *chan)
+static void channel_free(struct lttng_kernel_ring_buffer_channel *chan)
{
if (chan->backend.release_priv_ops) {
chan->backend.release_priv_ops(chan->backend.priv_ops);
* Holds cpu hotplug.
* Returns NULL on failure.
*/
-struct channel *channel_create(const struct lib_ring_buffer_config *config,
+struct lttng_kernel_ring_buffer_channel *channel_create(const struct lib_ring_buffer_config *config,
const char *name, void *priv, void *buf_addr,
size_t subbuf_size,
size_t num_subbuf, unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
int ret;
- struct channel *chan;
+ struct lttng_kernel_ring_buffer_channel *chan;
if (lib_ring_buffer_check_config(config, switch_timer_interval,
read_timer_interval))
return NULL;
- chan = kzalloc(sizeof(struct channel), GFP_KERNEL);
+ chan = kzalloc(sizeof(struct lttng_kernel_ring_buffer_channel), GFP_KERNEL);
if (!chan)
return NULL;
static
void channel_release(struct kref *kref)
{
- struct channel *chan = container_of(kref, struct channel, ref);
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(kref, struct lttng_kernel_ring_buffer_channel, ref);
channel_free(chan);
}
* They should release their handle at that point. Returns the private
* data pointer.
*/
-void *channel_destroy(struct channel *chan)
+void *channel_destroy(struct lttng_kernel_ring_buffer_channel *chan)
{
int cpu;
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *channel_get_ring_buffer(
const struct lib_ring_buffer_config *config,
- struct channel *chan, int cpu)
+ struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
return chan->backend.buf;
int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
return -EBUSY;
void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
lttng_smp_mb__before_atomic();
int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, write_offset;
int finalized;
int lib_ring_buffer_snapshot_sample_positions(struct lib_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
smp_rmb();
unsigned long consumed_new)
{
struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = bufb->chan;
+ struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
unsigned long consumed;
CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static void lib_ring_buffer_flush_read_subbuf_dcache(
const struct lib_ring_buffer_config *config,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf)
{
struct lib_ring_buffer_backend_pages *pages;
#else
static void lib_ring_buffer_flush_read_subbuf_dcache(
const struct lib_ring_buffer_config *config,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf)
{
}
int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
unsigned long consumed)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret;
void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
{
struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = bufb->chan;
+ struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long read_sb_bindex, consumed_idx, consumed;
*/
static
void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
unsigned long cons_offset,
int cpu)
{
static
void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
void *priv, int cpu)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static
-void lib_ring_buffer_print_records_count(struct channel *chan,
+void lib_ring_buffer_print_records_count(struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf,
int cpu)
{
}
#else
static
-void lib_ring_buffer_print_records_count(struct channel *chan,
+void lib_ring_buffer_print_records_count(struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf,
int cpu)
{
#endif
static
-void lib_ring_buffer_print_errors(struct channel *chan,
+void lib_ring_buffer_print_errors(struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf, int cpu)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
*/
static
void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
*/
static
void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
*/
static
void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
*/
static
void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
static
int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 *tsc)
{
*/
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct switch_offsets offsets;
unsigned long oldidx;
static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
enum switch_mode mode)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
int ret;
struct switch_param param;
void lib_ring_buffer_clear(struct lib_ring_buffer *buf)
{
struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = bufb->chan;
+ struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
lib_ring_buffer_switch_remote(buf);
lib_ring_buffer_clear_reader(buf, chan);
*/
static
int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
return 0;
}
-static struct lib_ring_buffer *get_current_buf(struct channel *chan, int cpu)
+static struct lib_ring_buffer *get_current_buf(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
return chan->backend.buf;
}
-void lib_ring_buffer_lost_event_too_big(struct channel *chan)
+void lib_ring_buffer_lost_event_too_big(struct lttng_kernel_ring_buffer_channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf = get_current_buf(chan, smp_processor_id());
int lib_ring_buffer_reserve_slow(struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct channel *chan = ctx->priv.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
struct switch_offsets offsets;
void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
* Returns the size of the event read, -EAGAIN if buffer is empty, -ENODATA if
* buffer is empty and finalized. The buffer must already be opened for reading.
*/
-ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
+ssize_t lib_ring_buffer_get_next_record(struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
static
void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
- struct channel *chan)
+ struct lttng_kernel_ring_buffer_channel *chan)
{
struct lttng_ptr_heap *heap = &chan->iter.heap;
struct lib_ring_buffer *buf, *tmp;
static
void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
- struct channel *chan)
+ struct lttng_kernel_ring_buffer_channel *chan)
{
u64 timestamp_qs;
unsigned long wait_msecs;
* opened for reading.
*/
-ssize_t channel_get_next_record(struct channel *chan,
+ssize_t channel_get_next_record(struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer **ret_buf)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
EXPORT_SYMBOL_GPL(channel_get_next_record);
static
-void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer *buf)
+void lib_ring_buffer_iterator_init(struct lttng_kernel_ring_buffer_channel *chan, struct lib_ring_buffer *buf)
{
if (buf->iter.allocated)
return;
int lttng_cpuhp_rb_iter_online(unsigned int cpu,
struct lttng_cpuhp_node *node)
{
- struct channel *chan = container_of(node, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_iter_online);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
const struct lib_ring_buffer_config *config = &chan->backend.config;
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct channel *chan = container_of(nb, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
hp_iter_notifier);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
const struct lib_ring_buffer_config *config = &chan->backend.config;
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
-int channel_iterator_init(struct channel *chan)
+int channel_iterator_init(struct lttng_kernel_ring_buffer_channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
return 0;
}
-void channel_iterator_unregister_notifiers(struct channel *chan)
+void channel_iterator_unregister_notifiers(struct lttng_kernel_ring_buffer_channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
}
}
-void channel_iterator_free(struct channel *chan)
+void channel_iterator_free(struct lttng_kernel_ring_buffer_channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
return lib_ring_buffer_open_read(buf);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
-int channel_iterator_open(struct channel *chan)
+int channel_iterator_open(struct lttng_kernel_ring_buffer_channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
}
EXPORT_SYMBOL_GPL(channel_iterator_open);
-void channel_iterator_release(struct channel *chan)
+void channel_iterator_release(struct lttng_kernel_ring_buffer_channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
if (buf->iter.state != ITER_GET_SUBBUF)
lib_ring_buffer_put_next_subbuf(buf);
/* Don't reset allocated and read_open */
}
-void channel_iterator_reset(struct channel *chan)
+void channel_iterator_reset(struct lttng_kernel_ring_buffer_channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
char __user *user_buf,
size_t count,
loff_t *ppos,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf,
int fusionmerge)
{
{
struct inode *inode = filp->lttng_f_dentry->d_inode;
struct lib_ring_buffer *buf = inode->i_private;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
return channel_ring_buffer_file_read(filp, user_buf, count, ppos,
chan, buf, 0);
loff_t *ppos)
{
struct inode *inode = filp->lttng_f_dentry->d_inode;
- struct channel *chan = inode->i_private;
+ struct lttng_kernel_ring_buffer_channel *chan = inode->i_private;
const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
static
int channel_file_open(struct inode *inode, struct file *file)
{
- struct channel *chan = inode->i_private;
+ struct lttng_kernel_ring_buffer_channel *chan = inode->i_private;
int ret;
ret = channel_iterator_open(chan);
static
int channel_file_release(struct inode *inode, struct file *file)
{
- struct channel *chan = inode->i_private;
+ struct lttng_kernel_ring_buffer_channel *chan = inode->i_private;
channel_iterator_release(chan);
return 0;
#endif
{
struct lib_ring_buffer *buf = vma->vm_private_data;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
pgoff_t pgoff = vmf->pgoff;
unsigned long *pfnp;
struct vm_area_struct *vma)
{
unsigned long length = vma->vm_end - vma->vm_start;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long mmap_buf_len;
unsigned int flags,
struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned int poff, subbuf_pages, nr_pages;
struct page *pages[PIPE_DEF_BUFFERS];
unsigned int flags,
struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
ssize_t spliced;
int ret;
struct lib_ring_buffer *buf)
{
unsigned int mask = 0;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
int finalized, disabled;
long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg, struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
if (lib_ring_buffer_channel_is_disabled(chan))
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg, struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
if (lib_ring_buffer_channel_is_disabled(chan))
size_t count, loff_t *ppos)
{
struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
- struct channel *chan = event_notifier_group->chan;
+ struct lttng_kernel_ring_buffer_channel *chan = event_notifier_group->chan;
struct lib_ring_buffer *buf = event_notifier_group->buf;
ssize_t read_count = 0, len;
size_t read_offset;
{
unsigned int mask = 0;
struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
- struct channel *chan = event_notifier_group->chan;
+ struct lttng_kernel_ring_buffer_channel *chan = event_notifier_group->chan;
struct lib_ring_buffer *buf = event_notifier_group->buf;
const struct lib_ring_buffer_config *config = &chan->backend.config;
int finalized, disabled;
{
struct lttng_metadata_stream *stream = filp->private_data;
struct lib_ring_buffer *buf = stream->priv;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
ret = lttng_metadata_output_channel(stream, chan, NULL);
if (ret > 0) {
{
struct lttng_metadata_stream *stream = filp->private_data;
struct lib_ring_buffer *buf = stream->priv;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
/*
* Before doing the actual ring buffer flush, write up to one
{
struct lttng_metadata_stream *stream = filp->private_data;
struct lib_ring_buffer *buf = stream->priv;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
ret = lttng_metadata_output_channel(stream, chan, &coherent);
if (ret > 0) {
{
struct lttng_metadata_stream *stream = filp->private_data;
struct lib_ring_buffer *buf = stream->priv;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
ret = lttng_metadata_output_channel(stream, chan, NULL);
if (ret > 0) {
{
struct lttng_metadata_stream *stream = filp->private_data;
struct lib_ring_buffer *buf = stream->priv;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
/*
* Before doing the actual ring buffer flush, write up to one
{
struct lttng_metadata_stream *stream = filp->private_data;
struct lib_ring_buffer *buf = stream->priv;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
ret = lttng_metadata_output_channel(stream, chan, &coherent);
if (ret > 0) {
int lttng_abi_open_event_notifier_group_stream(struct file *notif_file)
{
struct lttng_event_notifier_group *event_notifier_group = notif_file->private_data;
- struct channel *chan = event_notifier_group->chan;
+ struct lttng_kernel_ring_buffer_channel *chan = event_notifier_group->chan;
struct lib_ring_buffer *buf;
int ret;
void *stream_priv;
unsigned int cmd, unsigned long arg)
{
struct lib_ring_buffer *buf = filp->private_data;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
const struct lttng_kernel_channel_buffer_ops *ops = chan->backend.priv_ops;
int ret;
unsigned int cmd, unsigned long arg)
{
struct lib_ring_buffer *buf = filp->private_data;
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
const struct lttng_kernel_channel_buffer_ops *ops = chan->backend.priv_ops;
int ret;
* was written and a negative value on error.
*/
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
- struct channel *chan, bool *coherent)
+ struct lttng_kernel_ring_buffer_channel *chan, bool *coherent)
{
struct lttng_kernel_ring_buffer_ctx ctx;
int ret = 0;
size_t event_context_len;
};
-static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
+static inline notrace u64 lib_ring_buffer_clock_read(struct lttng_kernel_ring_buffer_channel *chan)
{
return trace_clock_read64();
}
*/
static __inline__
size_t record_header_size(const struct lib_ring_buffer_config *config,
- struct channel *chan, size_t offset,
+ struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
struct lttng_client_ctx *client_ctx)
static const struct lib_ring_buffer_config client_config;
-static u64 client_ring_buffer_clock_read(struct channel *chan)
+static u64 client_ring_buffer_clock_read(struct lttng_kernel_ring_buffer_channel *chan)
{
return lib_ring_buffer_clock_read(chan);
}
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
- struct channel *chan, size_t offset,
+ struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
struct packet_header *header =
(struct packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx, unsigned long data_size)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
struct packet_header *header =
(struct packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
struct lib_ring_buffer *buf,
uint64_t *stream_id)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
struct lttng_kernel_channel_buffer *lttng_chan = channel_get_private(chan);
*stream_id = lttng_chan->priv->id;
}
static
-void lttng_channel_destroy(struct channel *chan)
+void lttng_channel_destroy(struct lttng_kernel_ring_buffer_channel *chan)
{
channel_destroy(chan);
}
static
-struct channel *_channel_create(const char *name,
+struct lttng_kernel_ring_buffer_channel *_channel_create(const char *name,
void *priv, void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
struct lttng_kernel_channel_buffer *lttng_chan = priv;
- struct channel *chan;
+ struct lttng_kernel_ring_buffer_channel *chan;
chan = channel_create(&client_config, name, lttng_chan, buf_addr,
subbuf_size, num_subbuf, switch_timer_interval,
}
static
-struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
+struct lib_ring_buffer *lttng_buffer_read_open(struct lttng_kernel_ring_buffer_channel *chan)
{
struct lib_ring_buffer *buf;
int cpu;
}
static
-int lttng_buffer_has_read_closed_stream(struct channel *chan)
+int lttng_buffer_has_read_closed_stream(struct lttng_kernel_ring_buffer_channel *chan)
{
struct lib_ring_buffer *buf;
int cpu;
}
static
-wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
+wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
chan, cpu);
}
static
-wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
+wait_queue_head_t *lttng_get_hp_wait_queue(struct lttng_kernel_ring_buffer_channel *chan)
{
return &chan->hp_wait;
}
static
-int lttng_is_finalized(struct channel *chan)
+int lttng_is_finalized(struct lttng_kernel_ring_buffer_channel *chan)
{
return lib_ring_buffer_channel_is_finalized(chan);
}
static
-int lttng_is_disabled(struct channel *chan)
+int lttng_is_disabled(struct lttng_kernel_ring_buffer_channel *chan)
{
return lib_ring_buffer_channel_is_disabled(chan);
}
static const struct lib_ring_buffer_config client_config;
static inline
-u64 lib_ring_buffer_clock_read(struct channel *chan)
+u64 lib_ring_buffer_clock_read(struct lttng_kernel_ring_buffer_channel *chan)
{
return 0;
}
static inline
size_t record_header_size(const struct lib_ring_buffer_config *config,
- struct channel *chan, size_t offset,
+ struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
#include <ringbuffer/api.h>
-static u64 client_ring_buffer_clock_read(struct channel *chan)
+static u64 client_ring_buffer_clock_read(struct lttng_kernel_ring_buffer_channel *chan)
{
return 0;
}
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
- struct channel *chan, size_t offset,
+ struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
}
static void client_record_get(const struct lib_ring_buffer_config *config,
- struct channel *chan, struct lib_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan, struct lib_ring_buffer *buf,
size_t offset, size_t *header_len,
size_t *payload_len, u64 *timestamp)
{
}
static
-void lttng_channel_destroy(struct channel *chan)
+void lttng_channel_destroy(struct lttng_kernel_ring_buffer_channel *chan)
{
channel_destroy(chan);
}
static
-struct channel *_channel_create(const char *name,
+struct lttng_kernel_ring_buffer_channel *_channel_create(const char *name,
void *priv, void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
struct lttng_event_notifier_group *event_notifier_group = priv;
- struct channel *chan;
+ struct lttng_kernel_ring_buffer_channel *chan;
chan = channel_create(&client_config, name,
event_notifier_group, buf_addr,
}
static
-struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
+struct lib_ring_buffer *lttng_buffer_read_open(struct lttng_kernel_ring_buffer_channel *chan)
{
struct lib_ring_buffer *buf;
}
static
-int lttng_buffer_has_read_closed_stream(struct channel *chan)
+int lttng_buffer_has_read_closed_stream(struct lttng_kernel_ring_buffer_channel *chan)
{
struct lib_ring_buffer *buf;
int cpu;
static
int lttng_event_reserve(struct lttng_kernel_ring_buffer_ctx *ctx)
{
- struct channel *chan = ctx->client_priv;
+ struct lttng_kernel_ring_buffer_channel *chan = ctx->client_priv;
int ret;
memset(&ctx->priv, 0, sizeof(ctx->priv));
}
static
-size_t lttng_packet_avail_size(struct channel *chan)
+size_t lttng_packet_avail_size(struct lttng_kernel_ring_buffer_channel *chan)
{
unsigned long o_begin;
struct lib_ring_buffer *buf;
}
static
-wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
+wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
chan, cpu);
}
static
-wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
+wait_queue_head_t *lttng_get_hp_wait_queue(struct lttng_kernel_ring_buffer_channel *chan)
{
return &chan->hp_wait;
}
static
-int lttng_is_finalized(struct channel *chan)
+int lttng_is_finalized(struct lttng_kernel_ring_buffer_channel *chan)
{
return lib_ring_buffer_channel_is_finalized(chan);
}
static
-int lttng_is_disabled(struct channel *chan)
+int lttng_is_disabled(struct lttng_kernel_ring_buffer_channel *chan)
{
return lib_ring_buffer_channel_is_disabled(chan);
}
static const struct lib_ring_buffer_config client_config;
static inline
-u64 lib_ring_buffer_clock_read(struct channel *chan)
+u64 lib_ring_buffer_clock_read(struct lttng_kernel_ring_buffer_channel *chan)
{
return 0;
}
static inline
size_t record_header_size(const struct lib_ring_buffer_config *config,
- struct channel *chan, size_t offset,
+ struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
#include <ringbuffer/api.h>
-static u64 client_ring_buffer_clock_read(struct channel *chan)
+static u64 client_ring_buffer_clock_read(struct lttng_kernel_ring_buffer_channel *chan)
{
return 0;
}
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
- struct channel *chan, size_t offset,
+ struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
struct metadata_packet_header *header =
(struct metadata_packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx, unsigned long data_size)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
struct metadata_packet_header *header =
(struct metadata_packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
}
static
-void lttng_channel_destroy(struct channel *chan)
+void lttng_channel_destroy(struct lttng_kernel_ring_buffer_channel *chan)
{
channel_destroy(chan);
}
static
-struct channel *_channel_create(const char *name,
+struct lttng_kernel_ring_buffer_channel *_channel_create(const char *name,
void *priv, void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
struct lttng_kernel_channel_buffer *lttng_chan = priv;
- struct channel *chan;
+ struct lttng_kernel_ring_buffer_channel *chan;
chan = channel_create(&client_config, name,
lttng_chan->parent.session->priv->metadata_cache, buf_addr,
}
static
-struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
+struct lib_ring_buffer *lttng_buffer_read_open(struct lttng_kernel_ring_buffer_channel *chan)
{
struct lib_ring_buffer *buf;
}
static
-int lttng_buffer_has_read_closed_stream(struct channel *chan)
+int lttng_buffer_has_read_closed_stream(struct lttng_kernel_ring_buffer_channel *chan)
{
struct lib_ring_buffer *buf;
int cpu;
static
int lttng_event_reserve(struct lttng_kernel_ring_buffer_ctx *ctx)
{
- struct channel *chan = ctx->client_priv;
+ struct lttng_kernel_ring_buffer_channel *chan = ctx->client_priv;
int ret;
memset(&ctx->priv, 0, sizeof(ctx->priv));
}
static
-size_t lttng_packet_avail_size(struct channel *chan)
+size_t lttng_packet_avail_size(struct lttng_kernel_ring_buffer_channel *chan)
{
unsigned long o_begin;
struct lib_ring_buffer *buf;
}
static
-wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
+wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
chan, cpu);
}
static
-wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
+wait_queue_head_t *lttng_get_hp_wait_queue(struct lttng_kernel_ring_buffer_channel *chan)
{
return &chan->hp_wait;
}
static
-int lttng_is_finalized(struct channel *chan)
+int lttng_is_finalized(struct lttng_kernel_ring_buffer_channel *chan)
{
return lib_ring_buffer_channel_is_finalized(chan);
}
static
-int lttng_is_disabled(struct channel *chan)
+int lttng_is_disabled(struct lttng_kernel_ring_buffer_channel *chan)
{
return lib_ring_buffer_channel_is_disabled(chan);
}
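/*
 * Wiring sketch (editorial; the transport registration code itself is not
 * part of this excerpt): after the rename, the client callbacks defined
 * above plug into the buffer-ops interface shown at the top of the patch
 * with their new channel pointer type, along the lines of:
 *
 *	.channel_create			= _channel_create,
 *	.channel_destroy		= lttng_channel_destroy,
 *	.buffer_read_open		= lttng_buffer_read_open,
 *	.buffer_has_read_closed_stream	= lttng_buffer_has_read_closed_stream,
 *	.packet_avail_size		= lttng_packet_avail_size,
 *	.get_writer_buf_wait_queue	= lttng_get_writer_buf_wait_queue,
 *	.get_hp_wait_queue		= lttng_get_hp_wait_queue,
 *	.is_finalized			= lttng_is_finalized,
 *	.is_disabled			= lttng_is_disabled,
 */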