struct lttng_metadata_cache;
struct perf_event;
struct perf_event_attr;
-struct lib_ring_buffer_config;
+struct lttng_kernel_ring_buffer_config;
enum lttng_enabler_format_type {
LTTNG_ENABLER_FORMAT_STAR_GLOB,
unsigned int switch_timer_interval,
unsigned int read_timer_interval);
void (*channel_destroy)(struct lttng_kernel_ring_buffer_channel *chan);
- struct lib_ring_buffer *(*buffer_read_open)(struct lttng_kernel_ring_buffer_channel *chan);
+ struct lttng_kernel_ring_buffer *(*buffer_read_open)(struct lttng_kernel_ring_buffer_channel *chan);
int (*buffer_has_read_closed_stream)(struct lttng_kernel_ring_buffer_channel *chan);
- void (*buffer_read_close)(struct lib_ring_buffer *buf);
+ void (*buffer_read_close)(struct lttng_kernel_ring_buffer *buf);
/*
* packet_avail_size returns the available size in the current
* packet. Note that the size returned is only a hint, since it
wait_queue_head_t *(*get_hp_wait_queue)(struct lttng_kernel_ring_buffer_channel *chan);
int (*is_finalized)(struct lttng_kernel_ring_buffer_channel *chan);
int (*is_disabled)(struct lttng_kernel_ring_buffer_channel *chan);
- int (*timestamp_begin) (const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+ int (*timestamp_begin) (const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *timestamp_begin);
- int (*timestamp_end) (const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+ int (*timestamp_end) (const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *timestamp_end);
- int (*events_discarded) (const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+ int (*events_discarded) (const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *events_discarded);
- int (*content_size) (const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+ int (*content_size) (const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *content_size);
- int (*packet_size) (const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+ int (*packet_size) (const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *packet_size);
- int (*stream_id) (const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+ int (*stream_id) (const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *stream_id);
- int (*current_timestamp) (const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+ int (*current_timestamp) (const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *ts);
- int (*sequence_number) (const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+ int (*sequence_number) (const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *seq);
- int (*instance_id) (const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+ int (*instance_id) (const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *id);
};
struct lttng_kernel_channel_buffer_ops *ops;
struct lttng_transport *transport;
struct lttng_kernel_ring_buffer_channel *chan; /* Ring buffer channel for event notifier group. */
- struct lib_ring_buffer *buf; /* Ring buffer for event notifier group. */
+ struct lttng_kernel_ring_buffer *buf; /* Ring buffer for event notifier group. */
wait_queue_head_t read_wait;
struct irq_work wakeup_pending; /* Pending wakeup irq work. */
struct lttng_kernel_event_notifier *sc_unknown; /* for unknown syscalls */
/* Ring buffer backend access (read/write) */
-extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
+extern size_t lib_ring_buffer_read(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, void *dest, size_t len);
-extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
+extern int __lib_ring_buffer_copy_to_user(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, void __user *dest,
size_t len);
-extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
+extern int lib_ring_buffer_read_cstr(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, void *dest, size_t len);
extern unsigned long *
-lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb, size_t offset,
+lib_ring_buffer_read_get_pfn(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
void ***virt);
/*
* as long as the write is never bigger than a page size.
*/
extern void *
-lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
+lib_ring_buffer_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset);
extern void *
-lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
+lib_ring_buffer_read_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset);
/**
* if copy is crossing a page boundary.
*/
static inline __attribute__((always_inline))
-void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_write(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
const void *src, size_t len)
{
- struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
size_t offset = ctx->priv.buf_offset;
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
if (unlikely(!len))
return;
* boundary.
*/
static inline
-void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_memset(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx, int c, size_t len)
{
- struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
size_t offset = ctx->priv.buf_offset;
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
if (unlikely(!len))
return;
* copied. Does *not* terminate @dest with NULL terminating character.
*/
static inline __attribute__((always_inline))
-size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config *config,
+size_t lib_ring_buffer_do_strcpy(const struct lttng_kernel_ring_buffer_config *config,
char *dest, const char *src, size_t len)
{
size_t count;
* previously.
*/
static inline __attribute__((always_inline))
-size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
+size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lttng_kernel_ring_buffer_config *config,
char *dest, const char __user *src, size_t len)
{
size_t count;
* (_ring_buffer_strcpy) if copy is crossing a page boundary.
*/
static inline
-void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_strcpy(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
const char *src, size_t len, int pad)
{
- struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
size_t offset = ctx->priv.buf_offset;
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
if (unlikely(!len))
return;
* Disable the page fault handler to ensure we never try to take the mmap_sem.
*/
static inline __attribute__((always_inline))
-void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_copy_from_user_inatomic(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
const void __user *src, size_t len)
{
- struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
size_t offset = ctx->priv.buf_offset;
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
unsigned long ret;
if (unlikely(!len))
* take the mmap_sem.
*/
static inline
-void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_strcpy_from_user_inatomic(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
const void __user *src, size_t len, int pad)
{
- struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
size_t offset = ctx->priv.buf_offset;
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
if (unlikely(!len))
return;
*/
static inline
unsigned long lib_ring_buffer_get_records_unread(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct lib_ring_buffer_backend_pages *pages;
+ struct lttng_kernel_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_kernel_ring_buffer_backend_pages *pages;
unsigned long records_unread = 0, sb_bindex, id;
unsigned int i;
/* Ring buffer and channel backend create/free */
-int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_create(struct lttng_kernel_ring_buffer_backend *bufb,
struct channel_backend *chan, int cpu);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
-void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
+void lib_ring_buffer_backend_free(struct lttng_kernel_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
const char *name,
- const struct lib_ring_buffer_config *config,
+ const struct lttng_kernel_ring_buffer_config *config,
void *priv, size_t subbuf_size,
size_t num_subbuf);
void channel_backend_free(struct channel_backend *chanb);
-void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb);
+void lib_ring_buffer_backend_reset(struct lttng_kernel_ring_buffer_backend *bufb);
void channel_backend_reset(struct channel_backend *chanb);
int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);
-extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
+extern void _lib_ring_buffer_write(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, const void *src, size_t len,
size_t pagecpy);
-extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
+extern void _lib_ring_buffer_memset(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, int c, size_t len,
size_t pagecpy);
-extern void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
+extern void _lib_ring_buffer_strcpy(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, const char *src, size_t len,
size_t pagecpy, int pad);
-extern void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+extern void _lib_ring_buffer_copy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, const void *src,
size_t len, size_t pagecpy);
-extern void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+extern void _lib_ring_buffer_strcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, const char __user *src, size_t len,
size_t pagecpy, int pad);
* mode).
*/
static inline
-unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
+unsigned long subbuffer_id(const struct lttng_kernel_ring_buffer_config *config,
unsigned long offset, unsigned long noref,
unsigned long index)
{
* bits are identical, else 0.
*/
static inline
-int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
+int subbuffer_id_compare_offset(const struct lttng_kernel_ring_buffer_config *config,
unsigned long id, unsigned long offset)
{
return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}
static inline
-unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
+unsigned long subbuffer_id_get_index(const struct lttng_kernel_ring_buffer_config *config,
unsigned long id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
}
static inline
-unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
+unsigned long subbuffer_id_is_noref(const struct lttng_kernel_ring_buffer_config *config,
unsigned long id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
* needed.
*/
static inline
-void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
+void subbuffer_id_set_noref(const struct lttng_kernel_ring_buffer_config *config,
unsigned long *id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
}
static inline
-void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
+void subbuffer_id_set_noref_offset(const struct lttng_kernel_ring_buffer_config *config,
unsigned long *id, unsigned long offset)
{
unsigned long tmp;
/* No volatile access, since already used locally */
static inline
-void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
+void subbuffer_id_clear_noref(const struct lttng_kernel_ring_buffer_config *config,
unsigned long *id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
* -EPERM on failure.
*/
static inline
-int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
+int subbuffer_id_check_index(const struct lttng_kernel_ring_buffer_config *config,
unsigned long num_subbuf)
{
if (config->mode == RING_BUFFER_OVERWRITE)
}
static inline
-void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_backend_get_pages(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
- struct lib_ring_buffer_backend_pages **backend_pages)
+ struct lttng_kernel_ring_buffer_backend_pages **backend_pages)
{
- struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t sbidx, offset = ctx->priv.buf_offset;
unsigned long sb_bindex, id;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
offset &= chanb->buf_size - 1;
sbidx = offset >> chanb->subbuf_size_order;
/* Get backend pages from cache. */
static inline
-struct lib_ring_buffer_backend_pages *
- lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
+struct lttng_kernel_ring_buffer_backend_pages *
+ lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx)
{
return ctx->priv.backend_pages;
*/
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
-void subbuffer_count_record(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+void subbuffer_count_record(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb,
unsigned long idx)
{
unsigned long sb_bindex;
* perform the decrement atomically.
*/
static inline
-void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb)
+void subbuffer_consume_record(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb)
{
unsigned long sb_bindex;
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
-void subbuffer_count_record(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+void subbuffer_count_record(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb,
unsigned long idx)
{
}
static inline
-void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb)
+void subbuffer_consume_record(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
unsigned long subbuffer_get_records_count(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb,
unsigned long idx)
{
unsigned long sb_bindex;
*/
static inline
unsigned long subbuffer_count_records_overrun(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb,
unsigned long idx)
{
- struct lib_ring_buffer_backend_pages *pages;
+ struct lttng_kernel_ring_buffer_backend_pages *pages;
unsigned long overruns, sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
}
static inline
-void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+void subbuffer_set_data_size(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb,
unsigned long idx,
unsigned long data_size)
{
- struct lib_ring_buffer_backend_pages *pages;
+ struct lttng_kernel_ring_buffer_backend_pages *pages;
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
static inline
unsigned long subbuffer_get_read_data_size(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb)
{
- struct lib_ring_buffer_backend_pages *pages;
+ struct lttng_kernel_ring_buffer_backend_pages *pages;
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
static inline
unsigned long subbuffer_get_data_size(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb,
unsigned long idx)
{
- struct lib_ring_buffer_backend_pages *pages;
+ struct lttng_kernel_ring_buffer_backend_pages *pages;
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
}
static inline
-void subbuffer_inc_packet_count(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+void subbuffer_inc_packet_count(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb,
unsigned long idx)
{
bufb->buf_cnt[idx].seq_cnt++;
* writer.
*/
static inline
-void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+void lib_ring_buffer_clear_noref(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb,
unsigned long idx)
{
unsigned long id, new_id;
* called by writer.
*/
static inline
-void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+void lib_ring_buffer_set_noref_offset(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb,
unsigned long idx, unsigned long offset)
{
if (config->mode != RING_BUFFER_OVERWRITE)
* update_read_sb_index - Read-side subbuffer index update.
*/
static inline
-int update_read_sb_index(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+int update_read_sb_index(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb,
struct channel_backend *chanb,
unsigned long consumed_idx,
unsigned long consumed_count)
#include <lttng/kernel-version.h>
#include <lttng/cpuhotplug.h>
-struct lib_ring_buffer_backend_page {
+struct lttng_kernel_ring_buffer_backend_page {
void *virt; /* page virtual address (cached) */
unsigned long pfn; /* page frame number */
};
-struct lib_ring_buffer_backend_pages {
+struct lttng_kernel_ring_buffer_backend_pages {
unsigned long mmap_offset; /* offset of the subbuffer in mmap */
union v_atomic records_commit; /* current records committed count */
union v_atomic records_unread; /* records to read */
unsigned long data_size; /* Amount of data to read from subbuf */
- struct lib_ring_buffer_backend_page p[];
+ struct lttng_kernel_ring_buffer_backend_page p[];
};
-struct lib_ring_buffer_backend_subbuffer {
+struct lttng_kernel_ring_buffer_backend_subbuffer {
/* Identifier for subbuf backend pages. Exchanged atomically. */
unsigned long id; /* backend subbuffer identifier */
};
-struct lib_ring_buffer_backend_counts {
+struct lttng_kernel_ring_buffer_backend_counts {
/*
* Counter specific to the sub-buffer location within the ring buffer.
* The actual sequence number of the packet within the entire ring
* Forward declaration of frontend-specific channel and ring_buffer.
*/
struct lttng_kernel_ring_buffer_channel;
-struct lib_ring_buffer;
+struct lttng_kernel_ring_buffer;
-struct lib_ring_buffer_backend {
+struct lttng_kernel_ring_buffer_backend {
/* Array of ring_buffer_backend_subbuffer for writer */
- struct lib_ring_buffer_backend_subbuffer *buf_wsb;
+ struct lttng_kernel_ring_buffer_backend_subbuffer *buf_wsb;
/* ring_buffer_backend_subbuffer for reader */
- struct lib_ring_buffer_backend_subbuffer buf_rsb;
+ struct lttng_kernel_ring_buffer_backend_subbuffer buf_rsb;
/* Array of lib_ring_buffer_backend_counts for the packet counter */
- struct lib_ring_buffer_backend_counts *buf_cnt;
+ struct lttng_kernel_ring_buffer_backend_counts *buf_cnt;
/*
* Pointer array of backend pages, for whole buffer.
* Indexed by ring_buffer_backend_subbuffer identifier (id) index.
*/
- struct lib_ring_buffer_backend_pages **array;
+ struct lttng_kernel_ring_buffer_backend_pages **array;
unsigned int num_pages_per_subbuf;
struct lttng_kernel_ring_buffer_channel *chan; /* Associated channel */
*/
unsigned int buf_size_order; /* Order of buffer size */
unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? */
- struct lib_ring_buffer *buf; /* Channel per-cpu buffers */
+ struct lttng_kernel_ring_buffer *buf; /* Channel per-cpu buffers */
unsigned long num_subbuf; /* Number of sub-buffers for writer */
u64 start_tsc; /* Channel creation TSC value */
* source config can vanish before the last reference to this
* channel's streams is released.
*/
- struct lib_ring_buffer_config config; /* Ring buffer configuration */
+ struct lttng_kernel_ring_buffer_config config; /* Ring buffer configuration */
cpumask_var_t cpumask; /* Allocated per-cpu buffers cpumask */
char name[NAME_MAX]; /* Channel name */
};
#include <lttng/align.h>
#include <lttng/tracer-core.h>
-struct lib_ring_buffer;
+struct lttng_kernel_ring_buffer;
struct lttng_kernel_ring_buffer_channel;
-struct lib_ring_buffer_config;
+struct lttng_kernel_ring_buffer_config;
struct lttng_kernel_ring_buffer_ctx;
struct lttng_kernel_ring_buffer_ctx_private;
* provided as inline functions too. These may simply return 0 if not used by
* the client.
*/
-struct lib_ring_buffer_client_cb {
+struct lttng_kernel_ring_buffer_client_cb {
/* Mandatory callbacks */
/* A static inline version is also required for fast path */
u64 (*ring_buffer_clock_read) (struct lttng_kernel_ring_buffer_channel *chan);
- size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
+ size_t (*record_header_size) (const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
- void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
+ void (*buffer_begin) (struct lttng_kernel_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx);
- void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
+ void (*buffer_end) (struct lttng_kernel_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx, unsigned long data_size);
/* Optional callbacks (can be set to NULL) */
/* Called at buffer creation/finalize */
- int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
+ int (*buffer_create) (struct lttng_kernel_ring_buffer *buf, void *priv,
int cpu, const char *name);
/*
* Clients should guarantee that no new reader handle can be opened
* after finalize.
*/
- void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);
+ void (*buffer_finalize) (struct lttng_kernel_ring_buffer *buf, void *priv, int cpu);
/*
* Extract header length, payload length and timestamp from event
* record. Used by buffer iterators. Timestamp is only used by channel
* iterator.
*/
- void (*record_get) (const struct lib_ring_buffer_config *config,
- struct lttng_kernel_ring_buffer_channel *chan, struct lib_ring_buffer *buf,
+ void (*record_get) (const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_channel *chan, struct lttng_kernel_ring_buffer *buf,
size_t offset, size_t *header_len,
size_t *payload_len, u64 *timestamp);
};
* RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
* has the responsibility to perform wakeups.
*/
-struct lib_ring_buffer_config {
+struct lttng_kernel_ring_buffer_config {
enum {
RING_BUFFER_ALLOC_PER_CPU,
RING_BUFFER_ALLOC_GLOBAL,
* 0 and 64 disable the timestamp compression scheme.
*/
unsigned int tsc_bits;
- struct lib_ring_buffer_client_cb cb;
+ struct lttng_kernel_ring_buffer_client_cb cb;
};
/*
u64 tsc; /* time-stamp counter value */
unsigned int rflags; /* reservation flags */
- struct lib_ring_buffer *buf; /*
+ struct lttng_kernel_ring_buffer *buf; /*
* buffer corresponding to processor id
* for this channel
*/
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
};
/*
* Used internally to check for valid configurations at channel creation.
*/
static inline
-int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
+int lib_ring_buffer_check_config(const struct lttng_kernel_ring_buffer_config *config,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
*/
extern
-struct lttng_kernel_ring_buffer_channel *channel_create(const struct lib_ring_buffer_config *config,
+struct lttng_kernel_ring_buffer_channel *channel_create(const struct lttng_kernel_ring_buffer_config *config,
const char *name, void *priv,
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask); \
smp_rmb(); (cpu) < nr_cpu_ids; });)
-extern struct lib_ring_buffer *channel_get_ring_buffer(
- const struct lib_ring_buffer_config *config,
+extern struct lttng_kernel_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan, int cpu);
-extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
-extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);
+extern int lib_ring_buffer_open_read(struct lttng_kernel_ring_buffer *buf);
+extern void lib_ring_buffer_release_read(struct lttng_kernel_ring_buffer *buf);
/*
* Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
*/
-extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
+extern int lib_ring_buffer_snapshot(struct lttng_kernel_ring_buffer *buf,
unsigned long *consumed,
unsigned long *produced);
extern int lib_ring_buffer_snapshot_sample_positions(
- struct lib_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer *buf,
unsigned long *consumed,
unsigned long *produced);
-extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
+extern void lib_ring_buffer_move_consumer(struct lttng_kernel_ring_buffer *buf,
unsigned long consumed_new);
-extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
+extern int lib_ring_buffer_get_subbuf(struct lttng_kernel_ring_buffer *buf,
unsigned long consumed);
-extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);
+extern void lib_ring_buffer_put_subbuf(struct lttng_kernel_ring_buffer *buf);
void lib_ring_buffer_set_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan);
void lib_ring_buffer_clear_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan);
* lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
* to read sub-buffers sequentially.
*/
-static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf)
+static inline int lib_ring_buffer_get_next_subbuf(struct lttng_kernel_ring_buffer *buf)
{
int ret;
return ret;
}
-static inline void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf)
+static inline void lib_ring_buffer_put_next_subbuf(struct lttng_kernel_ring_buffer *buf)
{
lib_ring_buffer_put_subbuf(buf);
lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
}
extern void channel_reset(struct lttng_kernel_ring_buffer_channel *chan);
-extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);
+extern void lib_ring_buffer_reset(struct lttng_kernel_ring_buffer *buf);
static inline
-unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+unsigned long lib_ring_buffer_get_offset(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
return v_read(config, &buf->offset);
}
static inline
-unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+unsigned long lib_ring_buffer_get_consumed(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
return atomic_long_read(&buf->consumed);
}
* ordering enforced with respect to trace teardown).
*/
static inline
-int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+int lib_ring_buffer_is_finalized(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
int finalized = LTTNG_READ_ONCE(buf->finalized);
/*
static inline
unsigned long lib_ring_buffer_get_read_data_size(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
return subbuffer_get_read_data_size(config, &buf->backend);
}
static inline
unsigned long lib_ring_buffer_get_records_count(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
return v_read(config, &buf->records_count);
}
static inline
unsigned long lib_ring_buffer_get_records_overrun(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
return v_read(config, &buf->records_overrun);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_full(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_full);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_wrap(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_wrap);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_big(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_big);
}
static inline
unsigned long lib_ring_buffer_get_records_read(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
return v_read(config, &buf->backend.records_read);
}
* section.
*/
static inline
-int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
+int lib_ring_buffer_get_cpu(const struct lttng_kernel_ring_buffer_config *config)
{
int cpu, nesting;
* lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
*/
static inline
-void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
+void lib_ring_buffer_put_cpu(const struct lttng_kernel_ring_buffer_config *config)
{
barrier();
(*lttng_this_cpu_ptr(&lib_ring_buffer_nesting))--;
* returns 0 if reserve ok, or 1 if the slow path must be taken.
*/
static inline
-int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
+int lib_ring_buffer_try_reserve(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx,
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
- struct lib_ring_buffer *buf = ctx->priv.buf;
+ struct lttng_kernel_ring_buffer *buf = ctx->priv.buf;
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
*/
static inline
-int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
+int lib_ring_buffer_reserve(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
{
struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
* disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
*/
static inline
-void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, enum switch_mode mode)
+void lib_ring_buffer_switch(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf, enum switch_mode mode)
{
lib_ring_buffer_switch_slow(buf, mode);
}
* specified sub-buffer, and delivers it if necessary.
*/
static inline
-void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_commit(const struct lttng_kernel_ring_buffer_config *config,
const struct lttng_kernel_ring_buffer_ctx *ctx)
{
struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
- struct lib_ring_buffer *buf = ctx->priv.buf;
+ struct lttng_kernel_ring_buffer *buf = ctx->priv.buf;
unsigned long offset_end = ctx->priv.buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
unsigned long commit_count;
* Returns 0 upon success, -EPERM if the record cannot be discarded.
*/
static inline
-int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
+int lib_ring_buffer_try_discard_reserve(const struct lttng_kernel_ring_buffer_config *config,
const struct lttng_kernel_ring_buffer_ctx *ctx)
{
- struct lib_ring_buffer *buf = ctx->priv.buf;
+ struct lttng_kernel_ring_buffer *buf = ctx->priv.buf;
unsigned long end_offset = ctx->priv.pre_offset + ctx->priv.slot_size;
/*
}
static inline
-void channel_record_disable(const struct lib_ring_buffer_config *config,
+void channel_record_disable(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan)
{
atomic_inc(&chan->record_disabled);
}
static inline
-void channel_record_enable(const struct lib_ring_buffer_config *config,
+void channel_record_enable(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan)
{
atomic_dec(&chan->record_disabled);
}
static inline
-void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_disable(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
atomic_inc(&buf->record_disabled);
}
static inline
-void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_enable(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
atomic_dec(&buf->record_disabled);
}
#if (BITS_PER_LONG == 32)
static inline
-void save_last_tsc(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+void save_last_tsc(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf, u64 tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return;
}
static inline
-int last_tsc_overflow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+int last_tsc_overflow(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf, u64 tsc)
{
unsigned long tsc_shifted;
}
#else
static inline
-void save_last_tsc(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+void save_last_tsc(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf, u64 tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return;
}
static inline
-int last_tsc_overflow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+int last_tsc_overflow(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf, u64 tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return 0;
void *client_ctx);
extern
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_slow(struct lttng_kernel_ring_buffer *buf,
enum switch_mode mode);
extern
-void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_check_deliver_slow(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
u64 tsc);
extern
-void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
+void lib_ring_buffer_switch_remote(struct lttng_kernel_ring_buffer *buf);
extern
-void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf);
+void lib_ring_buffer_switch_remote_empty(struct lttng_kernel_ring_buffer *buf);
extern
-void lib_ring_buffer_clear(struct lib_ring_buffer *buf);
+void lib_ring_buffer_clear(struct lttng_kernel_ring_buffer *buf);
/* Buffer write helpers */
static inline
-void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
+void lib_ring_buffer_reserve_push_reader(struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
unsigned long offset)
{
* algorithm guarantees.
*/
static inline
-void lib_ring_buffer_clear_reader(struct lib_ring_buffer *buf,
+void lib_ring_buffer_clear_reader(struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long offset, consumed_old, consumed_new;
do {
}
static inline
-int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+int lib_ring_buffer_pending_data(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan)
{
return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
static inline
-unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+unsigned long lib_ring_buffer_get_data_size(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
unsigned long idx)
{
return subbuffer_get_data_size(config, &buf->backend, idx);
* This is a very specific ftrace use-case, so we keep this as "internal" API.
*/
static inline
-int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+int lib_ring_buffer_reserve_committed(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan)
{
unsigned long offset, idx, commit_count;
* timestamp of the following subbuffers.
*/
static inline
-void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_check_deliver(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
* useful for crash dump.
*/
static inline
-void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_write_commit_counter(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
unsigned long buf_offset,
unsigned long commit_count,
v_set(config, &cc_hot->seq, commit_count);
}
-extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+extern int lib_ring_buffer_create(struct lttng_kernel_ring_buffer *buf,
struct channel_backend *chanb, int cpu);
-extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
+extern void lib_ring_buffer_free(struct lttng_kernel_ring_buffer *buf);
/* Keep track of trap nesting inside ring buffer code */
DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
/* channel-level read-side iterator */
struct channel_iter {
/* Prio heap of buffers. Lowest timestamps at the top. */
- struct lttng_ptr_heap heap; /* Heap of struct lib_ring_buffer ptrs */
+ struct lttng_ptr_heap heap; /* Heap of struct lttng_kernel_ring_buffer ptrs */
struct list_head empty_head; /* Empty buffers linked-list head */
int read_open; /* Opened for reading ? */
u64 last_qs; /* Last quiescent state timestamp */
};
/* Per-buffer read iterator */
-struct lib_ring_buffer_iter {
+struct lttng_kernel_ring_buffer_iter {
u64 timestamp; /* Current record timestamp */
size_t header_len; /* Current record header length */
size_t payload_len; /* Current record payload length */
};
/* ring buffer state */
-struct lib_ring_buffer {
+struct lttng_kernel_ring_buffer {
/* First 32 bytes cache-hot cacheline */
union v_atomic offset; /* Current offset in the buffer */
struct commit_counters_hot *commit_hot;
* Last timestamp written in the buffer.
*/
- struct lib_ring_buffer_backend backend; /* Associated backend */
+ struct lttng_kernel_ring_buffer_backend backend; /* Associated backend */
struct commit_counters_cold *commit_cold;
/* Commit count per sub-buffer */
struct timer_list switch_timer; /* timer for periodical switch */
struct timer_list read_timer; /* timer for read poll */
raw_spinlock_t raw_tick_nohz_spinlock; /* nohz entry lock/trylock */
- struct lib_ring_buffer_iter iter; /* read-side iterator */
+ struct lttng_kernel_ring_buffer_iter iter; /* read-side iterator */
unsigned long get_subbuf_consumed; /* Read-side consumed */
unsigned long prod_snapshot; /* Producer count snapshot */
unsigned long cons_snapshot; /* Consumer count snapshot */
/*
* Issue warnings and disable channels upon internal error.
- * Can receive struct lib_ring_buffer or struct lib_ring_buffer_backend
+ * Can receive struct lttng_kernel_ring_buffer or struct lttng_kernel_ring_buffer_backend
* parameters.
*/
#define CHAN_WARN_ON(c, cond) \
* is finalized.
*/
extern ssize_t lib_ring_buffer_get_next_record(struct lttng_kernel_ring_buffer_channel *chan,
- struct lib_ring_buffer *buf);
+ struct lttng_kernel_ring_buffer *buf);
/*
* Ensure that the current subbuffer is put after client code has read the
* However, it should be invoked before returning data to user-space to ensure
* that the get/put subbuffer state is quiescent.
*/
-extern void lib_ring_buffer_put_current_record(struct lib_ring_buffer *buf);
+extern void lib_ring_buffer_put_current_record(struct lttng_kernel_ring_buffer *buf);
/*
* channel_get_next_record advances the buffer read position to the next record.
* Returns the current buffer in ret_buf.
*/
extern ssize_t channel_get_next_record(struct lttng_kernel_ring_buffer_channel *chan,
- struct lib_ring_buffer **ret_buf);
+ struct lttng_kernel_ring_buffer **ret_buf);
/**
* read_current_record - copy the buffer current record into dest.
* dest should be large enough to contain the record. Returns the number of
* bytes copied.
*/
-static inline size_t read_current_record(struct lib_ring_buffer *buf, void *dest)
+static inline size_t read_current_record(struct lttng_kernel_ring_buffer *buf, void *dest)
{
return lib_ring_buffer_read(&buf->backend, buf->iter.read_offset,
dest, buf->iter.payload_len);
}
-extern int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf);
-extern void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf);
+extern int lib_ring_buffer_iterator_open(struct lttng_kernel_ring_buffer *buf);
+extern void lib_ring_buffer_iterator_release(struct lttng_kernel_ring_buffer *buf);
extern int channel_iterator_open(struct lttng_kernel_ring_buffer_channel *chan);
extern void channel_iterator_release(struct lttng_kernel_ring_buffer_channel *chan);
void channel_iterator_unregister_notifiers(struct lttng_kernel_ring_buffer_channel *chan);
void channel_iterator_free(struct lttng_kernel_ring_buffer_channel *chan);
void channel_iterator_reset(struct lttng_kernel_ring_buffer_channel *chan);
-void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf);
+void lib_ring_buffer_iterator_reset(struct lttng_kernel_ring_buffer *buf);
#endif /* _LIB_RING_BUFFER_ITERATOR_H */
};
static inline
-long v_read(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+long v_read(const struct lttng_kernel_ring_buffer_config *config, union v_atomic *v_a)
{
if (config->sync == RING_BUFFER_SYNC_PER_CPU)
return local_read(&v_a->l);
}
static inline
-void v_set(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
+void v_set(const struct lttng_kernel_ring_buffer_config *config, union v_atomic *v_a,
long v)
{
if (config->sync == RING_BUFFER_SYNC_PER_CPU)
}
static inline
-void v_add(const struct lib_ring_buffer_config *config, long v, union v_atomic *v_a)
+void v_add(const struct lttng_kernel_ring_buffer_config *config, long v, union v_atomic *v_a)
{
if (config->sync == RING_BUFFER_SYNC_PER_CPU)
local_add(v, &v_a->l);
}
static inline
-void v_inc(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+void v_inc(const struct lttng_kernel_ring_buffer_config *config, union v_atomic *v_a)
{
if (config->sync == RING_BUFFER_SYNC_PER_CPU)
local_inc(&v_a->l);
* Non-atomic decrement. Only used by reader, apply to reader-owned subbuffer.
*/
static inline
-void _v_dec(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+void _v_dec(const struct lttng_kernel_ring_buffer_config *config, union v_atomic *v_a)
{
--v_a->v;
}
static inline
-long v_cmpxchg(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
+long v_cmpxchg(const struct lttng_kernel_ring_buffer_config *config, union v_atomic *v_a,
long old, long _new)
{
if (config->sync == RING_BUFFER_SYNC_PER_CPU)
* Internal file operations.
*/
-struct lib_ring_buffer;
+struct lttng_kernel_ring_buffer;
int lib_ring_buffer_open(struct inode *inode, struct file *file,
- struct lib_ring_buffer *buf);
+ struct lttng_kernel_ring_buffer *buf);
int lib_ring_buffer_release(struct inode *inode, struct file *file,
- struct lib_ring_buffer *buf);
+ struct lttng_kernel_ring_buffer *buf);
unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
- struct lib_ring_buffer *buf);
+ struct lttng_kernel_ring_buffer *buf);
ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
- unsigned int flags, struct lib_ring_buffer *buf);
+ unsigned int flags, struct lttng_kernel_ring_buffer *buf);
int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma,
- struct lib_ring_buffer *buf);
+ struct lttng_kernel_ring_buffer *buf);
/* Ring Buffer ioctl() and ioctl numbers */
long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg, struct lib_ring_buffer *buf);
+ unsigned long arg, struct lttng_kernel_ring_buffer *buf);
#ifdef CONFIG_COMPAT
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg, struct lib_ring_buffer *buf);
+ unsigned long arg, struct lttng_kernel_ring_buffer *buf);
#endif
ssize_t vfs_lib_ring_buffer_file_splice_read(struct file *in, loff_t *ppos,
* @extra_reader_sb: need extra subbuffer for reader
*/
static
-int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_allocate(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_backend *bufb,
size_t size, size_t num_subbuf,
int extra_reader_sb)
{
for (i = 0; i < num_subbuf_alloc; i++) {
bufb->array[i] =
lttng_kvzalloc_node(ALIGN(
- sizeof(struct lib_ring_buffer_backend_pages) +
- sizeof(struct lib_ring_buffer_backend_page)
+ sizeof(struct lttng_kernel_ring_buffer_backend_pages) +
+ sizeof(struct lttng_kernel_ring_buffer_backend_page)
* num_pages_per_subbuf,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_NOWARN,
/* Allocate write-side subbuffer table */
bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
- sizeof(struct lib_ring_buffer_backend_subbuffer)
+ sizeof(struct lttng_kernel_ring_buffer_backend_subbuffer)
* num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_NOWARN,
/* Allocate subbuffer packet counter table */
bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
- sizeof(struct lib_ring_buffer_backend_counts)
+ sizeof(struct lttng_kernel_ring_buffer_backend_counts)
* num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_NOWARN,
return -ENOMEM;
}
-int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_create(struct lttng_kernel_ring_buffer_backend *bufb,
struct channel_backend *chanb, int cpu)
{
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
bufb->chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
bufb->cpu = cpu;
chanb->extra_reader_sb);
}
-void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
+void lib_ring_buffer_backend_free(struct lttng_kernel_ring_buffer_backend *bufb)
{
struct channel_backend *chanb = &bufb->chan->backend;
unsigned long i, j, num_subbuf_alloc;
bufb->allocated = 0;
}
-void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
+void lib_ring_buffer_backend_reset(struct lttng_kernel_ring_buffer_backend *bufb)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
unsigned long num_subbuf_alloc;
unsigned int i;
void channel_backend_reset(struct channel_backend *chanb)
{
struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
/*
* Don't reset buf_size, subbuf_size, subbuf_size_order,
{
struct channel_backend *chanb = container_of(node,
struct channel_backend, cpuhp_prepare);
- const struct lib_ring_buffer_config *config = &chanb->config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+ struct lttng_kernel_ring_buffer *buf;
int ret;
CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
unsigned int cpu = (unsigned long)hcpu;
struct channel_backend *chanb = container_of(nb, struct channel_backend,
cpu_hp_notifier);
- const struct lib_ring_buffer_config *config = &chanb->config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+ struct lttng_kernel_ring_buffer *buf;
int ret;
CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
*/
int channel_backend_init(struct channel_backend *chanb,
const char *name,
- const struct lib_ring_buffer_config *config,
+ const struct lttng_kernel_ring_buffer_config *config,
void *priv, size_t subbuf_size, size_t num_subbuf)
{
struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
/* Allocating the buffer per-cpu structures */
- chanb->buf = alloc_percpu(struct lib_ring_buffer);
+ chanb->buf = alloc_percpu(struct lttng_kernel_ring_buffer);
if (!chanb->buf)
goto free_cpumask;
}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
} else {
- chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
+ chanb->buf = kzalloc(sizeof(struct lttng_kernel_ring_buffer), GFP_KERNEL);
if (!chanb->buf)
goto free_cpumask;
ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
#endif
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
for_each_possible_cpu(i) {
- struct lib_ring_buffer *buf =
+ struct lttng_kernel_ring_buffer *buf =
per_cpu_ptr(chanb->buf, i);
if (!buf->backend.allocated)
*/
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
*/
void channel_backend_free(struct channel_backend *chanb)
{
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
unsigned int i;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(i) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
if (!buf->backend.allocated)
continue;
free_cpumask_var(chanb->cpumask);
free_percpu(chanb->buf);
} else {
- struct lib_ring_buffer *buf = chanb->buf;
+ struct lttng_kernel_ring_buffer *buf = chanb->buf;
CHAN_WARN_ON(chanb, !buf->backend.allocated);
lib_ring_buffer_free(buf);
* @len : length to write
* @pagecpy : page size copied so far
*/
-void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
+void _lib_ring_buffer_write(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
const void *src, size_t len, size_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
size_t sbidx, index;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
do {
* @len : length to write
* @pagecpy : page size copied so far
*/
-void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
+void _lib_ring_buffer_memset(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset,
int c, size_t len, size_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
size_t sbidx, index;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
do {
* @pagecpy : page size copied so far
* @pad : character to use for padding
*/
-void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
+void _lib_ring_buffer_strcpy(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, const char *src, size_t len,
size_t pagecpy, int pad)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
size_t sbidx, index;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
int src_terminated = 0;
* directly without having the src pointer checked with access_ok()
* previously.
*/
-void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+void _lib_ring_buffer_copy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset,
const void __user *src, size_t len,
size_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
size_t sbidx, index;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
int ret;
* directly without having the src pointer checked with access_ok()
* previously.
*/
-void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+void _lib_ring_buffer_strcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, const char __user *src, size_t len,
size_t pagecpy, int pad)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
size_t sbidx, index;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
int src_terminated = 0;
* Should be protected by get_subbuf/put_subbuf.
* Returns the length copied.
*/
-size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
+size_t lib_ring_buffer_read(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
void *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
size_t index, pagecpy, orig_len;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
orig_len = len;
* function.
* Returns -EFAULT on error, 0 if ok.
*/
-int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
+int __lib_ring_buffer_copy_to_user(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, void __user *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
size_t index;
ssize_t pagecpy;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
* Should be protected by get_subbuf/put_subbuf.
* Destination length should be at least 1 to hold '\0'.
*/
-int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
+int lib_ring_buffer_read_cstr(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
void *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
size_t index;
ssize_t pagecpy, pagelen, strpagelen, orig_offset;
char *str;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
* Should be protected by get_subbuf/put_subbuf.
* Returns the pointer to the page frame number unsigned long.
*/
-unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
+unsigned long *lib_ring_buffer_read_get_pfn(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, void ***virt)
{
size_t index;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
* from/to this address, as long as the read/write is never bigger than a
* page size.
*/
-void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
+void *lib_ring_buffer_read_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset)
{
size_t index;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
* it's always at the beginning of a page, it's safe to write directly to this
* address, as long as the write is never bigger than a page size.
*/
-void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
+void *lib_ring_buffer_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset)
{
size_t sbidx, index;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_kernel_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
static
void lib_ring_buffer_print_errors(struct lttng_kernel_ring_buffer_channel *chan,
- struct lib_ring_buffer *buf, int cpu);
+ struct lttng_kernel_ring_buffer *buf, int cpu);
static
-void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
+void _lib_ring_buffer_switch_remote(struct lttng_kernel_ring_buffer *buf,
enum switch_mode mode);
static
-int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+int lib_ring_buffer_poll_deliver(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan)
{
unsigned long consumed_old, consumed_idx, commit_count, write_offset;
/*
* Must be called under cpu hotplug protection.
*/
-void lib_ring_buffer_free(struct lib_ring_buffer *buf)
+void lib_ring_buffer_free(struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
* should not be using the iterator concurrently with reset. The previous
* current iterator record is reset.
*/
-void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
+void lib_ring_buffer_reset(struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned int i;
/*
static void lib_ring_buffer_pending_wakeup_buf(struct irq_work *entry)
{
- struct lib_ring_buffer *buf = container_of(entry, struct lib_ring_buffer,
+ struct lttng_kernel_ring_buffer *buf = container_of(entry, struct lttng_kernel_ring_buffer,
wakeup_pending);
wake_up_interruptible(&buf->read_wait);
}
/*
* Must be called under cpu hotplug protection.
*/
-int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+int lib_ring_buffer_create(struct lttng_kernel_ring_buffer *buf,
struct channel_backend *chanb, int cpu)
{
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
void *priv = chanb->priv;
size_t subbuf_header_size;
static void switch_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
{
- struct lib_ring_buffer *buf = lttng_from_timer(buf, t, switch_timer);
+ struct lttng_kernel_ring_buffer *buf = lttng_from_timer(buf, t, switch_timer);
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
/*
* Only flush buffers periodically if readers are active.
/*
* Called with ring_buffer_nohz_lock held for per-cpu buffers.
*/
-static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_start_switch_timer(struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned int flags = 0;
if (!chan->switch_timer_interval || buf->switch_timer_enabled)
/*
* Called with ring_buffer_nohz_lock held for per-cpu buffers.
*/
-static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_stop_switch_timer(struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
*/
static void read_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
{
- struct lib_ring_buffer *buf = lttng_from_timer(buf, t, read_timer);
+ struct lttng_kernel_ring_buffer *buf = lttng_from_timer(buf, t, read_timer);
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, !buf->backend.allocated);
/*
* Called with ring_buffer_nohz_lock held for per-cpu buffers.
*/
-static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_start_read_timer(struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned int flags = 0;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
/*
* Called with ring_buffer_nohz_lock held for per-cpu buffers.
*/
-static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_stop_read_timer(struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
{
struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_prepare);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
{
struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_online);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
{
struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_online);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
unsigned int cpu = (unsigned long)hcpu;
struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
cpu_hp_notifier);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (!chan->cpu_hp_enable)
return NOTIFY_DONE;
{
struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
tick_nohz_notifier);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
int cpu = smp_processor_id();
if (config->alloc != RING_BUFFER_ALLOC_PER_CPU) {
*/
static void channel_unregister_notifiers(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
channel_iterator_unregister_notifiers(chan);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
get_online_cpus();
chan->cpu_hp_enable = 0;
for_each_online_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
lib_ring_buffer_stop_switch_timer(buf);
lib_ring_buffer_stop_read_timer(buf);
unregister_cpu_notifier(&chan->cpu_hp_notifier);
#else
for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
lib_ring_buffer_stop_switch_timer(buf);
lib_ring_buffer_stop_read_timer(buf);
}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
lib_ring_buffer_stop_switch_timer(buf);
lib_ring_buffer_stop_read_timer(buf);
channel_backend_unregister_notifiers(&chan->backend);
}
-static void lib_ring_buffer_set_quiescent(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_set_quiescent(struct lttng_kernel_ring_buffer *buf)
{
if (!buf->quiescent) {
buf->quiescent = true;
}
}
-static void lib_ring_buffer_clear_quiescent(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_clear_quiescent(struct lttng_kernel_ring_buffer *buf)
{
buf->quiescent = false;
}
void lib_ring_buffer_set_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan)
{
int cpu;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
get_online_cpus();
for_each_channel_cpu(cpu, chan) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
lib_ring_buffer_set_quiescent(buf);
}
put_online_cpus();
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
lib_ring_buffer_set_quiescent(buf);
}
void lib_ring_buffer_clear_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan)
{
int cpu;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
get_online_cpus();
for_each_channel_cpu(cpu, chan) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
lib_ring_buffer_clear_quiescent(buf);
}
put_online_cpus();
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
lib_ring_buffer_clear_quiescent(buf);
}
* Holds cpu hotplug.
* Returns NULL on failure.
*/
-struct lttng_kernel_ring_buffer_channel *channel_create(const struct lib_ring_buffer_config *config,
+struct lttng_kernel_ring_buffer_channel *channel_create(const struct lttng_kernel_ring_buffer_config *config,
const char *name, void *priv, void *buf_addr,
size_t subbuf_size,
size_t num_subbuf, unsigned int switch_timer_interval,
get_online_cpus();
for_each_online_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
lib_ring_buffer_start_switch_timer(buf);
put_online_cpus();
#else
for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
lib_ring_buffer_start_switch_timer(buf);
#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
lib_ring_buffer_start_switch_timer(buf);
lib_ring_buffer_start_read_timer(buf);
void *channel_destroy(struct lttng_kernel_ring_buffer_channel *chan)
{
int cpu;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
void *priv;
irq_work_sync(&chan->wakeup_pending);
* unregistered.
*/
for_each_channel_cpu(cpu, chan) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
if (config->cb.buffer_finalize)
wake_up_interruptible(&buf->read_wait);
}
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
if (config->cb.buffer_finalize)
config->cb.buffer_finalize(buf, chan->backend.priv, -1);
}
EXPORT_SYMBOL_GPL(channel_destroy);
-struct lib_ring_buffer *channel_get_ring_buffer(
- const struct lib_ring_buffer_config *config,
+struct lttng_kernel_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
}
EXPORT_SYMBOL_GPL(channel_get_ring_buffer);
-int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
+int lib_ring_buffer_open_read(struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_open_read);
-void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
+void lib_ring_buffer_release_read(struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
* Busy-loop trying to get data if the tick_nohz sequence lock is held.
*/
-int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
+int lib_ring_buffer_snapshot(struct lttng_kernel_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, write_offset;
int finalized;
* This function is meant to provide information on the exact producer and
* consumer positions without regard for the "snapshot" feature.
*/
-int lib_ring_buffer_snapshot_sample_positions(struct lib_ring_buffer *buf,
+int lib_ring_buffer_snapshot_sample_positions(struct lttng_kernel_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
smp_rmb();
*consumed = atomic_long_read(&buf->consumed);
* @buf: ring buffer
* @consumed_new: new consumed count value
*/
-void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
+void lib_ring_buffer_move_consumer(struct lttng_kernel_ring_buffer *buf,
unsigned long consumed_new)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &buf->backend;
struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
unsigned long consumed;
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static void lib_ring_buffer_flush_read_subbuf_dcache(
- const struct lib_ring_buffer_config *config,
+ const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan,
- struct lib_ring_buffer *buf)
+ struct lttng_kernel_ring_buffer *buf)
{
- struct lib_ring_buffer_backend_pages *pages;
+ struct lttng_kernel_ring_buffer_backend_pages *pages;
unsigned long sb_bindex, id, i, nr_pages;
if (config->output != RING_BUFFER_MMAP)
pages = buf->backend.array[sb_bindex];
nr_pages = buf->backend.num_pages_per_subbuf;
for (i = 0; i < nr_pages; i++) {
- struct lib_ring_buffer_backend_page *backend_page;
+ struct lttng_kernel_ring_buffer_backend_page *backend_page;
backend_page = &pages->p[i];
flush_dcache_page(pfn_to_page(backend_page->pfn));
}
#else
static void lib_ring_buffer_flush_read_subbuf_dcache(
- const struct lib_ring_buffer_config *config,
+ const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan,
- struct lib_ring_buffer *buf)
+ struct lttng_kernel_ring_buffer *buf)
{
}
#endif
* data to read at consumed position, or 0 if the get operation succeeds.
* Busy-loop trying to get data if the tick_nohz sequence lock is held.
*/
-int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
+int lib_ring_buffer_get_subbuf(struct lttng_kernel_ring_buffer *buf,
unsigned long consumed)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret;
int finalized;
* lib_ring_buffer_put_subbuf - release exclusive subbuffer access
* @buf: ring buffer
*/
-void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
+void lib_ring_buffer_put_subbuf(struct lttng_kernel_ring_buffer *buf)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &buf->backend;
struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long read_sb_bindex, consumed_idx, consumed;
CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
* position and the writer position. (inclusive)
*/
static
-void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_subbuffer_errors(struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
unsigned long cons_offset,
int cpu)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long cons_idx, commit_count, commit_count_sb;
cons_idx = subbuf_index(cons_offset, chan);
}
static
-void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_buffer_errors(struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
void *priv, int cpu)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long write_offset, cons_offset;
/*
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static
void lib_ring_buffer_print_records_count(struct lttng_kernel_ring_buffer_channel *chan,
- struct lib_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer *buf,
int cpu)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (!strcmp(chan->backend.name, "relay-metadata")) {
printk(KERN_DEBUG "LTTng: ring buffer %s: %lu records written, "
#else
static
void lib_ring_buffer_print_records_count(struct lttng_kernel_ring_buffer_channel *chan,
- struct lib_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer *buf,
int cpu)
{
}
static
void lib_ring_buffer_print_errors(struct lttng_kernel_ring_buffer_channel *chan,
- struct lib_ring_buffer *buf, int cpu)
+ struct lttng_kernel_ring_buffer *buf, int cpu)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
void *priv = chan->backend.priv;
lib_ring_buffer_print_records_count(chan, buf, cpu);
* Only executed when the buffer is finalized, in SWITCH_FLUSH.
*/
static
-void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_start(struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old, chan);
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
* subbuffer.
*/
static
-void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_end(struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
unsigned long commit_count, padding_size, data_size;
struct commit_counters_hot *cc_hot;
* that this code is executed before the deliver of this sub-buffer.
*/
static
-void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_new_start(struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long beginidx = subbuf_index(offsets->begin, chan);
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
* we are currently doing the space reservation.
*/
static
-void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_new_end(struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long endidx, data_size;
u64 *ts_end;
*/
static
int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
- struct lib_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 *tsc)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long off, reserve_commit_diff;
offsets->begin = v_read(config, &buf->offset);
* operations, this function must be called from the CPU which owns the buffer
* for a ACTIVE flush.
*/
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
+void lib_ring_buffer_switch_slow(struct lttng_kernel_ring_buffer *buf, enum switch_mode mode)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
struct switch_offsets offsets;
unsigned long oldidx;
u64 tsc;
EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
struct switch_param {
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
enum switch_mode mode;
};
static void remote_switch(void *info)
{
struct switch_param *param = info;
- struct lib_ring_buffer *buf = param->buf;
+ struct lttng_kernel_ring_buffer *buf = param->buf;
lib_ring_buffer_switch_slow(buf, param->mode);
}
-static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
+static void _lib_ring_buffer_switch_remote(struct lttng_kernel_ring_buffer *buf,
enum switch_mode mode)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
int ret;
struct switch_param param;
}
/* Switch sub-buffer if current sub-buffer is non-empty. */
-void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf)
+void lib_ring_buffer_switch_remote(struct lttng_kernel_ring_buffer *buf)
{
_lib_ring_buffer_switch_remote(buf, SWITCH_ACTIVE);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote);
/* Switch sub-buffer even if current sub-buffer is empty. */
-void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf)
+void lib_ring_buffer_switch_remote_empty(struct lttng_kernel_ring_buffer *buf)
{
_lib_ring_buffer_switch_remote(buf, SWITCH_FLUSH);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote_empty);
-void lib_ring_buffer_clear(struct lib_ring_buffer *buf)
+void lib_ring_buffer_clear(struct lttng_kernel_ring_buffer *buf)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &buf->backend;
struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
lib_ring_buffer_switch_remote(buf);
* -EIO if data cannot be written into the buffer for any other reason.
*/
static
-int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
+int lib_ring_buffer_try_reserve_slow(struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long reserve_commit_diff, offset_cmp;
retry:
return 0;
}
-static struct lib_ring_buffer *get_current_buf(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
+static struct lttng_kernel_ring_buffer *get_current_buf(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
return per_cpu_ptr(chan->backend.buf, cpu);
void lib_ring_buffer_lost_event_too_big(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf = get_current_buf(chan, smp_processor_id());
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = get_current_buf(chan, smp_processor_id());
v_inc(config, &buf->records_lost_big);
}
void *client_ctx)
{
struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
struct switch_offsets offsets;
int ret;
EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
static
-void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_vmcore_check_deliver(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
unsigned long commit_count,
unsigned long idx)
{
*/
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static
-void deliver_count_events(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void deliver_count_events(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
unsigned long idx)
{
v_add(config, subbuffer_get_records_count(config,
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static
-void deliver_count_events(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void deliver_count_events(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
unsigned long idx)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
-void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_check_deliver_slow(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
struct lttng_kernel_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
* buffer is empty and finalized. The buffer must already be opened for reading.
*/
ssize_t lib_ring_buffer_get_next_record(struct lttng_kernel_ring_buffer_channel *chan,
- struct lib_ring_buffer *buf)
+ struct lttng_kernel_ring_buffer *buf)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer_iter *iter = &buf->iter;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_iter *iter = &buf->iter;
int ret;
restart:
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
-void lib_ring_buffer_put_current_record(struct lib_ring_buffer *buf)
+void lib_ring_buffer_put_current_record(struct lttng_kernel_ring_buffer *buf)
{
- struct lib_ring_buffer_iter *iter;
+ struct lttng_kernel_ring_buffer_iter *iter;
if (!buf)
return;
static int buf_is_higher(void *a, void *b)
{
- struct lib_ring_buffer *bufa = a;
- struct lib_ring_buffer *bufb = b;
+ struct lttng_kernel_ring_buffer *bufa = a;
+ struct lttng_kernel_ring_buffer *bufb = b;
/* Consider lowest timestamps to be at the top of the heap */
return (bufa->iter.timestamp < bufb->iter.timestamp);
}
static
-void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_get_empty_buf_records(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan)
{
struct lttng_ptr_heap *heap = &chan->iter.heap;
- struct lib_ring_buffer *buf, *tmp;
+ struct lttng_kernel_ring_buffer *buf, *tmp;
ssize_t len;
list_for_each_entry_safe(buf, tmp, &chan->iter.empty_head,
}
static
-void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_wait_for_qs(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan)
{
u64 timestamp_qs;
*/
ssize_t channel_get_next_record(struct lttng_kernel_ring_buffer_channel *chan,
- struct lib_ring_buffer **ret_buf)
+ struct lttng_kernel_ring_buffer **ret_buf)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
struct lttng_ptr_heap *heap;
ssize_t len;
EXPORT_SYMBOL_GPL(channel_get_next_record);
static
-void lib_ring_buffer_iterator_init(struct lttng_kernel_ring_buffer_channel *chan, struct lib_ring_buffer *buf)
+void lib_ring_buffer_iterator_init(struct lttng_kernel_ring_buffer_channel *chan, struct lttng_kernel_ring_buffer *buf)
{
if (buf->iter.allocated)
return;
{
struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_iter_online);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
unsigned int cpu = (unsigned long)hcpu;
struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
hp_iter_notifier);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (!chan->hp_iter_enable)
return NOTIFY_DONE;
int channel_iterator_init(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
int ret;
void channel_iterator_unregister_notifiers(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
void channel_iterator_free(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
lttng_heap_free(&chan->iter.heap);
}
-int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
+int lib_ring_buffer_iterator_open(struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
return lib_ring_buffer_open_read(buf);
}
* iterator can leave the buffer in "GET" state, which is not consistent with
* other types of output (mmap, splice, raw data read).
*/
-void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf)
+void lib_ring_buffer_iterator_release(struct lttng_kernel_ring_buffer *buf)
{
lib_ring_buffer_release_read(buf);
}
int channel_iterator_open(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
int ret = 0, cpu;
CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
void channel_iterator_release(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
int cpu;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
}
EXPORT_SYMBOL_GPL(channel_iterator_release);
-void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
+void lib_ring_buffer_iterator_reset(struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
void channel_iterator_reset(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
int cpu;
/* Empty heap, put into empty_head */
size_t count,
loff_t *ppos,
struct lttng_kernel_ring_buffer_channel *chan,
- struct lib_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer *buf,
int fusionmerge)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
size_t read_count = 0, read_offset;
ssize_t len;
loff_t *ppos)
{
struct inode *inode = filp->lttng_f_dentry->d_inode;
- struct lib_ring_buffer *buf = inode->i_private;
+ struct lttng_kernel_ring_buffer *buf = inode->i_private;
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
return channel_ring_buffer_file_read(filp, user_buf, count, ppos,
{
struct inode *inode = filp->lttng_f_dentry->d_inode;
struct lttng_kernel_ring_buffer_channel *chan = inode->i_private;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
return channel_ring_buffer_file_read(filp, user_buf, count,
ppos, chan, NULL, 1);
else {
- struct lib_ring_buffer *buf =
+ struct lttng_kernel_ring_buffer *buf =
channel_get_ring_buffer(config, chan, 0);
return channel_ring_buffer_file_read(filp, user_buf, count,
ppos, chan, buf, 0);
static
int lib_ring_buffer_file_open(struct inode *inode, struct file *file)
{
- struct lib_ring_buffer *buf = inode->i_private;
+ struct lttng_kernel_ring_buffer *buf = inode->i_private;
int ret;
ret = lib_ring_buffer_iterator_open(buf);
static
int lib_ring_buffer_file_release(struct inode *inode, struct file *file)
{
- struct lib_ring_buffer *buf = inode->i_private;
+ struct lttng_kernel_ring_buffer *buf = inode->i_private;
lib_ring_buffer_iterator_release(buf);
return 0;
static int lib_ring_buffer_fault_compat(struct vm_area_struct *vma, struct vm_fault *vmf)
#endif
{
- struct lib_ring_buffer *buf = vma->vm_private_data;
+ struct lttng_kernel_ring_buffer *buf = vma->vm_private_data;
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
pgoff_t pgoff = vmf->pgoff;
unsigned long *pfnp;
void **virt;
*
* Caller should already have grabbed mmap_sem.
*/
-static int lib_ring_buffer_mmap_buf(struct lib_ring_buffer *buf,
+static int lib_ring_buffer_mmap_buf(struct lttng_kernel_ring_buffer *buf,
struct vm_area_struct *vma)
{
unsigned long length = vma->vm_end - vma->vm_start;
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long mmap_buf_len;
if (config->output != RING_BUFFER_MMAP)
}
int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma,
- struct lib_ring_buffer *buf)
+ struct lttng_kernel_ring_buffer *buf)
{
return lib_ring_buffer_mmap_buf(buf, vma);
}
*/
int vfs_lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct lib_ring_buffer *buf = filp->private_data;
+ struct lttng_kernel_ring_buffer *buf = filp->private_data;
return lib_ring_buffer_mmap(filp, vma, buf);
}
EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_mmap);
struct pipe_inode_info *pipe,
size_t len,
unsigned int flags,
- struct lib_ring_buffer *buf)
+ struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned int poff, subbuf_pages, nr_pages;
struct page *pages[PIPE_DEF_BUFFERS];
struct partial_page partial[PIPE_DEF_BUFFERS];
ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags,
- struct lib_ring_buffer *buf)
+ struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
ssize_t spliced;
int ret;
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
- struct lib_ring_buffer *buf = in->private_data;
+ struct lttng_kernel_ring_buffer *buf = in->private_data;
return lib_ring_buffer_splice_read(in, ppos, pipe, len, flags, buf);
}
* there if we ever want to implement an inode with open() operation.
*/
int lib_ring_buffer_open(struct inode *inode, struct file *file,
- struct lib_ring_buffer *buf)
+ struct lttng_kernel_ring_buffer *buf)
{
int ret;
static
int vfs_lib_ring_buffer_open(struct inode *inode, struct file *file)
{
- struct lib_ring_buffer *buf = inode->i_private;
+ struct lttng_kernel_ring_buffer *buf = inode->i_private;
file->private_data = buf;
return lib_ring_buffer_open(inode, file, buf);
}
int lib_ring_buffer_release(struct inode *inode, struct file *file,
- struct lib_ring_buffer *buf)
+ struct lttng_kernel_ring_buffer *buf)
{
lib_ring_buffer_release_read(buf);
static
int vfs_lib_ring_buffer_release(struct inode *inode, struct file *file)
{
- struct lib_ring_buffer *buf = file->private_data;
+ struct lttng_kernel_ring_buffer *buf = file->private_data;
return lib_ring_buffer_release(inode, file, buf);
}
unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
- struct lib_ring_buffer *buf)
+ struct lttng_kernel_ring_buffer *buf)
{
unsigned int mask = 0;
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
int finalized, disabled;
if (filp->f_mode & FMODE_READ) {
static
unsigned int vfs_lib_ring_buffer_poll(struct file *filp, poll_table *wait)
{
- struct lib_ring_buffer *buf = filp->private_data;
+ struct lttng_kernel_ring_buffer *buf = filp->private_data;
return lib_ring_buffer_poll(filp, wait, buf);
}
long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg, struct lib_ring_buffer *buf)
+ unsigned long arg, struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (lib_ring_buffer_channel_is_disabled(chan))
return -EIO;
static
long vfs_lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct lib_ring_buffer *buf = filp->private_data;
+ struct lttng_kernel_ring_buffer *buf = filp->private_data;
return lib_ring_buffer_ioctl(filp, cmd, arg, buf);
}
#ifdef CONFIG_COMPAT
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg, struct lib_ring_buffer *buf)
+ unsigned long arg, struct lttng_kernel_ring_buffer *buf)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (lib_ring_buffer_channel_is_disabled(chan))
return -EIO;
long vfs_lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
- struct lib_ring_buffer *buf = filp->private_data;
+ struct lttng_kernel_ring_buffer *buf = filp->private_data;
return lib_ring_buffer_compat_ioctl(filp, cmd, arg, buf);
}
{
struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
struct lttng_kernel_ring_buffer_channel *chan = event_notifier_group->chan;
- struct lib_ring_buffer *buf = event_notifier_group->buf;
+ struct lttng_kernel_ring_buffer *buf = event_notifier_group->buf;
ssize_t read_count = 0, len;
size_t read_offset;
unsigned int mask = 0;
struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
struct lttng_kernel_ring_buffer_channel *chan = event_notifier_group->chan;
- struct lib_ring_buffer *buf = event_notifier_group->buf;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = event_notifier_group->buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
int finalized, disabled;
unsigned long consumed, offset;
size_t subbuffer_header_size = config->cb.subbuffer_header_size();
static int lttng_event_notifier_group_notif_open(struct inode *inode, struct file *file)
{
struct lttng_event_notifier_group *event_notifier_group = inode->i_private;
- struct lib_ring_buffer *buf = event_notifier_group->buf;
+ struct lttng_kernel_ring_buffer *buf = event_notifier_group->buf;
file->private_data = event_notifier_group;
return lib_ring_buffer_open(inode, file, buf);
static int lttng_event_notifier_group_notif_release(struct inode *inode, struct file *file)
{
struct lttng_event_notifier_group *event_notifier_group = file->private_data;
- struct lib_ring_buffer *buf = event_notifier_group->buf;
+ struct lttng_kernel_ring_buffer *buf = event_notifier_group->buf;
int ret;
ret = lib_ring_buffer_release(inode, file, buf);
poll_table *wait)
{
struct lttng_metadata_stream *stream = filp->private_data;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
int finalized;
unsigned int mask = 0;
{
int ret;
struct lttng_metadata_stream *stream = filp->private_data;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
unsigned int rb_cmd;
bool coherent;
case LTTNG_KERNEL_ABI_RING_BUFFER_GET_NEXT_SUBBUF:
{
struct lttng_metadata_stream *stream = filp->private_data;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
ret = lttng_metadata_output_channel(stream, chan, NULL);
case LTTNG_KERNEL_ABI_RING_BUFFER_FLUSH:
{
struct lttng_metadata_stream *stream = filp->private_data;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
/*
case LTTNG_KERNEL_ABI_RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
{
struct lttng_metadata_stream *stream = filp->private_data;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
ret = lttng_metadata_output_channel(stream, chan, &coherent);
{
int ret;
struct lttng_metadata_stream *stream = filp->private_data;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
unsigned int rb_cmd;
bool coherent;
case LTTNG_KERNEL_ABI_RING_BUFFER_GET_NEXT_SUBBUF:
{
struct lttng_metadata_stream *stream = filp->private_data;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
ret = lttng_metadata_output_channel(stream, chan, NULL);
case LTTNG_KERNEL_ABI_RING_BUFFER_FLUSH:
{
struct lttng_metadata_stream *stream = filp->private_data;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
/*
case LTTNG_KERNEL_ABI_RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
{
struct lttng_metadata_stream *stream = filp->private_data;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
ret = lttng_metadata_output_channel(stream, chan, &coherent);
int lttng_metadata_ring_buffer_open(struct inode *inode, struct file *file)
{
struct lttng_metadata_stream *stream = inode->i_private;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
file->private_data = buf;
/*
int lttng_metadata_ring_buffer_release(struct inode *inode, struct file *file)
{
struct lttng_metadata_stream *stream = file->private_data;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
mutex_lock(&stream->metadata_cache->lock);
list_del(&stream->list);
unsigned int flags)
{
struct lttng_metadata_stream *stream = in->private_data;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
return lib_ring_buffer_splice_read(in, ppos, pipe, len,
flags, buf);
struct vm_area_struct *vma)
{
struct lttng_metadata_stream *stream = filp->private_data;
- struct lib_ring_buffer *buf = stream->priv;
+ struct lttng_kernel_ring_buffer *buf = stream->priv;
return lib_ring_buffer_mmap(filp, vma, buf);
}
int lttng_abi_open_stream(struct file *channel_file)
{
struct lttng_kernel_channel_buffer *channel = channel_file->private_data;
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
int ret;
void *stream_priv;
{
struct lttng_kernel_channel_buffer *channel = channel_file->private_data;
struct lttng_kernel_session *session = channel->parent.session;
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
int ret;
struct lttng_metadata_stream *metadata_stream;
void *stream_priv;
{
struct lttng_event_notifier_group *event_notifier_group = notif_file->private_data;
struct lttng_kernel_ring_buffer_channel *chan = event_notifier_group->chan;
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
int ret;
void *stream_priv;
static long lttng_stream_ring_buffer_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
- struct lib_ring_buffer *buf = filp->private_data;
+ struct lttng_kernel_ring_buffer *buf = filp->private_data;
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
const struct lttng_kernel_channel_buffer_ops *ops = chan->backend.priv_ops;
int ret;
static long lttng_stream_ring_buffer_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
- struct lib_ring_buffer *buf = filp->private_data;
+ struct lttng_kernel_ring_buffer *buf = filp->private_data;
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
const struct lttng_kernel_channel_buffer_ops *ops = chan->backend.priv_ops;
int ret;
* contains.
*/
static __inline__
-size_t record_header_size(const struct lib_ring_buffer_config *config,
+size_t record_header_size(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
#include <ringbuffer/api.h>
static
-void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
+void lttng_write_event_header_slow(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
uint32_t event_id);
* @event_id: event ID
*/
static __inline__
-void lttng_write_event_header(const struct lib_ring_buffer_config *config,
+void lttng_write_event_header(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
uint32_t event_id)
{
}
static
-void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
+void lttng_write_event_header_slow(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
uint32_t event_id)
{
lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
-static const struct lib_ring_buffer_config client_config;
+static const struct lttng_kernel_ring_buffer_config client_config;
static u64 client_ring_buffer_clock_read(struct lttng_kernel_ring_buffer_channel *chan)
{
}
static
-size_t client_record_header_size(const struct lib_ring_buffer_config *config,
+size_t client_record_header_size(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
return offsetof(struct packet_header, ctx.header_end);
}
-static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
+static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
* offset is assumed to never be 0 here : never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
+static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx, unsigned long data_size)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
header->ctx.events_discarded = records_lost;
}
-static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
+static int client_buffer_create(struct lttng_kernel_ring_buffer *buf, void *priv,
int cpu, const char *name)
{
return 0;
}
-static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
+static void client_buffer_finalize(struct lttng_kernel_ring_buffer *buf, void *priv, int cpu)
{
}
static struct packet_header *client_packet_header(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
return lib_ring_buffer_read_offset_address(&buf->backend, 0);
}
-static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+static int client_timestamp_begin(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
uint64_t *timestamp_begin)
{
struct packet_header *header = client_packet_header(config, buf);
return 0;
}
-static int client_timestamp_end(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+static int client_timestamp_end(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
uint64_t *timestamp_end)
{
struct packet_header *header = client_packet_header(config, buf);
return 0;
}
-static int client_events_discarded(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+static int client_events_discarded(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
uint64_t *events_discarded)
{
struct packet_header *header = client_packet_header(config, buf);
return 0;
}
-static int client_content_size(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+static int client_content_size(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
uint64_t *content_size)
{
struct packet_header *header = client_packet_header(config, buf);
return 0;
}
-static int client_packet_size(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+static int client_packet_size(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
uint64_t *packet_size)
{
struct packet_header *header = client_packet_header(config, buf);
return 0;
}
-static int client_stream_id(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+static int client_stream_id(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
uint64_t *stream_id)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
return 0;
}
-static int client_current_timestamp(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+static int client_current_timestamp(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *ts)
{
*ts = config->cb.ring_buffer_clock_read(bufb->backend.chan);
return 0;
}
-static int client_sequence_number(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+static int client_sequence_number(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
uint64_t *seq)
{
struct packet_header *header = client_packet_header(config, buf);
}
static
-int client_instance_id(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+int client_instance_id(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
uint64_t *id)
{
*id = buf->backend.cpu;
return 0;
}
-static const struct lib_ring_buffer_config client_config = {
+static const struct lttng_kernel_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
.cb.subbuffer_header_size = client_packet_header_size,
}
static
-struct lib_ring_buffer *lttng_buffer_read_open(struct lttng_kernel_ring_buffer_channel *chan)
+struct lttng_kernel_ring_buffer *lttng_buffer_read_open(struct lttng_kernel_ring_buffer_channel *chan)
{
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
int cpu;
for_each_channel_cpu(cpu, chan) {
static
int lttng_buffer_has_read_closed_stream(struct lttng_kernel_ring_buffer_channel *chan)
{
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
int cpu;
for_each_channel_cpu(cpu, chan) {
}
static
-void lttng_buffer_read_close(struct lib_ring_buffer *buf)
+void lttng_buffer_read_close(struct lttng_kernel_ring_buffer *buf)
{
lib_ring_buffer_release_read(buf);
}
static
wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
- struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
+ struct lttng_kernel_ring_buffer *buf = channel_get_ring_buffer(&client_config,
chan, cpu);
return &buf->write_wait;
}
uint8_t header_end[0]; /* End of header */
};
-static const struct lib_ring_buffer_config client_config;
+static const struct lttng_kernel_ring_buffer_config client_config;
static inline
u64 lib_ring_buffer_clock_read(struct lttng_kernel_ring_buffer_channel *chan)
}
static inline
-size_t record_header_size(const struct lib_ring_buffer_config *config,
+size_t record_header_size(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
}
static
-size_t client_record_header_size(const struct lib_ring_buffer_config *config,
+size_t client_record_header_size(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
return offsetof(struct event_notifier_packet_header, header_end);
}
-static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
+static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx)
{
}
* offset is assumed to never be 0 here : never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
+static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx, unsigned long data_size)
{
}
-static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
+static int client_buffer_create(struct lttng_kernel_ring_buffer *buf, void *priv,
int cpu, const char *name)
{
return 0;
}
-static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
+static void client_buffer_finalize(struct lttng_kernel_ring_buffer *buf, void *priv, int cpu)
{
}
-static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, uint64_t *timestamp_begin)
+static int client_timestamp_begin(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf, uint64_t *timestamp_begin)
{
return -ENOSYS;
}
-static int client_timestamp_end(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+static int client_timestamp_end(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *timestamp_end)
{
return -ENOSYS;
}
-static int client_events_discarded(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+static int client_events_discarded(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *events_discarded)
{
return -ENOSYS;
}
-static int client_current_timestamp(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+static int client_current_timestamp(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *ts)
{
return -ENOSYS;
}
-static int client_content_size(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+static int client_content_size(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *content_size)
{
return -ENOSYS;
}
-static int client_packet_size(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+static int client_packet_size(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *packet_size)
{
return -ENOSYS;
}
-static int client_stream_id(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+static int client_stream_id(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *stream_id)
{
return -ENOSYS;
}
-static int client_sequence_number(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+static int client_sequence_number(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *seq)
{
return -ENOSYS;
}
static
-int client_instance_id(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *bufb,
+int client_instance_id(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *bufb,
uint64_t *id)
{
return -ENOSYS;
}
-static void client_record_get(const struct lib_ring_buffer_config *config,
- struct lttng_kernel_ring_buffer_channel *chan, struct lib_ring_buffer *buf,
+static void client_record_get(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_channel *chan, struct lttng_kernel_ring_buffer *buf,
size_t offset, size_t *header_len,
size_t *payload_len, u64 *timestamp)
{
*timestamp = 0;
}
-static const struct lib_ring_buffer_config client_config = {
+static const struct lttng_kernel_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
.cb.subbuffer_header_size = client_packet_header_size,
}
static
-struct lib_ring_buffer *lttng_buffer_read_open(struct lttng_kernel_ring_buffer_channel *chan)
+struct lttng_kernel_ring_buffer *lttng_buffer_read_open(struct lttng_kernel_ring_buffer_channel *chan)
{
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
buf = channel_get_ring_buffer(&client_config, chan, 0);
if (!lib_ring_buffer_open_read(buf))
static
int lttng_buffer_has_read_closed_stream(struct lttng_kernel_ring_buffer_channel *chan)
{
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
int cpu;
for_each_channel_cpu(cpu, chan) {
}
static
-void lttng_buffer_read_close(struct lib_ring_buffer *buf)
+void lttng_buffer_read_close(struct lttng_kernel_ring_buffer *buf)
{
lib_ring_buffer_release_read(buf);
}
static
-void lttng_write_event_notifier_header(const struct lib_ring_buffer_config *config,
+void lttng_write_event_notifier_header(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx)
{
uint32_t data_size;
size_t lttng_packet_avail_size(struct lttng_kernel_ring_buffer_channel *chan)
{
unsigned long o_begin;
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
buf = chan->backend.buf; /* Only for global buffer ! */
o_begin = v_read(&client_config, &buf->offset);
static
wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
- struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
+ struct lttng_kernel_ring_buffer *buf = channel_get_ring_buffer(&client_config,
chan, cpu);
return &buf->write_wait;
}
uint8_t header_end[0]; /* End of header */
};
-static const struct lib_ring_buffer_config client_config;
+static const struct lttng_kernel_ring_buffer_config client_config;
static inline
u64 lib_ring_buffer_clock_read(struct lttng_kernel_ring_buffer_channel *chan)
}
static inline
-size_t record_header_size(const struct lib_ring_buffer_config *config,
+size_t record_header_size(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
}
static
-size_t client_record_header_size(const struct lib_ring_buffer_config *config,
+size_t client_record_header_size(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
struct lttng_kernel_ring_buffer_ctx *ctx,
return offsetof(struct metadata_packet_header, header_end);
}
-static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
+static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
* offset is assumed to never be 0 here : never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
+static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx, unsigned long data_size)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
WARN_ON_ONCE(records_lost != 0);
}
/*
 * Per-buffer creation hook. This client keeps no per-buffer private
 * state, so creation is a no-op that reports success.
 */
static int client_buffer_create(struct lttng_kernel_ring_buffer *buf, void *priv,
		int cpu, const char *name)
{
	return 0;
}
/*
 * Per-buffer teardown hook: nothing was allocated in
 * client_buffer_create(), so there is nothing to release.
 */
static void client_buffer_finalize(struct lttng_kernel_ring_buffer *buf, void *priv, int cpu)
{
}
/* Packet begin-timestamp accessor: not implemented by this client. */
static int client_timestamp_begin(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *buf, uint64_t *timestamp_begin)
{
	return -ENOSYS;
}
/* Packet end-timestamp accessor: not implemented by this client. */
static int client_timestamp_end(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *bufb,
		uint64_t *timestamp_end)
{
	return -ENOSYS;
}
/* Discarded-events counter accessor: not implemented by this client. */
static int client_events_discarded(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *bufb,
		uint64_t *events_discarded)
{
	return -ENOSYS;
}
/* Current-timestamp accessor: not implemented by this client. */
static int client_current_timestamp(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *bufb,
		uint64_t *ts)
{
	return -ENOSYS;
}
/* Packet content-size accessor: not implemented by this client. */
static int client_content_size(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *bufb,
		uint64_t *content_size)
{
	return -ENOSYS;
}
/* Packet total-size accessor: not implemented by this client. */
static int client_packet_size(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *bufb,
		uint64_t *packet_size)
{
	return -ENOSYS;
}
/* Stream id accessor: not implemented by this client. */
static int client_stream_id(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *bufb,
		uint64_t *stream_id)
{
	return -ENOSYS;
}
/* Packet sequence-number accessor: not implemented by this client. */
static int client_sequence_number(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *bufb,
		uint64_t *seq)
{
	return -ENOSYS;
}
/* Buffer instance-id accessor: not implemented by this client. */
static
int client_instance_id(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *bufb,
		uint64_t *id)
{
	return -ENOSYS;
}
-static const struct lib_ring_buffer_config client_config = {
+static const struct lttng_kernel_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
.cb.subbuffer_header_size = client_packet_header_size,
}
static
-struct lib_ring_buffer *lttng_buffer_read_open(struct lttng_kernel_ring_buffer_channel *chan)
+struct lttng_kernel_ring_buffer *lttng_buffer_read_open(struct lttng_kernel_ring_buffer_channel *chan)
{
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
buf = channel_get_ring_buffer(&client_config, chan, 0);
if (!lib_ring_buffer_open_read(buf))
static
int lttng_buffer_has_read_closed_stream(struct lttng_kernel_ring_buffer_channel *chan)
{
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
int cpu;
for_each_channel_cpu(cpu, chan) {
}
/*
 * Drop the read-side reference on a buffer previously handed out by the
 * buffer_read_open callback.
 */
static
void lttng_buffer_read_close(struct lttng_kernel_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}
size_t lttng_packet_avail_size(struct lttng_kernel_ring_buffer_channel *chan)
{
unsigned long o_begin;
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
buf = chan->backend.buf; /* Only for global buffer ! */
o_begin = v_read(&client_config, &buf->offset);
static
wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
- struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
+ struct lttng_kernel_ring_buffer *buf = channel_get_ring_buffer(&client_config,
chan, cpu);
return &buf->write_wait;
}