struct ltt_channel;
struct ltt_session;
-struct lib_ring_buffer_ctx;
+struct lttng_ust_lib_ring_buffer_ctx;
/* Type description */
struct lttng_event_field event_field;
size_t (*get_size)(size_t offset);
void (*record)(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan);
union {
} u;
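/*
 * Illustrative sketch (not part of this patch): how a client might
 * implement the two callbacks above for a fixed-size context field.
 * The example_* names are hypothetical; getpid() merely stands in for
 * a real context value, and chan->ops is assumed to point at the
 * channel ops whose event_write member appears further below.
 */
static
size_t example_get_size(size_t offset)
{
    size_t size = 0;

    /* Account for alignment padding at the current record offset. */
    size += lib_ring_buffer_align(offset, __alignof__(pid_t));
    size += sizeof(pid_t);
    return size;
}

static
void example_record(struct lttng_ctx_field *field,
        struct lttng_ust_lib_ring_buffer_ctx *ctx,
        struct ltt_channel *chan)
{
    pid_t pid = getpid();

    lib_ring_buffer_align_ctx(ctx, __alignof__(pid));
    chan->ops->event_write(ctx, &pid, sizeof(pid));
}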
int *shm_fd, int *wait_fd,
uint64_t *memory_map_size);
void (*channel_destroy)(struct ltt_channel *ltt_chan);
- struct lib_ring_buffer *(*buffer_read_open)(struct channel *chan,
+ struct lttng_ust_lib_ring_buffer *(*buffer_read_open)(struct channel *chan,
struct lttng_ust_shm_handle *handle,
int *shm_fd, int *wait_fd,
uint64_t *memory_map_size);
- void (*buffer_read_close)(struct lib_ring_buffer *buf,
+ void (*buffer_read_close)(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle);
- int (*event_reserve)(struct lib_ring_buffer_ctx *ctx,
+ int (*event_reserve)(struct lttng_ust_lib_ring_buffer_ctx *ctx,
uint32_t event_id);
- void (*event_commit)(struct lib_ring_buffer_ctx *ctx);
- void (*event_write)(struct lib_ring_buffer_ctx *ctx, const void *src,
+ void (*event_commit)(struct lttng_ust_lib_ring_buffer_ctx *ctx);
+ void (*event_write)(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
size_t len);
/*
* packet_avail_size returns the available size in the current
{ \
struct ltt_event *__event = __data; \
struct ltt_channel *__chan = __event->chan; \
- struct lib_ring_buffer_ctx __ctx; \
+ struct lttng_ust_lib_ring_buffer_ctx __ctx; \
size_t __event_len, __event_align; \
size_t __dynamic_len_idx = 0; \
size_t __dynamic_len[_TP_ARRAY_SIZE(__event_fields___##_name)]; \
{ \
struct ltt_event *__event = __data; \
struct ltt_channel *__chan = __event->chan; \
- struct lib_ring_buffer_ctx __ctx; \
+ struct lttng_ust_lib_ring_buffer_ctx __ctx; \
size_t __event_len, __event_align; \
int __ret; \
\
/* Buffer operations */
struct lttng_ust_shm_handle;
-struct lib_ring_buffer;
+struct lttng_ust_lib_ring_buffer;
/* Open/close stream buffers for read */
-struct lib_ring_buffer *ustctl_open_stream_read(struct lttng_ust_shm_handle *handle,
+struct lttng_ust_lib_ring_buffer *ustctl_open_stream_read(struct lttng_ust_shm_handle *handle,
int cpu);
void ustctl_close_stream_read(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf);
+ struct lttng_ust_lib_ring_buffer *buf);
/* For mmap mode, readable without "get" operation */
int ustctl_get_mmap_len(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer *buf,
unsigned long *len);
int ustctl_get_max_subbuf_size(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer *buf,
unsigned long *len);
/*
* get_next/put_next).
*/
void *ustctl_get_mmap_base(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf);
+ struct lttng_ust_lib_ring_buffer *buf);
int ustctl_get_mmap_read_offset(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf, unsigned long *off);
+ struct lttng_ust_lib_ring_buffer *buf, unsigned long *off);
int ustctl_get_subbuf_size(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf, unsigned long *len);
+ struct lttng_ust_lib_ring_buffer *buf, unsigned long *len);
int ustctl_get_padded_subbuf_size(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf, unsigned long *len);
+ struct lttng_ust_lib_ring_buffer *buf, unsigned long *len);
int ustctl_get_next_subbuf(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf);
+ struct lttng_ust_lib_ring_buffer *buf);
int ustctl_put_next_subbuf(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf);
+ struct lttng_ust_lib_ring_buffer *buf);
/* snapshot */
int ustctl_snapshot(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf);
+ struct lttng_ust_lib_ring_buffer *buf);
int ustctl_snapshot_get_consumed(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf, unsigned long *pos);
+ struct lttng_ust_lib_ring_buffer *buf, unsigned long *pos);
int ustctl_snapshot_get_produced(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf, unsigned long *pos);
+ struct lttng_ust_lib_ring_buffer *buf, unsigned long *pos);
int ustctl_get_subbuf(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf, unsigned long *pos);
+ struct lttng_ust_lib_ring_buffer *buf, unsigned long *pos);
int ustctl_put_subbuf(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf);
+ struct lttng_ust_lib_ring_buffer *buf);
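/*
 * Illustrative sketch (not part of this patch): a consumer draining
 * one stream in mmap mode using only the prototypes above. Error
 * handling is reduced to leaving the loop; example_drain_stream is a
 * hypothetical name, and "handle"/"cpu" are assumed to come from the
 * session setup code.
 */
static
void example_drain_stream(struct lttng_ust_shm_handle *handle, int cpu)
{
    struct lttng_ust_lib_ring_buffer *buf;
    unsigned long off, len;
    void *base;

    buf = ustctl_open_stream_read(handle, cpu);
    if (!buf)
        return;
    while (ustctl_get_next_subbuf(handle, buf) == 0) {
        base = ustctl_get_mmap_base(handle, buf);
        if (!base)
            break;
        if (ustctl_get_mmap_read_offset(handle, buf, &off))
            break;
        if (ustctl_get_padded_subbuf_size(handle, buf, &len))
            break;
        /* Consume len bytes starting at base + off (e.g. write to disk). */
        if (ustctl_put_next_subbuf(handle, buf))
            break;
    }
    ustctl_close_stream_read(handle, buf);
}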
/* Release object created by members of this API */
void release_object(int sock, struct lttng_ust_object_data *data);
#include "ust/kcompat/kcompat.h"
#include "ust/align.h"
-struct lib_ring_buffer;
+struct lttng_ust_lib_ring_buffer;
struct channel;
-struct lib_ring_buffer_config;
-struct lib_ring_buffer_ctx;
+struct lttng_ust_lib_ring_buffer_config;
+struct lttng_ust_lib_ring_buffer_ctx;
struct lttng_ust_shm_handle *handle;
/*
* provided as inline functions too. These may simply return 0 if not used by
* the client.
*/
-struct lib_ring_buffer_client_cb {
+struct lttng_ust_lib_ring_buffer_client_cb {
/* Mandatory callbacks */
/* A static inline version is also required for fast path */
u64 (*ring_buffer_clock_read) (struct channel *chan);
- size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
+ size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx);
+ struct lttng_ust_lib_ring_buffer_ctx *ctx);
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
- void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
+ void (*buffer_begin) (struct lttng_ust_lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle);
- void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
+ void (*buffer_end) (struct lttng_ust_lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle);
/* Optional callbacks (can be set to NULL) */
/* Called at buffer creation/finalize */
- int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
+ int (*buffer_create) (struct lttng_ust_lib_ring_buffer *buf, void *priv,
int cpu, const char *name,
struct lttng_ust_shm_handle *handle);
/*
* Clients should guarantee that no new reader handle can be opened
* after finalize.
*/
- void (*buffer_finalize) (struct lib_ring_buffer *buf,
+ void (*buffer_finalize) (struct lttng_ust_lib_ring_buffer *buf,
void *priv, int cpu,
struct lttng_ust_shm_handle *handle);
* record. Used by buffer iterators. Timestamp is only used by channel
* iterator.
*/
- void (*record_get) (const struct lib_ring_buffer_config *config,
- struct channel *chan, struct lib_ring_buffer *buf,
+ void (*record_get) (const struct lttng_ust_lib_ring_buffer_config *config,
+ struct channel *chan, struct lttng_ust_lib_ring_buffer *buf,
size_t offset, size_t *header_len,
size_t *payload_len, u64 *timestamp,
struct lttng_ust_shm_handle *handle);
* RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
* has the responsibility to perform wakeups.
*/
-struct lib_ring_buffer_config {
+struct lttng_ust_lib_ring_buffer_config {
enum {
RING_BUFFER_ALLOC_PER_CPU,
RING_BUFFER_ALLOC_GLOBAL,
* 0 and 64 disable the timestamp compression scheme.
*/
unsigned int tsc_bits;
- struct lib_ring_buffer_client_cb cb;
+ struct lttng_ust_lib_ring_buffer_client_cb cb;
};
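/*
 * Illustrative sketch (not part of this patch): a partial client
 * configuration limited to fields visible in this excerpt. A real
 * client also fills the mandatory callbacks listed above and the
 * mode/output/sync enum fields cut from this hunk.
 */
static const struct lttng_ust_lib_ring_buffer_config example_config = {
    .alloc = RING_BUFFER_ALLOC_PER_CPU,  /* one buffer per possible cpu */
    .tsc_bits = 32,                      /* compress record timestamps to 32 bits */
    .wakeup = RING_BUFFER_WAKEUP_NONE,   /* client performs its own wakeups */
    /* .cb = { ...mandatory lttng_ust_lib_ring_buffer_client_cb callbacks... }, */
};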
/*
* lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
* lib_ring_buffer_write().
*/
-struct lib_ring_buffer_ctx {
+struct lttng_ust_lib_ring_buffer_ctx {
/* input received by lib_ring_buffer_reserve(), saved here. */
struct channel *chan; /* channel */
void *priv; /* client private data */
int cpu; /* processor id */
/* output from lib_ring_buffer_reserve() */
- struct lib_ring_buffer *buf; /*
+ struct lttng_ust_lib_ring_buffer *buf; /*
* buffer corresponding to processor id
* for this channel
*/
* @cpu: processor id
*/
static inline
-void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
+void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct channel *chan, void *priv,
size_t data_size, int largest_align,
int cpu, struct lttng_ust_shm_handle *handle)
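/*
 * Illustrative sketch (not part of this patch): the writer-side fast
 * path tying lib_ring_buffer_ctx_init() to the reserve/write/commit
 * helpers defined later in this header. example_write_record is a
 * hypothetical name; a negative return of lib_ring_buffer_get_cpu()
 * on excessive nesting is an assumption of this sketch.
 */
static
int example_write_record(const struct lttng_ust_lib_ring_buffer_config *config,
        struct channel *chan, void *priv,
        struct lttng_ust_shm_handle *handle,
        const void *payload, size_t len)
{
    struct lttng_ust_lib_ring_buffer_ctx ctx;
    int ret, cpu;

    cpu = lib_ring_buffer_get_cpu(config);  /* brackets reserve/commit */
    if (cpu < 0)
        return -EPERM;
    lib_ring_buffer_ctx_init(&ctx, chan, priv, len,
            __alignof__(char), cpu, handle);
    ret = lib_ring_buffer_reserve(config, &ctx);
    if (ret)
        goto put;
    lib_ring_buffer_write(config, &ctx, payload, len);
    lib_ring_buffer_commit(config, &ctx);
put:
    lib_ring_buffer_put_cpu(config);
    return ret;
}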
* @ctx: ring buffer context.
*/
static inline
-void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
+void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
size_t alignment)
{
ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
* Used internally to check for valid configurations at channel creation.
*/
static inline
-int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
+int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
};
static inline
-long v_read(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+long v_read(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
return uatomic_read(&v_a->a);
}
static inline
-void v_set(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
+void v_set(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a,
long v)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
}
static inline
-void v_add(const struct lib_ring_buffer_config *config, long v, union v_atomic *v_a)
+void v_add(const struct lttng_ust_lib_ring_buffer_config *config, long v, union v_atomic *v_a)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
uatomic_add(&v_a->a, v);
}
static inline
-void v_inc(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+void v_inc(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
uatomic_inc(&v_a->a);
* Non-atomic decrement. Only used by the reader; apply only to reader-owned subbuffers.
*/
static inline
-void _v_dec(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+void _v_dec(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a)
{
--v_a->v;
}
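/*
 * Layout the accessors above assume (sketch; the actual definition of
 * union v_atomic is cut from this excerpt): the same long is reachable
 * either through uatomic operations or through plain accesses for
 * reader-owned state, as in _v_dec() above.
 *
 *	union v_atomic {
 *		long a;		accessed through uatomic_*()
 *		long v;		plain access, reader-owned data only
 *	};
 */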
static inline
-long v_cmpxchg(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
+long v_cmpxchg(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a,
long old, long _new)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
/* Ring buffer backend access (read/write) */
-extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
+extern size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb,
size_t offset, void *dest, size_t len,
struct lttng_ust_shm_handle *handle);
-extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
+extern int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb,
size_t offset, void *dest, size_t len,
struct lttng_ust_shm_handle *handle);
* as long as the write is never bigger than a page size.
*/
extern void *
-lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
+lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
size_t offset,
struct lttng_ust_shm_handle *handle);
extern void *
-lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
+lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
size_t offset,
struct lttng_ust_shm_handle *handle);
* if the copy crosses a page boundary.
*/
static inline
-void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
+void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
const void *src, size_t len)
{
- struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
struct channel_backend *chanb = &ctx->chan->backend;
struct lttng_ust_shm_handle *handle = ctx->handle;
size_t sbidx;
size_t offset = ctx->buf_offset;
- struct lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
*/
static inline
unsigned long lib_ring_buffer_get_records_unread(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct lib_ring_buffer_backend_pages_shmp *pages;
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long records_unread = 0, sb_bindex, id;
unsigned int i;
/* Ring buffer and channel backend create/free */
-int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
struct channel_backend *chan, int cpu,
struct lttng_ust_shm_handle *handle,
struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
-void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
+void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
const char *name,
- const struct lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_config *config,
void *priv, size_t subbuf_size,
size_t num_subbuf, struct lttng_ust_shm_handle *handle);
void channel_backend_free(struct channel_backend *chanb,
struct lttng_ust_shm_handle *handle);
-void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb,
+void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);
int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);
-extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
+extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
size_t offset, const void *src, size_t len,
ssize_t pagecpy);
* mode).
*/
static inline
-unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
+unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
unsigned long offset, unsigned long noref,
unsigned long index)
{
* bits are identical, else 0.
*/
static inline
-int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
+int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
unsigned long id, unsigned long offset)
{
return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}
static inline
-unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
+unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
unsigned long id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
}
static inline
-unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
+unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
unsigned long id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
* needed.
*/
static inline
-void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
+void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
unsigned long *id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
}
static inline
-void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
+void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
unsigned long *id, unsigned long offset)
{
unsigned long tmp;
/* No volatile access, since already used locally */
static inline
-void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
+void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
unsigned long *id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
* -EPERM on failure.
*/
static inline
-int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
+int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
unsigned long num_subbuf)
{
if (config->mode == RING_BUFFER_OVERWRITE)
}
static inline
-void subbuffer_count_record(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
unsigned long idx, struct lttng_ust_shm_handle *handle)
{
unsigned long sb_bindex;
* perform the decrement atomically.
*/
static inline
-void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
struct lttng_ust_shm_handle *handle)
{
unsigned long sb_bindex;
static inline
unsigned long subbuffer_get_records_count(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
*/
static inline
unsigned long subbuffer_count_records_overrun(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer_backend_pages_shmp *pages;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long overruns, sb_bindex;
sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
}
static inline
-void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
unsigned long idx,
unsigned long data_size,
struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer_backend_pages_shmp *pages;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
static inline
unsigned long subbuffer_get_read_data_size(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer_backend_pages_shmp *pages;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
static inline
unsigned long subbuffer_get_data_size(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer_backend_pages_shmp *pages;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
* writer.
*/
static inline
-void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
* called by writer.
*/
static inline
-void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
unsigned long idx, unsigned long offset,
struct lttng_ust_shm_handle *handle)
{
* update_read_sb_index - Read-side subbuffer index update.
*/
static inline
-int update_read_sb_index(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
struct channel_backend *chanb,
unsigned long consumed_idx,
unsigned long consumed_count,
#include "shm_internal.h"
-struct lib_ring_buffer_backend_pages {
+struct lttng_ust_lib_ring_buffer_backend_pages {
unsigned long mmap_offset; /* offset of the subbuffer in mmap */
union v_atomic records_commit; /* current records committed count */
union v_atomic records_unread; /* records to read */
DECLARE_SHMP(char, p); /* Backing memory map */
};
-struct lib_ring_buffer_backend_subbuffer {
+struct lttng_ust_lib_ring_buffer_backend_subbuffer {
/* Identifier for subbuf backend pages. Exchanged atomically. */
unsigned long id; /* backend subbuffer identifier */
};
* Forward declaration of frontend-specific channel and ring_buffer.
*/
struct channel;
-struct lib_ring_buffer;
+struct lttng_ust_lib_ring_buffer;
-struct lib_ring_buffer_backend_pages_shmp {
- DECLARE_SHMP(struct lib_ring_buffer_backend_pages, shmp);
+struct lttng_ust_lib_ring_buffer_backend_pages_shmp {
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_pages, shmp);
};
-struct lib_ring_buffer_backend {
+struct lttng_ust_lib_ring_buffer_backend {
/* Array of ring_buffer_backend_subbuffer for writer */
- DECLARE_SHMP(struct lib_ring_buffer_backend_subbuffer, buf_wsb);
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_subbuffer, buf_wsb);
/* ring_buffer_backend_subbuffer for reader */
- struct lib_ring_buffer_backend_subbuffer buf_rsb;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer buf_rsb;
/*
* Pointer array of backend pages, for whole buffer.
* Indexed by ring_buffer_backend_subbuffer identifier (id) index.
*/
- DECLARE_SHMP(struct lib_ring_buffer_backend_pages_shmp, array);
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_pages_shmp, array);
DECLARE_SHMP(char, memory_map); /* memory mapping */
DECLARE_SHMP(struct channel, chan); /* Associated channel */
unsigned int allocated:1; /* Bool: is buffer allocated? */
};
-struct lib_ring_buffer_shmp {
- DECLARE_SHMP(struct lib_ring_buffer, shmp); /* Channel per-cpu buffers */
+struct lttng_ust_lib_ring_buffer_shmp {
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, shmp); /* Channel per-cpu buffers */
};
struct channel_backend {
unsigned long num_subbuf; /* Number of sub-buffers for writer */
u64 start_tsc; /* Channel creation TSC value */
void *priv; /* Client-specific information */
- struct lib_ring_buffer_config config; /* Ring buffer configuration */
+ struct lttng_ust_lib_ring_buffer_config config; /* Ring buffer configuration */
char name[NAME_MAX]; /* Channel name */
- struct lib_ring_buffer_shmp buf[];
+ struct lttng_ust_lib_ring_buffer_shmp buf[];
};
#endif /* _LINUX_RING_BUFFER_BACKEND_TYPES_H */
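/*
 * Illustrative sketch (not part of this patch): resolving which backend
 * pages currently back write-side subbuffer "idx", following the two
 * levels of indirection described above. It mirrors the
 * subbuffer_id_get_index()/shmp_index() pattern used by the accessors
 * elsewhere in this diff; example_get_wsb_pages is a hypothetical name.
 */
static inline
struct lttng_ust_lib_ring_buffer_backend_pages *
example_get_wsb_pages(const struct lttng_ust_lib_ring_buffer_config *config,
        struct lttng_ust_lib_ring_buffer_backend *bufb,
        unsigned long idx,
        struct lttng_ust_shm_handle *handle)
{
    unsigned long sb_bindex;

    /* id of the subbuffer currently owned by the writer at idx */
    sb_bindex = subbuffer_id_get_index(config,
            shmp_index(handle, bufb->buf_wsb, idx)->id);
    /* the array[] entry holds a shm pointer to the backend pages */
    return shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp);
}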
*/
extern
-struct lttng_ust_shm_handle *channel_create(const struct lib_ring_buffer_config *config,
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
const char *name, void *priv,
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
#define for_each_channel_cpu(cpu, chan) \
for_each_possible_cpu(cpu)
-extern struct lib_ring_buffer *channel_get_ring_buffer(
- const struct lib_ring_buffer_config *config,
+extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, int cpu,
struct lttng_ust_shm_handle *handle,
int *shm_fd, int *wait_fd,
uint64_t *memory_map_size);
-extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
+extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle,
int shadow);
-extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
+extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle,
int shadow);
/*
* Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
*/
-extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
+extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
unsigned long *consumed,
unsigned long *produced,
struct lttng_ust_shm_handle *handle);
-extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
+extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
unsigned long consumed_new,
struct lttng_ust_shm_handle *handle);
-extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
+extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
unsigned long consumed,
struct lttng_ust_shm_handle *handle);
-extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf,
+extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle);
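/*
 * Illustrative sketch (not part of this patch): the read sequence
 * described above: snapshot, get_subbuf/put_subbuf per consumed
 * position, then move_consumer. example_read_stream and the
 * dest/subbuf_size parameters are hypothetical; lib_ring_buffer_read()
 * is assumed to mask the offset to the buffer size, as
 * lib_ring_buffer_read_cstr() does later in this diff.
 */
static
int example_read_stream(const struct lttng_ust_lib_ring_buffer_config *config,
        struct lttng_ust_lib_ring_buffer *buf,
        struct lttng_ust_shm_handle *handle,
        void *dest, size_t subbuf_size)
{
    unsigned long consumed, produced;
    int ret;

    ret = lib_ring_buffer_snapshot(buf, &consumed, &produced, handle);
    if (ret)
        return ret;    /* nothing new to read */
    while (consumed < produced) {
        size_t len;

        ret = lib_ring_buffer_get_subbuf(buf, consumed, handle);
        if (ret)
            break;    /* sub-buffer still held by the writer */
        len = lib_ring_buffer_get_read_data_size(config, buf, handle);
        lib_ring_buffer_read(&buf->backend, consumed, dest, len, handle);
        lib_ring_buffer_put_subbuf(buf, handle);
        lib_ring_buffer_move_consumer(buf, consumed + subbuf_size, handle);
        consumed += subbuf_size;
    }
    return 0;
}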
/*
* lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
* to read sub-buffers sequentially.
*/
-static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf,
+static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
int ret;
}
static inline
-void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf,
+void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
lib_ring_buffer_put_subbuf(buf, handle);
}
extern void channel_reset(struct channel *chan);
-extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf,
+extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle);
static inline
-unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->offset);
}
static inline
-unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return uatomic_read(&buf->consumed);
}
* ordering enforced with respect to trace teardown).
*/
static inline
-int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
int finalized = CMM_ACCESS_ONCE(buf->finalized);
/*
static inline
unsigned long lib_ring_buffer_get_read_data_size(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
return subbuffer_get_read_data_size(config, &buf->backend, handle);
static inline
unsigned long lib_ring_buffer_get_records_count(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->records_count);
}
static inline
unsigned long lib_ring_buffer_get_records_overrun(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->records_overrun);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_full(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_full);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_wrap(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_wrap);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_big(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_big);
}
static inline
unsigned long lib_ring_buffer_get_records_read(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->backend.records_read);
}
* section.
*/
static inline
-int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
+int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
{
int cpu, nesting;
* lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
*/
static inline
-void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
+void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
{
cmm_barrier();
lib_ring_buffer_nesting--; /* TLS */
* returns 0 if reserve ok, or 1 if the slow path must be taken.
*/
static inline
-int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
struct channel *chan = ctx->chan;
- struct lib_ring_buffer *buf = ctx->buf;
+ struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
*/
static inline
-int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
struct lttng_ust_shm_handle *handle = ctx->handle;
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
* disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
*/
static inline
-void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, enum switch_mode mode,
+void lib_ring_buffer_switch(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
struct lttng_ust_shm_handle *handle)
{
lib_ring_buffer_switch_slow(buf, mode, handle);
* specified sub-buffer, and delivers it if necessary.
*/
static inline
-void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
- const struct lib_ring_buffer_ctx *ctx)
+void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
struct lttng_ust_shm_handle *handle = ctx->handle;
- struct lib_ring_buffer *buf = ctx->buf;
+ struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
unsigned long offset_end = ctx->buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
unsigned long commit_count;
* Returns 0 upon success, -EPERM if the record cannot be discarded.
*/
static inline
-int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
- const struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
- struct lib_ring_buffer *buf = ctx->buf;
+ struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
/*
}
static inline
-void channel_record_disable(const struct lib_ring_buffer_config *config,
+void channel_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan)
{
uatomic_inc(&chan->record_disabled);
}
static inline
-void channel_record_enable(const struct lib_ring_buffer_config *config,
+void channel_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan)
{
uatomic_dec(&chan->record_disabled);
}
static inline
-void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
uatomic_inc(&buf->record_disabled);
}
static inline
-void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
uatomic_dec(&buf->record_disabled);
}
#if (CAA_BITS_PER_LONG == 32)
static inline
-void save_last_tsc(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, u64 tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return;
}
static inline
-int last_tsc_overflow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, u64 tsc)
{
unsigned long tsc_shifted;
}
#else
static inline
-void save_last_tsc(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, u64 tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return;
}
static inline
-int last_tsc_overflow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, u64 tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return 0;
#endif
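/*
 * Worked example of the timestamp compression handled above (sketch):
 * with tsc_bits = 32, each record header carries only the low 32 bits
 * of the 64-bit timestamp. save_last_tsc() remembers the high bits
 * (tsc >> tsc_bits); when last_tsc_overflow() sees them change between
 * two records, the reserve slow path emits a full 64-bit timestamp so
 * the reader can resynchronize. The 32-bit variant compares the
 * shifted value (tsc_shifted) so the bookkeeping fits an unsigned
 * long; tsc_bits of 0 or 64 disable the scheme, as both helpers
 * return early in that case.
 */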
extern
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx);
extern
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
enum switch_mode mode,
struct lttng_ust_shm_handle *handle);
/* Buffer write helpers */
static inline
-void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
+void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
unsigned long offset)
{
}
static inline
-void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
unsigned long commit_count,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
}
static inline
-int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct lttng_ust_shm_handle *handle)
{
}
static inline
-int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan)
{
return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
static inline
-unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
* This is a very specific ftrace use-case, so we keep this as "internal" API.
*/
static inline
-int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct lttng_ust_shm_handle *handle)
{
}
static inline
-void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
unsigned long offset,
unsigned long commit_count,
* useful for crash dump.
*/
static inline
-void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
unsigned long idx,
unsigned long buf_offset,
commit_seq_old, commit_count);
}
-extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
struct channel_backend *chanb, int cpu,
struct lttng_ust_shm_handle *handle,
struct shm_object *shmobj);
-extern void lib_ring_buffer_free(struct lib_ring_buffer *buf,
+extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle);
/* Keep track of trap nesting inside ring buffer code */
} ____cacheline_aligned;
/* ring buffer state */
-struct lib_ring_buffer {
+struct lttng_ust_lib_ring_buffer {
/* First 32 bytes cache-hot cacheline */
union v_atomic offset; /* Current offset in the buffer */
DECLARE_SHMP(struct commit_counters_hot, commit_hot);
* Last timestamp written in the buffer.
*/
- struct lib_ring_buffer_backend backend; /* Associated backend */
+ struct lttng_ust_lib_ring_buffer_backend backend; /* Associated backend */
DECLARE_SHMP(struct commit_counters_cold, commit_cold);
/* Commit count per sub-buffer */
int switch_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
int read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
/* shmp pointer to self */
- DECLARE_SHMP(struct lib_ring_buffer, self);
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, self);
} ____cacheline_aligned;
static inline
/*
* Issue warnings and disable channels upon internal error.
- * Can receive struct lib_ring_buffer or struct lib_ring_buffer_backend
+ * Can receive struct lttng_ust_lib_ring_buffer or struct lttng_ust_lib_ring_buffer_backend
* parameters.
*/
#define CHAN_WARN_ON(c, cond) \
*/
int lib_ring_buffer_open(struct inode *inode, struct file *file)
{
- struct lib_ring_buffer *buf = inode->i_private;
+ struct lttng_ust_lib_ring_buffer *buf = inode->i_private;
int ret;
ret = lib_ring_buffer_open_read(buf);
*/
int lib_ring_buffer_release(struct inode *inode, struct file *file)
{
- struct lib_ring_buffer *buf = file->private_data;
+ struct lttng_ust_lib_ring_buffer *buf = file->private_data;
lib_ring_buffer_release_read(buf);
unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait)
{
unsigned int mask = 0;
- struct lib_ring_buffer *buf = filp->private_data;
+ struct lttng_ust_lib_ring_buffer *buf = filp->private_data;
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = chan->backend.config;
int finalized, disabled;
if (filp->f_mode & FMODE_READ) {
*/
long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct lib_ring_buffer *buf = filp->private_data;
+ struct lttng_ust_lib_ring_buffer *buf = filp->private_data;
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = chan->backend.config;
if (lib_ring_buffer_channel_is_disabled(chan))
return -EIO;
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
- struct lib_ring_buffer *buf = filp->private_data;
+ struct lttng_ust_lib_ring_buffer *buf = filp->private_data;
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = chan->backend.config;
if (lib_ring_buffer_channel_is_disabled(chan))
return -EIO;
* @extra_reader_sb: need extra subbuffer for reader
*/
static
-int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
size_t size, size_t num_subbuf,
int extra_reader_sb,
struct lttng_ust_shm_handle *handle,
if (extra_reader_sb)
num_subbuf_alloc++;
- align_shm(shmobj, __alignof__(struct lib_ring_buffer_backend_pages_shmp));
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
set_shmp(bufb->array, zalloc_shm(shmobj,
- sizeof(struct lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
if (unlikely(!shmp(handle, bufb->array)))
goto array_error;
/* Allocate backend pages array elements */
for (i = 0; i < num_subbuf_alloc; i++) {
- align_shm(shmobj, __alignof__(struct lib_ring_buffer_backend_pages));
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
set_shmp(shmp_index(handle, bufb->array, i)->shmp,
zalloc_shm(shmobj,
- sizeof(struct lib_ring_buffer_backend_pages)));
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
goto free_array;
}
/* Allocate write-side subbuffer table */
- align_shm(shmobj, __alignof__(struct lib_ring_buffer_backend_subbuffer));
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
- sizeof(struct lib_ring_buffer_backend_subbuffer)
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
* num_subbuf));
if (unlikely(!shmp(handle, bufb->buf_wsb)))
goto free_array;
return -ENOMEM;
}
-int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
struct channel_backend *chanb, int cpu,
struct lttng_ust_shm_handle *handle,
struct shm_object *shmobj)
{
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
set_shmp(bufb->chan, handle->chan._ref);
bufb->cpu = cpu;
handle, shmobj);
}
-void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
+void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb)
{
/* bufb->buf_wsb will be freed by shm teardown */
/* bufb->array[i] will be freed by shm teardown */
bufb->allocated = 0;
}
-void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb,
+void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
struct lttng_ust_shm_handle *handle)
{
struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
unsigned long num_subbuf_alloc;
unsigned int i;
void channel_backend_reset(struct channel_backend *chanb)
{
struct channel *chan = caa_container_of(chanb, struct channel, backend);
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
/*
* Don't reset buf_size, subbuf_size, subbuf_size_order,
*/
int channel_backend_init(struct channel_backend *chanb,
const char *name,
- const struct lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_config *config,
void *priv, size_t subbuf_size, size_t num_subbuf,
struct lttng_ust_shm_handle *handle)
{
memcpy(&chanb->config, config, sizeof(*config));
/* Per-cpu buffer size: control (prior to backend) */
- shmsize = offset_align(shmsize, __alignof__(struct lib_ring_buffer));
- shmsize += sizeof(struct lib_ring_buffer);
+ shmsize = offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer);
/* Per-cpu buffer size: backend */
/* num_subbuf + 1 is the worst case */
num_subbuf_alloc = num_subbuf + 1;
- shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_pages_shmp));
- shmsize += sizeof(struct lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
+ shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
shmsize += offset_align(shmsize, PAGE_SIZE);
shmsize += subbuf_size * num_subbuf_alloc;
- shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_pages));
- shmsize += sizeof(struct lib_ring_buffer_backend_pages) * num_subbuf_alloc;
- shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_subbuffer));
- shmsize += sizeof(struct lib_ring_buffer_backend_subbuffer) * num_subbuf;
+ shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
+ shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
/* Per-cpu buffer size: control (after backend) */
shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
/*
* We need to allocate for all possible cpus.
*/
shmobj = shm_object_table_append(handle->table, shmsize);
if (!shmobj)
goto end;
- align_shm(shmobj, __alignof__(struct lib_ring_buffer));
- set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lib_ring_buffer)));
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
+ set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
buf = shmp(handle, chanb->buf[i].shmp);
if (!buf)
goto end;
}
} else {
struct shm_object *shmobj;
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
shmobj = shm_object_table_append(handle->table, shmsize);
if (!shmobj)
goto end;
- align_shm(shmobj, __alignof__(struct lib_ring_buffer));
- set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lib_ring_buffer)));
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
+ set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
buf = shmp(handle, chanb->buf[0].shmp);
if (!buf)
goto end;
free_bufs:
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(i) {
- struct lib_ring_buffer *buf = shmp(handle, chanb->buf[i].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chanb->buf[i].shmp);
if (!buf->backend.allocated)
continue;
void channel_backend_free(struct channel_backend *chanb,
struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
unsigned int i;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(i) {
- struct lib_ring_buffer *buf = shmp(handle, chanb->buf[i].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chanb->buf[i].shmp);
if (!buf->backend.allocated)
continue;
lib_ring_buffer_free(buf, handle);
}
} else {
- struct lib_ring_buffer *buf = shmp(handle, chanb->buf[0].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chanb->buf[0].shmp);
CHAN_WARN_ON(chanb, !buf->backend.allocated);
lib_ring_buffer_free(buf, handle);
* Should be protected by get_subbuf/put_subbuf.
* Returns the length copied.
*/
-size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
+size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
ssize_t orig_len;
- struct lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
unsigned long sb_bindex, id;
orig_len = len;
* return string's length
* Should be protected by get_subbuf/put_subbuf.
*/
-int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
+int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
ssize_t string_len, orig_offset;
char *str;
- struct lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
* it's never on a page boundary, it's safe to write directly to this address,
* as long as the write is never bigger than a page size.
*/
-void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
+void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
size_t offset,
struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
* it's always at the beginning of a page, it's safe to write directly to this
* address, as long as the write is never bigger than a page size.
*/
-void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
+void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
size_t offset,
struct lttng_ust_shm_handle *handle)
{
size_t sbidx;
- struct lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
static
void lib_ring_buffer_print_errors(struct channel *chan,
- struct lib_ring_buffer *buf, int cpu,
+ struct lttng_ust_lib_ring_buffer *buf, int cpu,
struct lttng_ust_shm_handle *handle);
/*
* Must be called under cpu hotplug protection.
*/
-void lib_ring_buffer_free(struct lib_ring_buffer *buf,
+void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
* should not be using the iterator concurrently with reset. The previous
* current iterator record is reset.
*/
-void lib_ring_buffer_reset(struct lib_ring_buffer *buf,
+void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned int i;
/*
/*
* Must be called under cpu hotplug protection.
*/
-int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
struct channel_backend *chanb, int cpu,
struct lttng_ust_shm_handle *handle,
struct shm_object *shmobj)
{
- const struct lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
struct channel *chan = caa_container_of(chanb, struct channel, backend);
void *priv = chanb->priv;
unsigned int num_subbuf;
#if 0
static void switch_buffer_timer(unsigned long data)
{
- struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
+ struct lttng_ust_lib_ring_buffer *buf = (struct lttng_ust_lib_ring_buffer *)data;
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
/*
* Only flush buffers periodically if readers are active.
}
#endif //0
-static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf,
+static void lib_ring_buffer_start_switch_timer(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- //const struct lib_ring_buffer_config *config = &chan->backend.config;
+ //const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
if (!chan->switch_timer_interval || buf->switch_timer_enabled)
return;
buf->switch_timer_enabled = 1;
}
-static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf,
+static void lib_ring_buffer_stop_switch_timer(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
*/
static void read_buffer_timer(unsigned long data)
{
- struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
+ struct lttng_ust_lib_ring_buffer *buf = (struct lttng_ust_lib_ring_buffer *)data;
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, !buf->backend.allocated);
}
#endif //0
-static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf,
+static void lib_ring_buffer_start_read_timer(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
buf->read_timer_enabled = 1;
}
-static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf,
+static void lib_ring_buffer_stop_read_timer(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
static void channel_unregister_notifiers(struct channel *chan,
struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
int cpu;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
lib_ring_buffer_stop_switch_timer(buf, handle);
lib_ring_buffer_stop_read_timer(buf, handle);
}
} else {
- struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
lib_ring_buffer_stop_switch_timer(buf, handle);
lib_ring_buffer_stop_read_timer(buf, handle);
* Holds cpu hotplug.
* Returns NULL on failure.
*/
-struct lttng_ust_shm_handle *channel_create(const struct lib_ring_buffer_config *config,
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
const char *name, void *priv, void *buf_addr,
size_t subbuf_size,
size_t num_subbuf, unsigned int switch_timer_interval,
/* Calculate the shm allocation layout */
shmsize = sizeof(struct channel);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- shmsize += sizeof(struct lib_ring_buffer_shmp) * num_possible_cpus();
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * num_possible_cpus();
else
- shmsize += sizeof(struct lib_ring_buffer_shmp);
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp);
shmobj = shm_object_table_append(handle->table, shmsize);
if (!shmobj)
 * In that unlikely case, we need to allocate for all possible cpus.
*/
for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
lib_ring_buffer_start_switch_timer(buf, handle);
lib_ring_buffer_start_read_timer(buf, handle);
}
} else {
- struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
lib_ring_buffer_start_switch_timer(buf, handle);
lib_ring_buffer_start_read_timer(buf, handle);
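As a usage sketch for the renamed creation API: the read-timer argument and the shm descriptor out-parameters below are assumptions based on the excerpts elsewhere in this patch, and client_config stands for a client-defined configuration.

    int shm_fd, wait_fd;
    uint64_t memory_map_size;
    struct lttng_ust_shm_handle *handle;

    /* Assumed tail of the signature: read_timer_interval, then the
     * shm descriptor out-parameters seen elsewhere in this patch. */
    handle = channel_create(&client_config, "chan0", NULL, NULL,
                            4096 /* subbuf_size */, 8 /* num_subbuf */,
                            1000 /* switch timer */, 0 /* read timer */,
                            &shm_fd, &wait_fd, &memory_map_size);
    if (!handle)
        abort();    /* documented: channel_create returns NULL on failure */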
void *channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
int shadow)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
void *priv;
int cpu;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_channel_cpu(cpu, chan) {
- struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
if (config->cb.buffer_finalize)
config->cb.buffer_finalize(buf,
//wake_up_interruptible(&buf->read_wait);
}
} else {
- struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
if (config->cb.buffer_finalize)
config->cb.buffer_finalize(buf, chan->backend.priv, -1, handle);
return priv;
}
-struct lib_ring_buffer *channel_get_ring_buffer(
- const struct lib_ring_buffer_config *config,
+struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, int cpu,
struct lttng_ust_shm_handle *handle,
int *shm_fd, int *wait_fd,
}
}
-int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
+int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle,
int shadow)
{
return 0;
}
-void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
+void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle,
int shadow)
{
* data to read at consumed position, or 0 if the get operation succeeds.
*/
-int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
+int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced,
struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, write_offset;
int finalized;
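A minimal sketch of the snapshot call, assuming buf and handle come from the consumer context; the return values follow the comment above:

    unsigned long consumed, produced, avail;
    int ret;

    ret = lib_ring_buffer_snapshot(buf, &consumed, &produced, handle);
    if (!ret)
        avail = produced - consumed;    /* bytes safe to read */
    else if (ret == -EAGAIN)
        ;   /* no data at the consumed position yet */
    else
        ;   /* -ENODATA: buffer finalized, no more data will arrive */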
* @buf: ring buffer
* @consumed_new: new consumed count value
*/
-void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
+void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
unsigned long consumed_new,
struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
struct channel *chan = shmp(handle, bufb->chan);
unsigned long consumed;
* Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
* data to read at consumed position, or 0 if the get operation succeeds.
*/
-int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
+int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
unsigned long consumed,
struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret;
int finalized;
* lib_ring_buffer_put_subbuf - release exclusive subbuffer access
* @buf: ring buffer
*/
-void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf,
+void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
struct channel *chan = shmp(handle, bufb->chan);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long read_sb_bindex, consumed_idx, consumed;
CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
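Read-side usage combining the two calls above. This sketch assumes subbuf_size equals chan->backend.subbuf_size and that put_subbuf leaves the consumer position untouched, hence the explicit move_consumer:

    unsigned long consumed = 0;     /* typically taken from a snapshot */

    while (!lib_ring_buffer_get_subbuf(buf, consumed, handle)) {
        /* ... copy the sub-buffer contents out here ... */
        lib_ring_buffer_put_subbuf(buf, handle);
        consumed += subbuf_size;    /* assumed: chan->backend.subbuf_size */
        lib_ring_buffer_move_consumer(buf, consumed, handle);
    }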
 * position and the writer position (inclusive).
*/
static
-void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
unsigned long cons_offset,
int cpu,
struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long cons_idx, commit_count, commit_count_sb;
cons_idx = subbuf_index(cons_offset, chan);
}
static
-void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
void *priv, int cpu,
struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long write_offset, cons_offset;
/*
static
void lib_ring_buffer_print_errors(struct channel *chan,
- struct lib_ring_buffer *buf, int cpu,
+ struct lttng_ust_lib_ring_buffer *buf, int cpu,
struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
void *priv = chan->backend.priv;
ERRMSG("ring buffer %s, cpu %d: %lu records written, "
* Only executed when the buffer is finalized, in SWITCH_FLUSH.
*/
static
-void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
u64 tsc,
struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old, chan);
unsigned long commit_count;
* subbuffer.
*/
static
-void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
u64 tsc,
struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
unsigned long commit_count, padding_size, data_size;
 * that this code is executed before this sub-buffer is delivered.
*/
static
-void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
u64 tsc,
struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long beginidx = subbuf_index(offsets->begin, chan);
unsigned long commit_count;
 * have to do the delivery themselves.
*/
static
-void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
u64 tsc,
struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long endidx = subbuf_index(offsets->end - 1, chan);
unsigned long commit_count, padding_size, data_size;
*/
static
int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
- struct lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
u64 *tsc)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long off;
offsets->begin = v_read(config, &buf->offset);
* operations, this function must be called from the CPU which owns the buffer
 * for an ACTIVE flush.
*/
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode,
+void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
struct switch_offsets offsets;
unsigned long oldidx;
u64 tsc;
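Since ustctl_buffer_flush() later in this patch is implemented as an ACTIVE switch, a flush reduces to:

    /* Push the partially filled sub-buffer out to readers.  For an
     * ACTIVE flush this must run on the CPU owning the buffer. */
    lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);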
* -EIO if data cannot be written into the buffer for any other reason.
*/
static
-int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
+int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- struct lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
struct lttng_ust_shm_handle *handle = ctx->handle;
unsigned long reserve_commit_diff;
* -EIO for other errors, else returns 0.
* It will take care of sub-buffer switching.
*/
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
struct lttng_ust_shm_handle *handle = ctx->handle;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_ust_lib_ring_buffer *buf;
struct switch_offsets offsets;
int ret;
int lttng_metadata_printf(struct ltt_session *session,
const char *fmt, ...)
{
- struct lib_ring_buffer_ctx ctx;
+ struct lttng_ust_lib_ring_buffer_ctx ctx;
struct ltt_channel *chan = session->metadata;
char *str = NULL;
int ret = 0, waitret;
}
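For illustration, the metadata channel is fed printf-style; the CTF fragment below is hypothetical and only shows the calling convention (event_name and event_id are placeholders):

    ret = lttng_metadata_printf(session,
            "event {\n"
            "    name = \"%s\";\n"
            "    id = %u;\n"
            "};\n\n",
            event_name, event_id);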
static inline
-void ctx_record(struct lib_ring_buffer_ctx *bufctx,
+void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
struct ltt_channel *chan,
struct lttng_ctx *ctx)
{
* contains.
*/
static __inline__
-unsigned char record_header_size(const struct lib_ring_buffer_config *config,
+unsigned char record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
struct ltt_channel *ltt_chan = channel_get_private(chan);
struct ltt_event *event = ctx->priv;
#include "../libringbuffer/api.h"
static
-void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
+void ltt_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
uint32_t event_id);
/*
* @event_id: event ID
*/
static __inline__
-void ltt_write_event_header(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
+void ltt_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
}
static
-void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
+void ltt_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
-static const struct lib_ring_buffer_config client_config;
+static const struct lttng_ust_lib_ring_buffer_config client_config;
static u64 client_ring_buffer_clock_read(struct channel *chan)
{
}
static
-size_t client_record_header_size(const struct lib_ring_buffer_config *config,
+size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
return record_header_size(config, chan, offset,
pre_header_padding, ctx);
return offsetof(struct packet_header, ctx.header_end);
}
-static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
+static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle)
{
 * offset is assumed to never be 0 here: never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
+static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle)
{
header->ctx.events_discarded = records_lost;
}
-static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
+static int client_buffer_create(struct lttng_ust_lib_ring_buffer *buf, void *priv,
int cpu, const char *name, struct lttng_ust_shm_handle *handle)
{
return 0;
}
-static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu, struct lttng_ust_shm_handle *handle)
+static void client_buffer_finalize(struct lttng_ust_lib_ring_buffer *buf, void *priv, int cpu, struct lttng_ust_shm_handle *handle)
{
}
-static const struct lib_ring_buffer_config client_config = {
+static const struct lttng_ust_lib_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
.cb.subbuffer_header_size = client_packet_header_size,
}
static
-struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan,
+struct lttng_ust_lib_ring_buffer *ltt_buffer_read_open(struct channel *chan,
struct lttng_ust_shm_handle *handle,
int *shm_fd, int *wait_fd,
uint64_t *memory_map_size)
{
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
int cpu;
for_each_channel_cpu(cpu, chan) {
}
static
-void ltt_buffer_read_close(struct lib_ring_buffer *buf,
+void ltt_buffer_read_close(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
lib_ring_buffer_release_read(buf, handle, 0);
}
static
-int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
+int ltt_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
}
static
-void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
+void ltt_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
lib_ring_buffer_commit(&client_config, ctx);
lib_ring_buffer_put_cpu(&client_config);
}
static
-void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
+void ltt_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
size_t len)
{
lib_ring_buffer_write(&client_config, ctx, src, len);
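These three ops make up the per-event fast path. A hedged sketch of how a probe drives them, assuming ctx was initialized by the tracepoint glue and that payload/payload_len are placeholders:

    struct lttng_ust_lib_ring_buffer_ctx ctx;
    /* ... ctx initialized with chan, event size and alignment ... */

    if (!ltt_event_reserve(&ctx, event_id)) {
        ltt_event_write(&ctx, payload, payload_len);  /* placeholders */
        ltt_event_commit(&ctx);
    }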
static
int ltt_flush_buffer(struct channel *chan, struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
int cpu;
for_each_channel_cpu(cpu, chan) {
uint8_t header_end[0]; /* End of header */
};
-static const struct lib_ring_buffer_config client_config;
+static const struct lttng_ust_lib_ring_buffer_config client_config;
static inline
u64 lib_ring_buffer_clock_read(struct channel *chan)
}
static inline
-unsigned char record_header_size(const struct lib_ring_buffer_config *config,
+unsigned char record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
return 0;
}
}
static
-size_t client_record_header_size(const struct lib_ring_buffer_config *config,
+size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
return 0;
}
return offsetof(struct metadata_packet_header, header_end);
}
-static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
+static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle)
{
 * offset is assumed to never be 0 here: never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
+static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle)
{
WARN_ON_ONCE(records_lost != 0);
}
-static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
+static int client_buffer_create(struct lttng_ust_lib_ring_buffer *buf, void *priv,
int cpu, const char *name,
struct lttng_ust_shm_handle *handle)
{
return 0;
}
-static void client_buffer_finalize(struct lib_ring_buffer *buf,
+static void client_buffer_finalize(struct lttng_ust_lib_ring_buffer *buf,
void *priv, int cpu,
struct lttng_ust_shm_handle *handle)
{
}
-static const struct lib_ring_buffer_config client_config = {
+static const struct lttng_ust_lib_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
.cb.subbuffer_header_size = client_packet_header_size,
}
static
-struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan,
+struct lttng_ust_lib_ring_buffer *ltt_buffer_read_open(struct channel *chan,
struct lttng_ust_shm_handle *handle,
int *shm_fd, int *wait_fd,
uint64_t *memory_map_size)
{
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
buf = channel_get_ring_buffer(&client_config, chan,
0, handle, shm_fd, wait_fd, memory_map_size);
}
static
-void ltt_buffer_read_close(struct lib_ring_buffer *buf,
+void ltt_buffer_read_close(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
lib_ring_buffer_release_read(buf, handle, 0);
}
static
-int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
+int ltt_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx, uint32_t event_id)
{
return lib_ring_buffer_reserve(&client_config, ctx);
}
static
-void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
+void ltt_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
lib_ring_buffer_commit(&client_config, ctx);
}
static
-void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
+void ltt_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
size_t len)
{
lib_ring_buffer_write(&client_config, ctx, src, len);
{
unsigned long o_begin;
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
buf = shmp(handle, chan->backend.buf[0].shmp); /* Only for global buffer ! */
o_begin = v_read(&client_config, &buf->offset);
static
int ltt_flush_buffer(struct channel *chan, struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
int shm_fd, wait_fd;
uint64_t memory_map_size;
static
void procname_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
char *procname;
static
void pthread_id_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
unsigned long pthread_id;
static
void vpid_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
pid_t pid;
static
void vtid_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
if (unlikely(!cached_vtid))
};
struct stream_priv_data {
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
struct ltt_channel *ltt_chan;
};
int lttng_abi_open_stream(int channel_objd, struct lttng_ust_stream *info)
{
struct ltt_channel *channel = objd_private(channel_objd);
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
struct stream_priv_data *priv;
int stream_objd, ret;
int lttng_rb_release(int objd)
{
struct stream_priv_data *priv = objd_private(objd);
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
struct ltt_channel *channel;
if (priv) {
channel_destroy(chan, handle, 1);
}
-struct lib_ring_buffer *ustctl_open_stream_read(struct lttng_ust_shm_handle *handle,
+struct lttng_ust_lib_ring_buffer *ustctl_open_stream_read(struct lttng_ust_shm_handle *handle,
int cpu)
{
struct channel *chan = handle->shadow_chan;
int shm_fd, wait_fd;
uint64_t memory_map_size;
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
int ret;
buf = channel_get_ring_buffer(&chan->backend.config,
}
void ustctl_close_stream_read(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf)
+ struct lttng_ust_lib_ring_buffer *buf)
{
lib_ring_buffer_release_read(buf, handle, 1);
}
/* For mmap mode, readable without "get" operation */
void *ustctl_get_mmap_base(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf)
+ struct lttng_ust_lib_ring_buffer *buf)
{
return shmp(handle, buf->backend.memory_map);
}
/* returns the length to mmap. */
int ustctl_get_mmap_len(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer *buf,
unsigned long *len)
{
unsigned long mmap_buf_len;
/* returns the maximum size for sub-buffers. */
int ustctl_get_max_subbuf_size(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer *buf,
unsigned long *len)
{
struct channel *chan = handle->shadow_chan;
/* returns the offset of the subbuffer belonging to the mmap reader. */
int ustctl_get_mmap_read_offset(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf, unsigned long *off)
+ struct lttng_ust_lib_ring_buffer *buf, unsigned long *off)
{
struct channel *chan = handle->shadow_chan;
unsigned long sb_bindex;
/* returns the size of the current sub-buffer, without padding (for mmap). */
int ustctl_get_subbuf_size(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf, unsigned long *len)
+ struct lttng_ust_lib_ring_buffer *buf, unsigned long *len)
{
struct channel *chan = handle->shadow_chan;
/* returns the size of the current sub-buffer, with padding (for mmap). */
int ustctl_get_padded_subbuf_size(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf, unsigned long *len)
+ struct lttng_ust_lib_ring_buffer *buf, unsigned long *len)
{
struct channel *chan = handle->shadow_chan;
/* Get exclusive read access to the next sub-buffer that can be read. */
int ustctl_get_next_subbuf(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf)
+ struct lttng_ust_lib_ring_buffer *buf)
{
return lib_ring_buffer_get_next_subbuf(buf, handle);
}
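Combining get_next_subbuf with the mmap accessors above (and put_next_subbuf, introduced just below), a streaming consumer reads one sub-buffer in place; outfd is a placeholder descriptor:

    if (!ustctl_get_next_subbuf(handle, buf)) {
        unsigned long off, len;
        void *base = ustctl_get_mmap_base(handle, buf);

        ustctl_get_mmap_read_offset(handle, buf, &off);
        ustctl_get_padded_subbuf_size(handle, buf, &len);
        /* this sub-buffer's data lives at base + off, len bytes */
        write(outfd, (char *) base + off, len);   /* placeholder outfd */
        ustctl_put_next_subbuf(handle, buf);
    }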
/* Release exclusive sub-buffer access, move consumer forward. */
int ustctl_put_next_subbuf(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf)
+ struct lttng_ust_lib_ring_buffer *buf)
{
lib_ring_buffer_put_next_subbuf(buf, handle);
return 0;
/* Get a snapshot of the current ring buffer producer and consumer positions */
int ustctl_snapshot(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf)
+ struct lttng_ust_lib_ring_buffer *buf)
{
return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
&buf->prod_snapshot, handle);
/* Get the consumer position (iteration start) */
int ustctl_snapshot_get_consumed(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf, unsigned long *pos)
+ struct lttng_ust_lib_ring_buffer *buf, unsigned long *pos)
{
*pos = buf->cons_snapshot;
return 0;
/* Get the producer position (iteration end) */
int ustctl_snapshot_get_produced(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf, unsigned long *pos)
+ struct lttng_ust_lib_ring_buffer *buf, unsigned long *pos)
{
*pos = buf->prod_snapshot;
return 0;
/* Get exclusive read access to the specified sub-buffer position */
int ustctl_get_subbuf(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf, unsigned long *pos)
+ struct lttng_ust_lib_ring_buffer *buf, unsigned long *pos)
{
return lib_ring_buffer_get_subbuf(buf, *pos, handle);
}
/* Release exclusive sub-buffer access */
int ustctl_put_subbuf(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf)
+ struct lttng_ust_lib_ring_buffer *buf)
{
lib_ring_buffer_put_subbuf(buf, handle);
return 0;
}
int ustctl_buffer_flush(struct lttng_ust_shm_handle *handle,
- struct lib_ring_buffer *buf)
+ struct lttng_ust_lib_ring_buffer *buf)
{
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);
return 0;
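For position-based (snapshot) reads, the same calls compose into an iteration between the snapshot bounds; subbuf_size is assumed to be known from the channel configuration:

    unsigned long pos, cons, prod;

    ustctl_buffer_flush(handle, buf);   /* push out the in-flight sub-buffer */
    if (!ustctl_snapshot(handle, buf)) {
        ustctl_snapshot_get_consumed(handle, buf, &cons);
        ustctl_snapshot_get_produced(handle, buf, &prod);
        for (pos = cons; pos < prod; pos += subbuf_size) {
            if (ustctl_get_subbuf(handle, buf, &pos))
                continue;   /* sub-buffer unavailable, skip */
            /* ... copy out the sub-buffer ... */
            ustctl_put_subbuf(handle, buf);
        }
    }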
int consume_stream(struct lttng_ust_shm_handle *handle, int cpu, char *outfile)
{
struct channel *chan;
- struct lib_ring_buffer *buf;
+ struct lttng_ust_lib_ring_buffer *buf;
int outfd, ret;
int shm_fd, wait_fd;
uint64_t memory_map_size;