#include <lttng/ust-utils.h>
#include <lttng/ust-compiler.h>
-struct lttng_ust_lib_ring_buffer;
-struct lttng_ust_lib_ring_buffer_channel;
-struct lttng_ust_lib_ring_buffer_ctx;
-struct lttng_ust_lib_ring_buffer_ctx_private;
+struct lttng_ust_ring_buffer;
+struct lttng_ust_ring_buffer_channel;
+struct lttng_ust_ring_buffer_ctx;
+struct lttng_ust_ring_buffer_ctx_private;
/*
 * ring buffer context
 *
 * The struct_size field should be used to determine the size of the
 * structure. It should be queried before using additional fields added
 * at the end of the structure.
 */
-struct lttng_ust_lib_ring_buffer_ctx {
+struct lttng_ust_ring_buffer_ctx {
uint32_t struct_size; /* Size of this structure. */
void *client_priv; /* Ring buffer client private data */
void *ip; /* caller ip address */
/* Private ring buffer context, set by reserve callback. */
- struct lttng_ust_lib_ring_buffer_ctx_private *priv;
+ struct lttng_ust_ring_buffer_ctx_private *priv;
/* End of base ABI. Fields below should be used after checking struct_size. */
};
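/*
 * Hedged usage sketch (not part of this patch): since the context is part of
 * the probe/UST ABI and may only grow at its end, a caller is expected to
 * check struct_size before touching any field declared after the
 * "End of base ABI" marker.  With the current definition that check reduces
 * to a comparison against sizeof() of the structure; the helper name below is
 * illustrative only.
 */
static inline
int lttng_ust_example_ctx_has_extensions(const struct lttng_ust_ring_buffer_ctx *ctx)
{
	return ctx->struct_size >= sizeof(struct lttng_ust_ring_buffer_ctx);
}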
/**
- * lttng_ust_lib_ring_buffer_ctx_init - initialize ring buffer context
+ * lttng_ust_ring_buffer_ctx_init - initialize ring buffer context
* @ctx: ring buffer context to initialize
* @client_priv: client private data
* @data_size: size of record data payload
* @largest_align: largest alignment within data payload types
* @ip: caller ip address
*/
static inline
-void lttng_ust_lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lttng_ust_ring_buffer_ctx_init(struct lttng_ust_ring_buffer_ctx *ctx,
void *client_priv, size_t data_size, int largest_align,
void *ip)
lttng_ust_notrace;
static inline
-void lttng_ust_lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lttng_ust_ring_buffer_ctx_init(struct lttng_ust_ring_buffer_ctx *ctx,
void *client_priv, size_t data_size, int largest_align,
void *ip)
{
- ctx->struct_size = sizeof(struct lttng_ust_lib_ring_buffer_ctx);
+ ctx->struct_size = sizeof(struct lttng_ust_ring_buffer_ctx);
ctx->client_priv = client_priv;
ctx->data_size = data_size;
ctx->largest_align = largest_align;
# define LTTNG_UST_RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
/*
- * lttng_ust_lib_ring_buffer_align - Calculate the offset needed to align the type.
+ * lttng_ust_ring_buffer_align - Calculate the offset needed to align the type.
* @align_drift: object offset from an "alignment"-aligned address.
* @size_of_type: Must be non-zero, power of 2.
*/
static inline
-unsigned int lttng_ust_lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
+unsigned int lttng_ust_ring_buffer_align(size_t align_drift, size_t size_of_type)
lttng_ust_notrace;
static inline
-unsigned int lttng_ust_lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
+unsigned int lttng_ust_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
return lttng_ust_offset_align(align_drift, size_of_type);
}
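/*
 * Worked example (illustrative): a 4-byte integer whose record offset has
 * drifted to 6 needs 2 padding bytes to reach the next 4-byte boundary:
 *
 *	unsigned int pad = lttng_ust_ring_buffer_align(6, sizeof(uint32_t));
 *
 * pad == 2, so the field itself is written at offset 6 + 2 = 8.
 */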
# define LTTNG_UST_RING_BUFFER_ALIGN_ATTR __attribute__((packed))
/*
- * lttng_ust_lib_ring_buffer_align - Calculate the offset needed to align the type.
+ * lttng_ust_ring_buffer_align - Calculate the offset needed to align the type.
* @align_drift: object offset from an "alignment"-aligned address.
* @size_of_type: Must be non-zero, power of 2.
*/
static inline
-unsigned int lttng_ust_lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
+unsigned int lttng_ust_ring_buffer_align(size_t align_drift, size_t size_of_type)
lttng_ust_notrace;
static inline
-unsigned int lttng_ust_lib_ring_buffer_align(size_t align_drift __attribute__((unused)),
+unsigned int lttng_ust_ring_buffer_align(size_t align_drift __attribute__((unused)),
size_t size_of_type __attribute__((unused)))
{
/*
"lttng-ust-wait-" \
lttng_ust_stringify(LTTNG_UST_ABI_MAJOR_VERSION)
-struct lttng_ust_lib_ring_buffer;
-
struct ustctl_consumer_channel_attr {
enum lttng_ust_abi_chan_type type;
uint64_t subbuf_size; /* bytes */
struct lttng_ust_channel_buffer;
struct lttng_ust_session;
-struct lttng_ust_lib_ring_buffer_ctx;
+struct lttng_ust_ring_buffer_ctx;
struct lttng_ust_event_field;
struct lttng_ust_registered_probe;
/* End of base ABI. Fields below should be used after checking struct_size. */
};
-struct lttng_ust_lib_ring_buffer_channel;
+struct lttng_ust_ring_buffer_channel;
struct lttng_ust_channel_buffer_ops_private;
/*
struct lttng_ust_channel_buffer_ops_private *priv; /* Private channel buffer ops interface */
- int (*event_reserve)(struct lttng_ust_lib_ring_buffer_ctx *ctx);
- void (*event_commit)(struct lttng_ust_lib_ring_buffer_ctx *ctx);
- void (*event_write)(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ int (*event_reserve)(struct lttng_ust_ring_buffer_ctx *ctx);
+ void (*event_commit)(struct lttng_ust_ring_buffer_ctx *ctx);
+ void (*event_write)(struct lttng_ust_ring_buffer_ctx *ctx,
const void *src, size_t len, size_t alignment);
- void (*event_strcpy)(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void (*event_strcpy)(struct lttng_ust_ring_buffer_ctx *ctx,
const char *src, size_t len);
- void (*event_pstrcpy_pad)(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void (*event_pstrcpy_pad)(struct lttng_ust_ring_buffer_ctx *ctx,
const char *src, size_t len);
/* End of base ABI. Fields below should be used after checking struct_size. */
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _nowrite) \
if (0) \
(void) (_src); /* Unused */ \
- __event_len += lttng_ust_lib_ring_buffer_align(__event_len, lttng_ust_rb_alignof(_type)); \
+ __event_len += lttng_ust_ring_buffer_align(__event_len, lttng_ust_rb_alignof(_type)); \
__event_len += sizeof(_type);
#undef _ctf_float
#define _ctf_float(_type, _item, _src, _nowrite) \
if (0) \
(void) (_src); /* Unused */ \
- __event_len += lttng_ust_lib_ring_buffer_align(__event_len, lttng_ust_rb_alignof(_type)); \
+ __event_len += lttng_ust_ring_buffer_align(__event_len, lttng_ust_rb_alignof(_type)); \
__event_len += sizeof(_type);
#undef _ctf_array_encoded
_nowrite, _elem_type_base) \
if (0) \
(void) (_src); /* Unused */ \
- __event_len += lttng_ust_lib_ring_buffer_align(__event_len, lttng_ust_rb_alignof(_type)); \
+ __event_len += lttng_ust_ring_buffer_align(__event_len, lttng_ust_rb_alignof(_type)); \
__event_len += sizeof(_type) * (_length);
#undef _ctf_sequence_encoded
_src_length, _encoding, _nowrite, _elem_type_base) \
if (0) \
(void) (_src); /* Unused */ \
- __event_len += lttng_ust_lib_ring_buffer_align(__event_len, lttng_ust_rb_alignof(_length_type)); \
+ __event_len += lttng_ust_ring_buffer_align(__event_len, lttng_ust_rb_alignof(_length_type)); \
__event_len += sizeof(_length_type); \
- __event_len += lttng_ust_lib_ring_buffer_align(__event_len, lttng_ust_rb_alignof(_type)); \
+ __event_len += lttng_ust_ring_buffer_align(__event_len, lttng_ust_rb_alignof(_type)); \
__dynamic_len[__dynamic_len_idx] = (_src_length); \
__event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \
__dynamic_len_idx++;
size_t __event_len, __event_align; \
struct lttng_ust_event_recorder *__event_recorder = (struct lttng_ust_event_recorder *) __event->child; \
struct lttng_ust_channel_buffer *__chan = __event_recorder->chan; \
- struct lttng_ust_lib_ring_buffer_ctx __ctx; \
+ struct lttng_ust_ring_buffer_ctx __ctx; \
\
__event_len = __event_get_size__##_provider##___##_name(__stackvar.__dynamic_len, \
_TP_ARGS_DATA_VAR(_args)); \
__event_align = __event_get_align__##_provider##___##_name(_TP_ARGS_VAR(_args)); \
- lttng_ust_lib_ring_buffer_ctx_init(&__ctx, __event_recorder, __event_len, __event_align, \
+ lttng_ust_ring_buffer_ctx_init(&__ctx, __event_recorder, __event_len, __event_align, \
_TP_IP_PARAM(TP_IP_PARAM)); \
__ret = __chan->ops->event_reserve(&__ctx); \
if (__ret < 0) \
const char *name;
struct cds_list_head node;
struct lttng_ust_channel_buffer_ops ops;
- const struct lttng_ust_lib_ring_buffer_config *client_config;
+ const struct lttng_ust_ring_buffer_config *client_config;
};
struct lttng_counter_transport {
unsigned int id; /* Channel ID */
enum lttng_ust_abi_chan_type type;
struct lttng_ust_ctx *ctx;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan; /* Ring buffer channel */
+ struct lttng_ust_ring_buffer_channel *rb_chan; /* Ring buffer channel */
unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
};
struct lttng_ust_ctx_field {
const struct lttng_ust_event_field *event_field;
size_t (*get_size)(void *priv, size_t offset);
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void (*record)(void *priv, struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan);
void (*get_value)(void *priv, struct lttng_ust_ctx_value *value);
void (*destroy)(void *priv);
/* Ring buffer backend access (read/write) */
-extern size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb,
+extern size_t lib_ring_buffer_read(struct lttng_ust_ring_buffer_backend *bufb,
size_t offset, void *dest, size_t len,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
-extern int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb,
+extern int lib_ring_buffer_read_cstr(struct lttng_ust_ring_buffer_backend *bufb,
size_t offset, void *dest, size_t len,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
* as long as the write is never bigger than a page size.
*/
extern void *
-lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
+lib_ring_buffer_offset_address(struct lttng_ust_ring_buffer_backend *bufb,
size_t offset,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
extern void *
-lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
+lib_ring_buffer_read_offset_address(struct lttng_ust_ring_buffer_backend *bufb,
size_t offset,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
* if copy is crossing a page boundary.
*/
static inline
-void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lib_ring_buffer_write(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
const void *src, size_t len)
__attribute__((always_inline));
static inline
-void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lib_ring_buffer_write(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
const void *src, size_t len)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
struct channel_backend *chanb = &ctx_private->chan->backend;
struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
size_t offset = ctx_private->buf_offset;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
void *p;
if (caa_unlikely(!len))
* copied. Does *not* terminate @dest with NULL terminating character.
*/
static inline
-size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
+size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_ring_buffer_config *config,
char *dest, const char *src, size_t len)
__attribute__((always_inline));
static inline
size_t lib_ring_buffer_do_strcpy(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
char *dest, const char *src, size_t len)
{
size_t count;
* the buffer with @pad characters (e.g. '#').
*/
static inline
-void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lib_ring_buffer_strcpy(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
const char *src, size_t len, char pad)
__attribute__((always_inline));
static inline
-void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lib_ring_buffer_strcpy(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
const char *src, size_t len, char pad)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
struct channel_backend *chanb = &ctx_private->chan->backend;
struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
size_t count;
size_t offset = ctx_private->buf_offset;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
void *p;
if (caa_unlikely(!len))
* is either the array or sequence length.
*/
static inline
-void lib_ring_buffer_pstrcpy(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lib_ring_buffer_pstrcpy(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
const char *src, size_t len, char pad)
__attribute__((always_inline));
static inline
-void lib_ring_buffer_pstrcpy(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lib_ring_buffer_pstrcpy(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
const char *src, size_t len, char pad)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
struct channel_backend *chanb = &ctx_private->chan->backend;
struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
size_t count;
size_t offset = ctx_private->buf_offset;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
void *p;
if (caa_unlikely(!len))
*/
static inline
unsigned long lib_ring_buffer_get_records_unread(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_ust_ring_buffer_backend *bufb = &buf->backend;
unsigned long records_unread = 0, sb_bindex;
unsigned int i;
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
chan = shmp(handle, bufb->chan);
if (!chan)
return 0;
for (i = 0; i < chan->backend.num_subbuf; i++) {
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
wsb = shmp_index(handle, bufb->buf_wsb, i);
if (!wsb)
records_unread += v_read(config, &backend_pages->records_unread);
}
if (config->mode == RING_BUFFER_OVERWRITE) {
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
rpages = shmp_index(handle, bufb->array, sb_bindex);
/* Ring buffer and channel backend create/free */
-int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_create(struct lttng_ust_ring_buffer_backend *bufb,
struct channel_backend *chan,
int cpu,
struct lttng_ust_shm_handle *handle,
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
__attribute__((visibility("hidden")));
-void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb)
+void lib_ring_buffer_backend_free(struct lttng_ust_ring_buffer_backend *bufb)
__attribute__((visibility("hidden")));
int channel_backend_init(struct channel_backend *chanb,
const char *name,
- const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_ring_buffer_config *config,
size_t subbuf_size,
size_t num_subbuf, struct lttng_ust_shm_handle *handle,
const int *stream_fds)
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
-void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
+void lib_ring_buffer_backend_reset(struct lttng_ust_ring_buffer_backend *bufb,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
void lib_ring_buffer_backend_exit(void)
__attribute__((visibility("hidden")));
-extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
+extern void _lib_ring_buffer_write(struct lttng_ust_ring_buffer_backend *bufb,
size_t offset, const void *src, size_t len,
ssize_t pagecpy)
__attribute__((visibility("hidden")));
* mode).
*/
static inline
-unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
+unsigned long subbuffer_id(const struct lttng_ust_ring_buffer_config *config,
unsigned long offset, unsigned long noref,
unsigned long index)
{
*/
static inline
int subbuffer_id_compare_offset(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
unsigned long id, unsigned long offset)
{
return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}
static inline
-unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
+unsigned long subbuffer_id_get_index(const struct lttng_ust_ring_buffer_config *config,
unsigned long id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
}
static inline
-unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
+unsigned long subbuffer_id_is_noref(const struct lttng_ust_ring_buffer_config *config,
unsigned long id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
* needed.
*/
static inline
-void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
+void subbuffer_id_set_noref(const struct lttng_ust_ring_buffer_config *config,
unsigned long *id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
}
static inline
-void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
+void subbuffer_id_set_noref_offset(const struct lttng_ust_ring_buffer_config *config,
unsigned long *id, unsigned long offset)
{
unsigned long tmp;
/* No volatile access, since already used locally */
static inline
-void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
+void subbuffer_id_clear_noref(const struct lttng_ust_ring_buffer_config *config,
unsigned long *id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
* -EPERM on failure.
*/
static inline
-int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
+int subbuffer_id_check_index(const struct lttng_ust_ring_buffer_config *config,
unsigned long num_subbuf)
{
if (config->mode == RING_BUFFER_OVERWRITE)
}
static inline
-int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
+int lib_ring_buffer_backend_get_pages(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_backend_pages **backend_pages)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx_private->buf->backend;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_backend *bufb = &ctx_private->buf->backend;
struct channel_backend *chanb = &ctx_private->chan->backend;
struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
size_t sbidx;
size_t offset = ctx_private->buf_offset;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
unsigned long sb_bindex, id;
- struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages *_backend_pages;
offset &= chanb->buf_size - 1;
sbidx = offset >> chanb->subbuf_size_order;
/* Get backend pages from cache. */
static inline
-struct lttng_ust_lib_ring_buffer_backend_pages *
+struct lttng_ust_ring_buffer_backend_pages *
lib_ring_buffer_get_backend_pages_from_ctx(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx)
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_ctx *ctx)
{
return ctx->priv->backend_pages;
}
*/
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
-void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
- const struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
+void subbuffer_count_record(const struct lttng_ust_ring_buffer_config *config,
+ const struct lttng_ust_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_backend *bufb,
unsigned long idx, struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
if (caa_unlikely(!backend_pages)) {
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
-void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- const struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_backend *bufb __attribute__((unused)),
+void subbuffer_count_record(const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_ctx *ctx __attribute__((unused)),
+ struct lttng_ust_ring_buffer_backend *bufb __attribute__((unused)),
unsigned long idx __attribute__((unused)),
struct lttng_ust_shm_handle *handle __attribute__((unused)))
{
* perform the decrement atomically.
*/
static inline
-void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
+void subbuffer_consume_record(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_backend *bufb,
struct lttng_ust_shm_handle *handle)
{
unsigned long sb_bindex;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *pages_shmp;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
chan = shmp(handle, bufb->chan);
static inline
unsigned long subbuffer_get_records_count(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_backend *bufb,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
unsigned long sb_bindex;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
wsb = shmp_index(handle, bufb->buf_wsb, idx);
if (!wsb)
*/
static inline
unsigned long subbuffer_count_records_overrun(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_backend *bufb,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
unsigned long overruns, sb_bindex;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
wsb = shmp_index(handle, bufb->buf_wsb, idx);
if (!wsb)
}
static inline
-void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
+void subbuffer_set_data_size(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_backend *bufb,
unsigned long idx,
unsigned long data_size,
struct lttng_ust_shm_handle *handle)
{
unsigned long sb_bindex;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
wsb = shmp_index(handle, bufb->buf_wsb, idx);
if (!wsb)
static inline
unsigned long subbuffer_get_read_data_size(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_backend *bufb,
struct lttng_ust_shm_handle *handle)
{
unsigned long sb_bindex;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *pages_shmp;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
static inline
unsigned long subbuffer_get_data_size(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_backend *bufb,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
unsigned long sb_bindex;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
wsb = shmp_index(handle, bufb->buf_wsb, idx);
if (!wsb)
static inline
void subbuffer_inc_packet_count(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_backend *bufb,
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_backend *bufb,
unsigned long idx, struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_counts *counts;
+ struct lttng_ust_ring_buffer_backend_counts *counts;
counts = shmp_index(handle, bufb->buf_cnt, idx);
if (!counts)
* writer.
*/
static inline
-void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
+void lib_ring_buffer_clear_noref(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_backend *bufb,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
unsigned long id, new_id;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
if (config->mode != RING_BUFFER_OVERWRITE)
return;
* called by writer.
*/
static inline
-void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
+void lib_ring_buffer_set_noref_offset(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_backend *bufb,
unsigned long idx, unsigned long offset,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_ring_buffer_channel *chan;
if (config->mode != RING_BUFFER_OVERWRITE)
return;
* update_read_sb_index - Read-side subbuffer index update.
*/
static inline
-int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
+int update_read_sb_index(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_backend *bufb,
struct channel_backend *chanb __attribute__((unused)),
unsigned long consumed_idx,
unsigned long consumed_count,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
unsigned long old_id, new_id;
wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
return -EPERM;
if (config->mode == RING_BUFFER_OVERWRITE) {
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
/*
* Exchange the target writer subbuffer with our own unused
#include "vatomic.h"
#define RB_BACKEND_PAGES_PADDING 16
-struct lttng_ust_lib_ring_buffer_backend_pages {
+struct lttng_ust_ring_buffer_backend_pages {
unsigned long mmap_offset; /* offset of the subbuffer in mmap */
union v_atomic records_commit; /* current records committed count */
union v_atomic records_unread; /* records to read */
char padding[RB_BACKEND_PAGES_PADDING];
};
-struct lttng_ust_lib_ring_buffer_backend_subbuffer {
+struct lttng_ust_ring_buffer_backend_subbuffer {
/* Identifier for subbuf backend pages. Exchanged atomically. */
unsigned long id; /* backend subbuffer identifier */
};
-struct lttng_ust_lib_ring_buffer_backend_counts {
+struct lttng_ust_ring_buffer_backend_counts {
/*
* Counter specific to the sub-buffer location within the ring buffer.
* The actual sequence number of the packet within the entire ring
/*
* Forward declaration of frontend-specific channel and ring_buffer.
*/
-struct lttng_ust_lib_ring_buffer_channel;
-struct lttng_ust_lib_ring_buffer;
+struct lttng_ust_ring_buffer_channel;
+struct lttng_ust_ring_buffer;
-struct lttng_ust_lib_ring_buffer_backend_pages_shmp {
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_pages, shmp);
+struct lttng_ust_ring_buffer_backend_pages_shmp {
+ DECLARE_SHMP(struct lttng_ust_ring_buffer_backend_pages, shmp);
};
#define RB_BACKEND_RING_BUFFER_PADDING 64
-struct lttng_ust_lib_ring_buffer_backend {
+struct lttng_ust_ring_buffer_backend {
/* Array of ring_buffer_backend_subbuffer for writer */
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_subbuffer, buf_wsb);
+ DECLARE_SHMP(struct lttng_ust_ring_buffer_backend_subbuffer, buf_wsb);
/* ring_buffer_backend_subbuffer for reader */
- struct lttng_ust_lib_ring_buffer_backend_subbuffer buf_rsb;
+ struct lttng_ust_ring_buffer_backend_subbuffer buf_rsb;
/* Array of lib_ring_buffer_backend_counts for the packet counter */
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_counts, buf_cnt);
+ DECLARE_SHMP(struct lttng_ust_ring_buffer_backend_counts, buf_cnt);
/*
* Pointer array of backend pages, for whole buffer.
* Indexed by ring_buffer_backend_subbuffer identifier (id) index.
*/
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_pages_shmp, array);
+ DECLARE_SHMP(struct lttng_ust_ring_buffer_backend_pages_shmp, array);
DECLARE_SHMP(char, memory_map); /* memory mapping */
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_channel, chan); /* Associated channel */
+ DECLARE_SHMP(struct lttng_ust_ring_buffer_channel, chan); /* Associated channel */
int cpu; /* This buffer's cpu. -1 if global. */
union v_atomic records_read; /* Number of records read */
unsigned int allocated:1; /* is buffer allocated ? */
char padding[RB_BACKEND_RING_BUFFER_PADDING];
};
-struct lttng_ust_lib_ring_buffer_shmp {
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, shmp); /* Channel per-cpu buffers */
+struct lttng_ust_ring_buffer_shmp {
+ DECLARE_SHMP(struct lttng_ust_ring_buffer, shmp); /* Channel per-cpu buffers */
};
#define RB_BACKEND_CHANNEL_PADDING 64
unsigned long num_subbuf; /* Number of sub-buffers for writer */
uint64_t start_tsc; /* Channel creation TSC value */
DECLARE_SHMP(void *, priv_data);/* Client-specific information */
- struct lttng_ust_lib_ring_buffer_config config; /* Ring buffer configuration */
+ struct lttng_ust_ring_buffer_config config; /* Ring buffer configuration */
char name[NAME_MAX]; /* Channel name */
char padding[RB_BACKEND_CHANNEL_PADDING];
- struct lttng_ust_lib_ring_buffer_shmp buf[];
+ struct lttng_ust_ring_buffer_shmp buf[];
};
#endif /* _LTTNG_RING_BUFFER_BACKEND_TYPES_H */
*/
extern
-struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_config *config,
const char *name,
size_t priv_data_align,
size_t priv_data_size,
* channel_destroy finalizes all channel's buffers, waits for readers to
* release all references, and destroys the channel.
*/
-void channel_destroy(struct lttng_ust_lib_ring_buffer_channel *chan,
+void channel_destroy(struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle,
int consumer)
__attribute__((visibility("hidden")));
#define for_each_channel_cpu(cpu, chan) \
for_each_possible_cpu(cpu)
-extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan, int cpu,
+extern struct lttng_ust_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan, int cpu,
struct lttng_ust_shm_handle *handle,
int *shm_fd, int *wait_fd,
int *wakeup_fd,
__attribute__((visibility("hidden")));
extern
-int ring_buffer_channel_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int ring_buffer_channel_close_wait_fd(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
extern
-int ring_buffer_channel_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int ring_buffer_channel_close_wakeup_fd(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
extern
-int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int ring_buffer_stream_close_wait_fd(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle,
int cpu)
__attribute__((visibility("hidden")));
extern
-int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle,
int cpu)
__attribute__((visibility("hidden")));
-extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
+extern int lib_ring_buffer_open_read(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
-extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
+extern void lib_ring_buffer_release_read(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
/*
* Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
*/
-extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
+extern int lib_ring_buffer_snapshot(struct lttng_ust_ring_buffer *buf,
unsigned long *consumed,
unsigned long *produced,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
extern int lib_ring_buffer_snapshot_sample_positions(
- struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_ring_buffer *buf,
unsigned long *consumed,
unsigned long *produced,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
-extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
+extern void lib_ring_buffer_move_consumer(struct lttng_ust_ring_buffer *buf,
unsigned long consumed_new,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
-extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+extern int lib_ring_buffer_get_subbuf(struct lttng_ust_ring_buffer *buf,
unsigned long consumed,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
-extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+extern void lib_ring_buffer_put_subbuf(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
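/*
 * Hedged consumer-side sketch of that read sequence, using only the
 * declarations above.  Error handling and the actual payload access are
 * omitted; "subbuf_size" is assumed to be the channel's sub-buffer size in
 * bytes and the helper name is illustrative only.
 */
static inline
void lttng_ust_example_drain(struct lttng_ust_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		unsigned long subbuf_size)
{
	unsigned long consumed, produced;

	if (lib_ring_buffer_snapshot(buf, &consumed, &produced, handle))
		return;
	while (consumed < produced) {
		if (lib_ring_buffer_get_subbuf(buf, consumed, handle))
			break;
		/* ... read the sub-buffer payload here ... */
		lib_ring_buffer_put_subbuf(buf, handle);
		consumed += subbuf_size;
		lib_ring_buffer_move_consumer(buf, consumed, handle);
	}
}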
* lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
* to read sub-buffers sequentially.
*/
-static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
int ret;
}
static inline
-void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_put_next_subbuf(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
chan = shmp(handle, buf->backend.chan);
if (!chan)
handle);
}
-extern void channel_reset(struct lttng_ust_lib_ring_buffer_channel *chan)
+extern void channel_reset(struct lttng_ust_ring_buffer_channel *chan)
__attribute__((visibility("hidden")));
-extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
+extern void lib_ring_buffer_reset(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
static inline
-unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
+unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf)
{
return v_read(config, &buf->offset);
}
static inline
unsigned long lib_ring_buffer_get_consumed(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf)
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf)
{
return uatomic_read(&buf->consumed);
}
*/
static inline
int lib_ring_buffer_is_finalized(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf)
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf)
{
int finalized = CMM_ACCESS_ONCE(buf->finalized);
/*
}
static inline
-int lib_ring_buffer_channel_is_finalized(const struct lttng_ust_lib_ring_buffer_channel *chan)
+int lib_ring_buffer_channel_is_finalized(const struct lttng_ust_ring_buffer_channel *chan)
{
return chan->finalized;
}
static inline
-int lib_ring_buffer_channel_is_disabled(const struct lttng_ust_lib_ring_buffer_channel *chan)
+int lib_ring_buffer_channel_is_disabled(const struct lttng_ust_ring_buffer_channel *chan)
{
return uatomic_read(&chan->record_disabled);
}
static inline
unsigned long lib_ring_buffer_get_read_data_size(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
return subbuffer_get_read_data_size(config, &buf->backend, handle);
static inline
unsigned long lib_ring_buffer_get_records_count(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf)
{
return v_read(config, &buf->records_count);
}
static inline
unsigned long lib_ring_buffer_get_records_overrun(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf)
{
return v_read(config, &buf->records_overrun);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_full(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_full);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_wrap(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_wrap);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_big(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_big);
}
static inline
unsigned long lib_ring_buffer_get_records_read(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf)
{
return v_read(config, &buf->backend.records_read);
}
*/
static inline
int lib_ring_buffer_nesting_inc(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)))
{
int nesting;
static inline
int lib_ring_buffer_nesting_count(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)))
{
return URCU_TLS(lib_ring_buffer_nesting);
}
static inline
void lib_ring_buffer_nesting_dec(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)))
{
cmm_barrier();
URCU_TLS(lib_ring_buffer_nesting)--; /* TLS */
* returns 0 if reserve ok, or 1 if the slow path must be taken.
*/
static inline
-int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_try_reserve(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
void *client_ctx,
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
- struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_ring_buffer *buf = ctx_private->buf;
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
ctx_private->slot_size = record_header_size(config, chan, *o_begin,
before_hdr_pad, ctx, client_ctx);
ctx_private->slot_size +=
- lttng_ust_lib_ring_buffer_align(*o_begin + ctx_private->slot_size,
+ lttng_ust_ring_buffer_align(*o_begin + ctx_private->slot_size,
ctx->largest_align) + ctx->data_size;
if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx_private->slot_size)
> chan->backend.subbuf_size))
*/
static inline
-int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_reserve(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_channel *chan = ctx_private->chan;
struct lttng_ust_shm_handle *handle = chan->handle;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
*/
static inline
void lib_ring_buffer_switch(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf, enum switch_mode mode,
struct lttng_ust_shm_handle *handle)
{
lib_ring_buffer_switch_slow(buf, mode, handle);
* specified sub-buffer, and delivers it if necessary.
*/
static inline
-void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
- const struct lttng_ust_lib_ring_buffer_ctx *ctx)
+void lib_ring_buffer_commit(const struct lttng_ust_ring_buffer_config *config,
+ const struct lttng_ust_ring_buffer_ctx *ctx)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_channel *chan = ctx_private->chan;
struct lttng_ust_shm_handle *handle = chan->handle;
- struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
+ struct lttng_ust_ring_buffer *buf = ctx_private->buf;
unsigned long offset_end = ctx_private->buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
unsigned long commit_count;
* Returns 0 upon success, -EPERM if the record cannot be discarded.
*/
static inline
-int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
- const struct lttng_ust_lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_ring_buffer_config *config,
+ const struct lttng_ust_ring_buffer_ctx *ctx)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer *buf = ctx_private->buf;
unsigned long end_offset = ctx_private->pre_offset + ctx_private->slot_size;
/*
static inline
void channel_record_disable(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan)
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan)
{
uatomic_inc(&chan->record_disabled);
}
static inline
void channel_record_enable(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan)
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan)
{
uatomic_dec(&chan->record_disabled);
}
static inline
void lib_ring_buffer_record_disable(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf)
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf)
{
uatomic_inc(&buf->record_disabled);
}
static inline
void lib_ring_buffer_record_enable(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf)
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf)
{
uatomic_dec(&buf->record_disabled);
}
/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
+ struct lttng_ust_ring_buffer_channel *chan)
{
return offset & ~(chan->backend.buf_size - 1);
/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
+ struct lttng_ust_ring_buffer_channel *chan)
{
return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}
/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
+ struct lttng_ust_ring_buffer_channel *chan)
{
return offset & (chan->backend.buf_size - 1);
}
/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
+ struct lttng_ust_ring_buffer_channel *chan)
{
return offset & (chan->backend.subbuf_size - 1);
}
/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
+ struct lttng_ust_ring_buffer_channel *chan)
{
return offset & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
+ struct lttng_ust_ring_buffer_channel *chan)
{
return (offset + chan->backend.subbuf_size)
& ~(chan->backend.subbuf_size - 1);
/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
+ struct lttng_ust_ring_buffer_channel *chan)
{
return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
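/*
 * Concrete illustration of the masks above (hypothetical configuration):
 * buf_size = 4 MiB made of four 1 MiB sub-buffers (subbuf_size_order = 20),
 * offset = 0x523000.
 *
 *   buf_trunc(offset, chan)     == 0x400000  start of the current buffer span
 *   buf_offset(offset, chan)    == 0x123000  offset within that buffer
 *   subbuf_trunc(offset, chan)  == 0x500000  start of the current sub-buffer
 *   subbuf_offset(offset, chan) == 0x023000  offset within that sub-buffer
 *   subbuf_index(offset, chan)  == 1         second sub-buffer of the buffer
 *   subbuf_align(offset, chan)  == 0x600000  start of the next sub-buffer
 */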
#if (CAA_BITS_PER_LONG == 32)
static inline
-void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
+void save_last_tsc(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return;
}
static inline
-int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
+int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t tsc)
{
unsigned long tsc_shifted;
}
#else
static inline
-void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
+void save_last_tsc(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return;
}
static inline
-int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
+int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return 0;
#endif
extern
-int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_reserve_slow(struct lttng_ust_ring_buffer_ctx *ctx,
void *client_ctx)
__attribute__((visibility("hidden")));
extern
-void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_switch_slow(struct lttng_ust_ring_buffer *buf,
enum switch_mode mode,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
-void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
/* Buffer write helpers */
static inline
-void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_reserve_push_reader(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
unsigned long offset)
{
unsigned long consumed_old, consumed_new;
* algorithm guarantees.
*/
static inline
-void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_clear_reader(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
unsigned long offset, consumed_old, consumed_new;
chan = shmp(handle, buf->backend.chan);
}
static inline
-int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan)
+int lib_ring_buffer_pending_data(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan)
{
return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
static inline
-unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
+unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
* This is a very specific ftrace use-case, so we keep this as "internal" API.
*/
static inline
-int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int lib_ring_buffer_reserve_committed(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle)
{
unsigned long offset, idx, commit_count;
* timestamp of the following subbuffers.
*/
static inline
-void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_check_deliver(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
*/
static inline
void lib_ring_buffer_write_commit_counter(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan,
unsigned long buf_offset,
unsigned long commit_count,
struct lttng_ust_shm_handle *handle __attribute__((unused)),
v_set(config, &cc_hot->seq, commit_count);
}
-extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
+extern int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf,
struct channel_backend *chanb, int cpu,
struct lttng_ust_shm_handle *handle,
struct shm_object *shmobj)
__attribute__((visibility("hidden")));
-extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
+extern void lib_ring_buffer_free(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
__attribute__((visibility("hidden")));
/* channel: collection of per-cpu ring buffers. */
#define RB_CHANNEL_PADDING 32
-struct lttng_ust_lib_ring_buffer_channel {
+struct lttng_ust_ring_buffer_channel {
int record_disabled;
unsigned long commit_count_mask; /*
* Commit count mask, removing
uint32_t mode; /* Buffer mode: 0: overwrite, 1: discard */
} __attribute__((packed));
-struct lttng_ust_lib_ring_buffer {
+struct lttng_ust_ring_buffer {
/* First 32 bytes are for the buffer crash dump ABI */
struct lttng_crash_abi crash_abi;
* Last timestamp written in the buffer.
*/
- struct lttng_ust_lib_ring_buffer_backend backend;
+ struct lttng_ust_ring_buffer_backend backend;
/* Associated backend */
DECLARE_SHMP(struct commit_counters_cold, commit_cold);
unsigned long cons_snapshot; /* Consumer count snapshot */
unsigned int get_subbuf:1; /* Sub-buffer being held by reader */
/* shmp pointer to self */
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, self);
+ DECLARE_SHMP(struct lttng_ust_ring_buffer, self);
char padding[RB_RING_BUFFER_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
* ring buffer private context
*
* Private context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
- * lib_ring_buffer_try_discard_reserve(), lttng_ust_lib_ring_buffer_align_ctx() and
+ * lib_ring_buffer_try_discard_reserve(), lttng_ust_ring_buffer_align_ctx() and
* lib_ring_buffer_write().
*
* This context is allocated on an internal shadow-stack by a successful reserve
* operation, used by align/write, and freed by commit.
*/
-struct lttng_ust_lib_ring_buffer_ctx_private {
+struct lttng_ust_ring_buffer_ctx_private {
/* input received by lib_ring_buffer_reserve(). */
- struct lttng_ust_lib_ring_buffer_ctx *pub;
- struct lttng_ust_lib_ring_buffer_channel *chan; /* channel */
+ struct lttng_ust_ring_buffer_ctx *pub;
+ struct lttng_ust_ring_buffer_channel *chan; /* channel */
/* output from lib_ring_buffer_reserve() */
int reserve_cpu; /* processor id updated by the reserve */
unsigned int rflags; /* reservation flags */
void *ip; /* caller ip address */
- struct lttng_ust_lib_ring_buffer *buf; /*
+ struct lttng_ust_ring_buffer *buf; /*
* buffer corresponding to processor id
* for this channel
*/
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
};
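Editorial sketch (not part of this patch): the write-side lifecycle described above, from a hypothetical client's point of view. The lib_ring_buffer_reserve()/lib_ring_buffer_write()/lib_ring_buffer_commit() signatures are assumed to follow the frontend/backend inline API (config + ctx [+ payload]); check the actual headers before reusing this.
static inline
int example_append_u64(const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_ctx *ctx, uint64_t value)
{
	int ret;
	/* Reserve space; on success, ctx->priv points to the shadow-stack slot. */
	ret = lib_ring_buffer_reserve(config, ctx, NULL);
	if (ret)
		return ret;	/* negative errno: record is dropped */
	/* Align the write offset for the payload type, then copy the payload. */
	lttng_ust_ring_buffer_align_ctx(ctx, sizeof(value));
	lib_ring_buffer_write(config, ctx, &value, sizeof(value));
	/* Commit releases the slot; the private context must not be used afterwards. */
	lib_ring_buffer_commit(config, ctx);
	return 0;
}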
static inline
-void *channel_get_private_config(struct lttng_ust_lib_ring_buffer_channel *chan)
+void *channel_get_private_config(struct lttng_ust_ring_buffer_channel *chan)
{
return ((char *) chan) + chan->priv_data_offset;
}
static inline
-void *channel_get_private(struct lttng_ust_lib_ring_buffer_channel *chan)
+void *channel_get_private(struct lttng_ust_ring_buffer_channel *chan)
{
return chan->u.s.priv;
}
static inline
-void channel_set_private(struct lttng_ust_lib_ring_buffer_channel *chan, void *priv)
+void channel_set_private(struct lttng_ust_ring_buffer_channel *chan, void *priv)
{
chan->u.s.priv = priv;
}
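Usage sketch (editorial, not part of this patch): a client can hang its own per-channel state off these accessors; struct example_client_state is purely illustrative.
struct example_client_state {
	uint64_t records_handled;
};
static inline
void example_attach_state(struct lttng_ust_ring_buffer_channel *chan,
		struct example_client_state *state)
{
	channel_set_private(chan, state);
}
static inline
void example_on_record(struct lttng_ust_ring_buffer_channel *chan)
{
	struct example_client_state *state = channel_get_private(chan);
	state->records_handled++;
}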
/*
* Issue warnings and disable channels upon internal error.
- * Can receive struct lttng_ust_lib_ring_buffer or struct lttng_ust_lib_ring_buffer_backend
+ * Can receive struct lttng_ust_ring_buffer or struct lttng_ust_ring_buffer_backend
* parameters.
*/
#define CHAN_WARN_ON(c, cond) \
({ \
- struct lttng_ust_lib_ring_buffer_channel *__chan; \
+ struct lttng_ust_ring_buffer_channel *__chan; \
int _____ret = caa_unlikely(cond); \
if (_____ret) { \
if (__rb_same_type(*(c), struct channel_backend)) \
__chan = caa_container_of((void *) (c), \
- struct lttng_ust_lib_ring_buffer_channel, \
+ struct lttng_ust_ring_buffer_channel, \
backend); \
else if (__rb_same_type(*(c), \
- struct lttng_ust_lib_ring_buffer_channel)) \
+ struct lttng_ust_ring_buffer_channel)) \
__chan = (void *) (c); \
else \
BUG_ON(1); \
})
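Hypothetical usage (editorial sketch, not part of this patch): on a failed condition the macro warns and disables the channel it resolves from its first argument, so internal invariants are typically checked like this.
static inline
void example_check_offset(struct lttng_ust_ring_buffer_channel *chan,
		unsigned long offset)
{
	/* Warn and disable the channel if the offset is out of bounds. */
	CHAN_WARN_ON(chan, offset >= chan->backend.buf_size);
}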
/**
- * lttng_ust_lib_ring_buffer_align_ctx - Align context offset on "alignment"
+ * lttng_ust_ring_buffer_align_ctx - Align context offset on "alignment"
* @ctx: ring buffer context.
*/
static inline
-void lttng_ust_lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lttng_ust_ring_buffer_align_ctx(struct lttng_ust_ring_buffer_ctx *ctx,
size_t alignment)
lttng_ust_notrace;
static inline
-void lttng_ust_lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lttng_ust_ring_buffer_align_ctx(struct lttng_ust_ring_buffer_ctx *ctx,
size_t alignment)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
- ctx_private->buf_offset += lttng_ust_lib_ring_buffer_align(ctx_private->buf_offset,
+ ctx_private->buf_offset += lttng_ust_ring_buffer_align(ctx_private->buf_offset,
alignment);
}
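Note that lttng_ust_ring_buffer_align_ctx() only advances the private buf_offset; the padding it introduces must already have been accounted for when the record size was computed at reserve time. Editorial sketch of that size computation, mirroring the slow-path reserve code later in this patch:
static inline
size_t example_record_size(size_t begin_offset, size_t header_size,
		size_t payload_align, size_t payload_size)
{
	size_t size = header_size;
	/* Padding so the payload starts on a payload_align boundary. */
	size += lttng_ust_ring_buffer_align(begin_offset + size, payload_align);
	return size + payload_size;
}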
* Copyright (C) 2012-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
-#ifndef _LTTNG_UST_LIB_RINGBUFFER_RB_INIT_H
-#define _LTTNG_UST_LIB_RINGBUFFER_RB_INIT_H
+#ifndef _LTTNG_UST_RINGBUFFER_RB_INIT_H
+#define _LTTNG_UST_RINGBUFFER_RB_INIT_H
void lttng_fixup_ringbuffer_tls(void)
__attribute__((visibility("hidden")));
void lttng_ust_ringbuffer_set_allow_blocking(void)
__attribute__((visibility("hidden")));
-#endif /* _LTTNG_UST_LIB_RINGBUFFER_RB_INIT_H */
+#endif /* _LTTNG_UST_RINGBUFFER_RB_INIT_H */
* @extra_reader_sb: need extra subbuffer for reader
*/
static
-int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_allocate(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_backend *bufb,
size_t size __attribute__((unused)), size_t num_subbuf,
int extra_reader_sb,
struct lttng_ust_shm_handle *handle,
goto page_size_error;
}
- align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
+ align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer_backend_pages_shmp));
set_shmp(bufb->array, zalloc_shm(shmobj,
- sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
+ sizeof(struct lttng_ust_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
if (caa_unlikely(!shmp(handle, bufb->array)))
goto array_error;
/* Allocate backend pages array elements */
for (i = 0; i < num_subbuf_alloc; i++) {
- align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
+ align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer_backend_pages));
set_shmp(shmp_index(handle, bufb->array, i)->shmp,
zalloc_shm(shmobj,
- sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
+ sizeof(struct lttng_ust_ring_buffer_backend_pages)));
if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
goto free_array;
}
/* Allocate write-side subbuffer table */
- align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
+ align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer_backend_subbuffer));
set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
- sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
+ sizeof(struct lttng_ust_ring_buffer_backend_subbuffer)
* num_subbuf));
if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
goto free_array;
for (i = 0; i < num_subbuf; i++) {
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;
+ struct lttng_ust_ring_buffer_backend_subbuffer *sb;
sb = shmp_index(handle, bufb->buf_wsb, i);
if (!sb)
bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
/* Allocate subbuffer packet counter table */
- align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
+ align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer_backend_counts));
set_shmp(bufb->buf_cnt, zalloc_shm(shmobj,
- sizeof(struct lttng_ust_lib_ring_buffer_backend_counts)
+ sizeof(struct lttng_ust_ring_buffer_backend_counts)
* num_subbuf));
if (caa_unlikely(!shmp(handle, bufb->buf_cnt)))
goto free_wsb;
/* Assign pages to page index */
for (i = 0; i < num_subbuf_alloc; i++) {
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
- struct lttng_ust_lib_ring_buffer_backend_pages *pages;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *sbp;
+ struct lttng_ust_ring_buffer_backend_pages *pages;
struct shm_ref ref;
ref.index = bufb->memory_map._ref.index;
return -ENOMEM;
}
-int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_create(struct lttng_ust_ring_buffer_backend *bufb,
struct channel_backend *chanb, int cpu,
struct lttng_ust_shm_handle *handle,
struct shm_object *shmobj)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
+ const struct lttng_ust_ring_buffer_config *config = &chanb->config;
set_shmp(bufb->chan, handle->chan._ref);
bufb->cpu = cpu;
handle, shmobj);
}
-void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
+void lib_ring_buffer_backend_reset(struct lttng_ust_ring_buffer_backend *bufb,
struct lttng_ust_shm_handle *handle)
{
struct channel_backend *chanb;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ const struct lttng_ust_ring_buffer_config *config;
unsigned long num_subbuf_alloc;
unsigned int i;
num_subbuf_alloc++;
for (i = 0; i < chanb->num_subbuf; i++) {
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;
+ struct lttng_ust_ring_buffer_backend_subbuffer *sb;
sb = shmp_index(handle, bufb->buf_wsb, i);
if (!sb)
bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
for (i = 0; i < num_subbuf_alloc; i++) {
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
- struct lttng_ust_lib_ring_buffer_backend_pages *pages;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *sbp;
+ struct lttng_ust_ring_buffer_backend_pages *pages;
sbp = shmp_index(handle, bufb->array, i);
if (!sbp)
*/
void channel_backend_reset(struct channel_backend *chanb)
{
- struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
- struct lttng_ust_lib_ring_buffer_channel, backend);
- const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
+ struct lttng_ust_ring_buffer_channel *chan = caa_container_of(chanb,
+ struct lttng_ust_ring_buffer_channel, backend);
+ const struct lttng_ust_ring_buffer_config *config = &chanb->config;
/*
* Don't reset buf_size, subbuf_size, subbuf_size_order,
*/
int channel_backend_init(struct channel_backend *chanb,
const char *name,
- const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_ring_buffer_config *config,
size_t subbuf_size, size_t num_subbuf,
struct lttng_ust_shm_handle *handle,
const int *stream_fds)
{
- struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
- struct lttng_ust_lib_ring_buffer_channel, backend);
+ struct lttng_ust_ring_buffer_channel *chan = caa_container_of(chanb,
+ struct lttng_ust_ring_buffer_channel, backend);
unsigned int i;
int ret;
size_t shmsize = 0, num_subbuf_alloc;
memcpy(&chanb->config, config, sizeof(*config));
/* Per-cpu buffer size: control (prior to backend) */
- shmsize = lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer);
+ shmsize = lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_ring_buffer));
+ shmsize += sizeof(struct lttng_ust_ring_buffer);
shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_hot));
shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_cold));
/* Per-cpu buffer size: backend */
/* num_subbuf + 1 is the worst case */
num_subbuf_alloc = num_subbuf + 1;
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_ring_buffer_backend_pages_shmp));
+ shmsize += sizeof(struct lttng_ust_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
shmsize += lttng_ust_offset_align(shmsize, page_size);
shmsize += subbuf_size * num_subbuf_alloc;
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_counts) * num_subbuf;
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_ring_buffer_backend_pages));
+ shmsize += sizeof(struct lttng_ust_ring_buffer_backend_pages) * num_subbuf_alloc;
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_ring_buffer_backend_subbuffer));
+ shmsize += sizeof(struct lttng_ust_ring_buffer_backend_subbuffer) * num_subbuf;
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_ring_buffer_backend_counts));
+ shmsize += sizeof(struct lttng_ust_ring_buffer_backend_counts) * num_subbuf;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
/*
* We need to allocate for all possible cpus.
*/
SHM_OBJECT_SHM, stream_fds[i], i);
if (!shmobj)
goto end;
- align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
- set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
+ align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer));
+ set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_ring_buffer)));
buf = shmp(handle, chanb->buf[i].shmp);
if (!buf)
goto end;
}
} else {
struct shm_object *shmobj;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
shmobj = shm_object_table_alloc(handle->table, shmsize,
SHM_OBJECT_SHM, stream_fds[0], -1);
if (!shmobj)
goto end;
- align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
- set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
+ align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer));
+ set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_ring_buffer)));
buf = shmp(handle, chanb->buf[0].shmp);
if (!buf)
goto end;
* Should be protected by get_subbuf/put_subbuf.
* Returns the length copied.
*/
-size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
+size_t lib_ring_buffer_read(struct lttng_ust_ring_buffer_backend *bufb, size_t offset,
void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
struct channel_backend *chanb;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ const struct lttng_ust_ring_buffer_config *config;
ssize_t orig_len;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
unsigned long sb_bindex, id;
void *src;
* Should be protected by get_subbuf/put_subbuf.
* Destination length should be at least 1 to hold '\0'.
*/
-int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
+int lib_ring_buffer_read_cstr(struct lttng_ust_ring_buffer_backend *bufb, size_t offset,
void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
struct channel_backend *chanb;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ const struct lttng_ust_ring_buffer_config *config;
ssize_t string_len, orig_offset;
char *str;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
unsigned long sb_bindex, id;
chanb = &shmp(handle, bufb->chan)->backend;
* from/to this address, as long as the read/write is never bigger than
* a page size.
*/
-void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
+void *lib_ring_buffer_read_offset_address(struct lttng_ust_ring_buffer_backend *bufb,
size_t offset,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
struct channel_backend *chanb;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ const struct lttng_ust_ring_buffer_config *config;
unsigned long sb_bindex, id;
chanb = &shmp(handle, bufb->chan)->backend;
* it's always at the beginning of a page, it's safe to write directly to this
* address, as long as the write is never bigger than a page size.
*/
-void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
+void *lib_ring_buffer_offset_address(struct lttng_ust_ring_buffer_backend *bufb,
size_t offset,
struct lttng_ust_shm_handle *handle)
{
size_t sbidx;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
struct channel_backend *chanb;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ const struct lttng_ust_ring_buffer_config *config;
unsigned long sb_bindex, id;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;
+ struct lttng_ust_ring_buffer_backend_subbuffer *sb;
chanb = &shmp(handle, bufb->chan)->backend;
if (!chanb)
static pthread_mutex_t wakeup_fd_mutex = PTHREAD_MUTEX_INITIALIZER;
static
-void lib_ring_buffer_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_lib_ring_buffer *buf, int cpu,
+void lib_ring_buffer_print_errors(struct lttng_ust_ring_buffer_channel *chan,
+ struct lttng_ust_ring_buffer *buf, int cpu,
struct lttng_ust_shm_handle *handle);
/*
}
/* Get blocking timeout, in ms */
-static int lttng_ust_ringbuffer_get_timeout(struct lttng_ust_lib_ring_buffer_channel *chan)
+static int lttng_ust_ringbuffer_get_timeout(struct lttng_ust_ring_buffer_channel *chan)
{
if (!lttng_ust_allow_blocking)
return 0;
* should not be using the iterator concurrently with reset. The previous
* current iterator record is reset.
*/
-void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_reset(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
unsigned int i;
chan = shmp(handle, buf->backend.chan);
* be using the iterator concurrently with reset. The previous current iterator
* record is reset.
*/
-void channel_reset(struct lttng_ust_lib_ring_buffer_channel *chan)
+void channel_reset(struct lttng_ust_ring_buffer_channel *chan)
{
/*
* Reset iterators first. Will put the subbuffer if held for reading.
}
static
-void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
+void init_crash_abi(const struct lttng_ust_ring_buffer_config *config,
struct lttng_crash_abi *crash_abi,
- struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_ring_buffer *buf,
struct channel_backend *chanb,
struct shm_object *shmobj,
struct lttng_ust_shm_handle *handle)
crash_abi->offset.buf_wsb_array =
(uint32_t) ((char *) shmp(handle, buf->backend.buf_wsb) - (char *) buf);
crash_abi->offset.buf_wsb_id =
- offsetof(struct lttng_ust_lib_ring_buffer_backend_subbuffer, id);
+ offsetof(struct lttng_ust_ring_buffer_backend_subbuffer, id);
crash_abi->offset.sb_array =
(uint32_t) ((char *) shmp(handle, buf->backend.array) - (char *) buf);
crash_abi->offset.sb_array_shmp_offset =
- offsetof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp,
+ offsetof(struct lttng_ust_ring_buffer_backend_pages_shmp,
shmp._ref.offset);
crash_abi->offset.sb_backend_p_offset =
- offsetof(struct lttng_ust_lib_ring_buffer_backend_pages,
+ offsetof(struct lttng_ust_ring_buffer_backend_pages,
p._ref.offset);
/* Field length */
crash_abi->length.commit_hot_seq =
sizeof(((struct commit_counters_hot *) NULL)->seq);
crash_abi->length.buf_wsb_id =
- sizeof(((struct lttng_ust_lib_ring_buffer_backend_subbuffer *) NULL)->id);
+ sizeof(((struct lttng_ust_ring_buffer_backend_subbuffer *) NULL)->id);
crash_abi->length.sb_array_shmp_offset =
- sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
+ sizeof(((struct lttng_ust_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
crash_abi->length.sb_backend_p_offset =
- sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages *) NULL)->p._ref.offset);
+ sizeof(((struct lttng_ust_ring_buffer_backend_pages *) NULL)->p._ref.offset);
/* Array stride */
crash_abi->stride.commit_hot_array =
sizeof(struct commit_counters_hot);
crash_abi->stride.buf_wsb_array =
- sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer);
+ sizeof(struct lttng_ust_ring_buffer_backend_subbuffer);
crash_abi->stride.sb_array =
- sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp);
+ sizeof(struct lttng_ust_ring_buffer_backend_pages_shmp);
/* Buffer constants */
crash_abi->buf_size = chanb->buf_size;
/*
* Must be called under cpu hotplug protection.
*/
-int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
+int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf,
struct channel_backend *chanb, int cpu,
struct lttng_ust_shm_handle *handle,
struct shm_object *shmobj)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
- struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
- struct lttng_ust_lib_ring_buffer_channel, backend);
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_channel *shmp_chan;
+ const struct lttng_ust_ring_buffer_config *config = &chanb->config;
+ struct lttng_ust_ring_buffer_channel *chan = caa_container_of(chanb,
+ struct lttng_ust_ring_buffer_channel, backend);
+ struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_ring_buffer_channel *shmp_chan;
struct commit_counters_hot *cc_hot;
void *priv = channel_get_private_config(chan);
size_t subbuf_header_size;
void lib_ring_buffer_channel_switch_timer(int sig __attribute__((unused)),
siginfo_t *si, void *uc __attribute__((unused)))
{
- const struct lttng_ust_lib_ring_buffer_config *config;
+ const struct lttng_ust_ring_buffer_config *config;
struct lttng_ust_shm_handle *handle;
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
int cpu;
assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());
pthread_mutex_lock(&wakeup_fd_mutex);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(cpu) {
- struct lttng_ust_lib_ring_buffer *buf =
+ struct lttng_ust_ring_buffer *buf =
shmp(handle, chan->backend.buf[cpu].shmp);
if (!buf)
chan->handle);
}
} else {
- struct lttng_ust_lib_ring_buffer *buf =
+ struct lttng_ust_ring_buffer *buf =
shmp(handle, chan->backend.buf[0].shmp);
if (!buf)
}
static
-int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int lib_ring_buffer_poll_deliver(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle)
{
unsigned long consumed_old, consumed_idx, commit_count, write_offset;
}
static
-void lib_ring_buffer_wakeup(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_wakeup(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
}
static
-void lib_ring_buffer_channel_do_read(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_do_read(struct lttng_ust_ring_buffer_channel *chan)
{
- const struct lttng_ust_lib_ring_buffer_config *config;
+ const struct lttng_ust_ring_buffer_config *config;
struct lttng_ust_shm_handle *handle;
int cpu;
pthread_mutex_lock(&wakeup_fd_mutex);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(cpu) {
- struct lttng_ust_lib_ring_buffer *buf =
+ struct lttng_ust_ring_buffer *buf =
shmp(handle, chan->backend.buf[cpu].shmp);
if (!buf)
}
}
} else {
- struct lttng_ust_lib_ring_buffer *buf =
+ struct lttng_ust_ring_buffer *buf =
shmp(handle, chan->backend.buf[0].shmp);
if (!buf)
void lib_ring_buffer_channel_read_timer(int sig __attribute__((unused)),
siginfo_t *si, void *uc __attribute__((unused)))
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());
chan = si->si_value.sival_ptr;
}
static
-void lib_ring_buffer_channel_switch_timer_start(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_switch_timer_start(struct lttng_ust_ring_buffer_channel *chan)
{
struct sigevent sev;
struct itimerspec its;
}
static
-void lib_ring_buffer_channel_switch_timer_stop(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_switch_timer_stop(struct lttng_ust_ring_buffer_channel *chan)
{
int ret;
}
static
-void lib_ring_buffer_channel_read_timer_start(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_read_timer_start(struct lttng_ust_ring_buffer_channel *chan)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
struct sigevent sev;
struct itimerspec its;
int ret;
}
static
-void lib_ring_buffer_channel_read_timer_stop(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_read_timer_stop(struct lttng_ust_ring_buffer_channel *chan)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
int ret;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
chan->read_timer_enabled = 0;
}
-static void channel_unregister_notifiers(struct lttng_ust_lib_ring_buffer_channel *chan,
+static void channel_unregister_notifiers(struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle __attribute__((unused)))
{
lib_ring_buffer_channel_switch_timer_stop(chan);
lib_ring_buffer_channel_read_timer_stop(chan);
}
-static void channel_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
+static void channel_print_errors(struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config =
+ const struct lttng_ust_ring_buffer_config *config =
&chan->backend.config;
int cpu;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(cpu) {
- struct lttng_ust_lib_ring_buffer *buf =
+ struct lttng_ust_ring_buffer *buf =
shmp(handle, chan->backend.buf[cpu].shmp);
if (buf)
lib_ring_buffer_print_errors(chan, buf, cpu, handle);
}
} else {
- struct lttng_ust_lib_ring_buffer *buf =
+ struct lttng_ust_ring_buffer *buf =
shmp(handle, chan->backend.buf[0].shmp);
if (buf)
}
}
-static void channel_free(struct lttng_ust_lib_ring_buffer_channel *chan,
+static void channel_free(struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle,
int consumer)
{
* Holds cpu hotplug.
* Returns NULL on failure.
*/
-struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_config *config,
const char *name,
size_t priv_data_align,
size_t priv_data_size,
{
int ret;
size_t shmsize, chansize;
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
struct lttng_ust_shm_handle *handle;
struct shm_object *shmobj;
unsigned int nr_streams;
goto error_table_alloc;
/* Calculate the shm allocation layout */
- shmsize = sizeof(struct lttng_ust_lib_ring_buffer_channel);
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * nr_streams;
+ shmsize = sizeof(struct lttng_ust_ring_buffer_channel);
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_ring_buffer_shmp));
+ shmsize += sizeof(struct lttng_ust_ring_buffer_shmp) * nr_streams;
chansize = shmsize;
if (priv_data_align)
shmsize += lttng_ust_offset_align(shmsize, priv_data_align);
-1, -1);
if (!shmobj)
goto error_append;
- /* struct lttng_ust_lib_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
+ /* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
set_shmp(handle->chan, zalloc_shm(shmobj, chansize));
assert(handle->chan._ref.index == 0);
assert(handle->chan._ref.offset == 0);
memory_map_size, wakeup_fd);
if (!object)
goto error_table_object;
- /* struct lttng_ust_lib_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
+ /* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
handle->chan._ref.index = 0;
handle->chan._ref.offset = 0;
return handle;
}
static
-void channel_release(struct lttng_ust_lib_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
+void channel_release(struct lttng_ust_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
int consumer)
{
channel_free(chan, handle, consumer);
* consumption of finalized channels, get_subbuf() will return -ENODATA.
* They should release their handle at that point.
*/
-void channel_destroy(struct lttng_ust_lib_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
+void channel_destroy(struct lttng_ust_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
int consumer)
{
if (consumer) {
return;
}
-struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan, int cpu,
+struct lttng_ust_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan, int cpu,
struct lttng_ust_shm_handle *handle,
int *shm_fd, int *wait_fd,
int *wakeup_fd,
}
int ring_buffer_channel_close_wait_fd(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)),
struct lttng_ust_shm_handle *handle)
{
struct shm_ref *ref;
}
int ring_buffer_channel_close_wakeup_fd(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)),
struct lttng_ust_shm_handle *handle)
{
struct shm_ref *ref;
return shm_close_wakeup_fd(handle, ref);
}
-int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int ring_buffer_stream_close_wait_fd(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle,
int cpu)
{
return shm_close_wait_fd(handle, ref);
}
-int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle,
int cpu)
{
return ret;
}
-int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
+int lib_ring_buffer_open_read(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle __attribute__((unused)))
{
if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
return 0;
}
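Editorial sketch (not part of this patch) of a minimal consumer loop built on the read-side API around here. Error handling and the offset/length arithmetic are deliberately simplified: a real consumer would query the sub-buffer data size and read through the mmap or backend accessors instead of a fixed-size copy.
static
int example_drain_buffer(struct lttng_ust_ring_buffer *buf,
		struct lttng_ust_ring_buffer_channel *chan,
		struct lttng_ust_shm_handle *handle,
		void *dest, size_t dest_len)
{
	unsigned long consumed, produced;
	if (lib_ring_buffer_open_read(buf, handle))
		return -1;		/* another reader already holds the buffer */
	while (!lib_ring_buffer_snapshot(buf, &consumed, &produced, handle)) {
		if (lib_ring_buffer_get_subbuf(buf, consumed, handle))
			break;		/* -EAGAIN/-ENODATA: nothing to read yet */
		/* Copy (at most dest_len bytes of) the sub-buffer out. */
		lib_ring_buffer_read(&buf->backend,
				consumed & (chan->backend.buf_size - 1),
				dest, dest_len, handle);
		lib_ring_buffer_put_subbuf(buf, handle);
		lib_ring_buffer_move_consumer(buf,
				consumed + chan->backend.subbuf_size, handle);
	}
	lib_ring_buffer_release_read(buf, handle);
	return 0;
}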
-void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_release_read(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct lttng_ust_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
if (!chan)
return;
* data to read at consumed position, or 0 if the get operation succeeds.
*/
-int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
+int lib_ring_buffer_snapshot(struct lttng_ust_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
unsigned long consumed_cur, write_offset;
int finalized;
* consumer positions without regard for the "snapshot" feature.
*/
int lib_ring_buffer_snapshot_sample_positions(
- struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
chan = shmp(handle, buf->backend.chan);
if (!chan)
* @buf: ring buffer
* @consumed_new: new consumed count value
*/
-void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_move_consumer(struct lttng_ust_ring_buffer *buf,
unsigned long consumed_new,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_ust_ring_buffer_channel *chan;
unsigned long consumed;
chan = shmp(handle, bufb->chan);
* Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
* data to read at consumed position, or 0 if the get operation succeeds.
*/
-int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+int lib_ring_buffer_get_subbuf(struct lttng_ust_ring_buffer *buf,
unsigned long consumed,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY;
struct commit_counters_cold *cc_cold;
* lib_ring_buffer_put_subbuf - release exclusive subbuffer access
* @buf: ring buffer
*/
-void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_put_subbuf(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
unsigned long sb_bindex, consumed_idx, consumed;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
chan = shmp(handle, bufb->chan);
if (!chan)
* position and the writer position. (inclusive)
*/
static
-void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
unsigned long cons_offset,
int cpu,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long cons_idx, commit_count, commit_count_sb;
struct commit_counters_hot *cc_hot;
struct commit_counters_cold *cc_cold;
}
static
-void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_print_buffer_errors(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
int cpu, struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long write_offset, cons_offset;
/*
}
static
-void lib_ring_buffer_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_lib_ring_buffer *buf, int cpu,
+void lib_ring_buffer_print_errors(struct lttng_ust_ring_buffer_channel *chan,
+ struct lttng_ust_ring_buffer *buf, int cpu,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
if (!strcmp(chan->backend.name, "relay-metadata-mmap")) {
DBG("ring buffer %s: %lu records written, "
* active or at buffer finalization (destroy).
*/
static
-void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_switch_old_start(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
uint64_t tsc,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old, chan);
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
* subbuffer.
*/
static
-void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_switch_old_end(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
uint64_t tsc,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
unsigned long commit_count, padding_size, data_size;
struct commit_counters_hot *cc_hot;
 * that this code is executed before the delivery of this sub-buffer.
*/
static
-void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_switch_new_start(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
uint64_t tsc,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long beginidx = subbuf_index(offsets->begin, chan);
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
* we are currently doing the space reservation.
*/
static
-void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_switch_new_end(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
uint64_t tsc,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long endidx, data_size;
uint64_t *ts_end;
*/
static
int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
uint64_t *tsc,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long off, reserve_commit_diff;
offsets->begin = v_read(config, &buf->offset);
* RING_BUFFER_SYNC_GLOBAL ring buffers, this function can be called
* from any CPU.
*/
-void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+void lib_ring_buffer_switch_slow(struct lttng_ust_ring_buffer *buf, enum switch_mode mode,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
struct switch_offsets offsets;
unsigned long oldidx;
uint64_t tsc;
* -EIO if data cannot be written into the buffer for any other reason.
*/
static
-int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int lib_ring_buffer_try_reserve_slow(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
struct lttng_ust_shm_handle *handle = chan->handle;
unsigned long reserve_commit_diff, offset_cmp;
int timeout_left_ms = lttng_ust_ringbuffer_get_timeout(chan);
&offsets->pre_header_padding,
ctx, client_ctx);
offsets->size +=
- lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
+ lttng_ust_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
+ ctx->data_size;
if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
&offsets->pre_header_padding,
ctx, client_ctx);
offsets->size +=
- lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
+ lttng_ust_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
+ ctx->data_size;
if (caa_unlikely(subbuf_offset(offsets->begin, chan)
* -EIO for other errors, else returns 0.
* It will take care of sub-buffer switching.
*/
-int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_reserve_slow(struct lttng_ust_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_channel *chan = ctx_private->chan;
struct lttng_ust_shm_handle *handle = chan->handle;
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- struct lttng_ust_lib_ring_buffer *buf;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_ust_ring_buffer *buf;
struct switch_offsets offsets;
int ret;
}
static
-void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
unsigned long commit_count,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
*/
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static
-void deliver_count_events(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
+void deliver_count_events(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static
void deliver_count_events(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf __attribute__((unused)),
unsigned long idx __attribute__((unused)),
struct lttng_ust_shm_handle *handle __attribute__((unused)))
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
-void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
#include <lttng/ust-compiler.h>
#include <lttng/ust-tracer.h>
-struct lttng_ust_lib_ring_buffer;
-struct lttng_ust_lib_ring_buffer_channel;
-struct lttng_ust_lib_ring_buffer_config;
-struct lttng_ust_lib_ring_buffer_ctx_private;
+struct lttng_ust_ring_buffer;
+struct lttng_ust_ring_buffer_channel;
+struct lttng_ust_ring_buffer_config;
+struct lttng_ust_ring_buffer_ctx_private;
struct lttng_ust_shm_handle;
/*
* provided as inline functions too. These may simply return 0 if not used by
* the client.
*/
-struct lttng_ust_lib_ring_buffer_client_cb {
+struct lttng_ust_ring_buffer_client_cb {
/* Mandatory callbacks */
/* A static inline version is also required for fast path */
- uint64_t (*ring_buffer_clock_read) (struct lttng_ust_lib_ring_buffer_channel *chan);
- size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t (*ring_buffer_clock_read) (struct lttng_ust_ring_buffer_channel *chan);
+ size_t (*record_header_size) (const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan,
size_t offset,
size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
void *client_ctx);
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
- void (*buffer_begin) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
+ void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t tsc,
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle);
- void (*buffer_end) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
+ void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t tsc,
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle);
/* Optional callbacks (can be set to NULL) */
/* Called at buffer creation/finalize */
- int (*buffer_create) (struct lttng_ust_lib_ring_buffer *buf, void *priv,
+ int (*buffer_create) (struct lttng_ust_ring_buffer *buf, void *priv,
int cpu, const char *name,
struct lttng_ust_shm_handle *handle);
/*
* Clients should guarantee that no new reader handle can be opened
* after finalize.
*/
- void (*buffer_finalize) (struct lttng_ust_lib_ring_buffer *buf,
+ void (*buffer_finalize) (struct lttng_ust_ring_buffer *buf,
void *priv, int cpu,
struct lttng_ust_shm_handle *handle);
* record. Used by buffer iterators. Timestamp is only used by channel
* iterator.
*/
- void (*record_get) (const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_lib_ring_buffer *buf,
+ void (*record_get) (const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan,
+ struct lttng_ust_ring_buffer *buf,
size_t offset, size_t *header_len,
size_t *payload_len, uint64_t *timestamp,
struct lttng_ust_shm_handle *handle);
/*
* Offset and size of content size field in client.
*/
- void (*content_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
+ void (*content_size_field) (const struct lttng_ust_ring_buffer_config *config,
size_t *offset, size_t *length);
- void (*packet_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
+ void (*packet_size_field) (const struct lttng_ust_ring_buffer_config *config,
size_t *offset, size_t *length);
};
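Editorial sketch (not part of this patch): a toy client callback table. The example_* stubs are hypothetical and only show the expected signatures; the optional callbacks are left NULL as permitted above.
static uint64_t example_clock_read(
		struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)))
{
	return 0;	/* a real client returns its trace clock here */
}
static size_t example_record_header_size(
		const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)),
		size_t offset __attribute__((unused)),
		size_t *pre_header_padding,
		struct lttng_ust_ring_buffer_ctx *ctx __attribute__((unused)),
		void *client_ctx __attribute__((unused)))
{
	*pre_header_padding = 0;
	return 0;	/* toy client: no per-record header */
}
static size_t example_subbuffer_header_size(void)
{
	return 0;	/* toy client: no packet header */
}
static void example_buffer_begin(
		struct lttng_ust_ring_buffer *buf __attribute__((unused)),
		uint64_t tsc __attribute__((unused)),
		unsigned int subbuf_idx __attribute__((unused)),
		struct lttng_ust_shm_handle *handle __attribute__((unused)))
{
}
static void example_buffer_end(
		struct lttng_ust_ring_buffer *buf __attribute__((unused)),
		uint64_t tsc __attribute__((unused)),
		unsigned int subbuf_idx __attribute__((unused)),
		unsigned long data_size __attribute__((unused)),
		struct lttng_ust_shm_handle *handle __attribute__((unused)))
{
}
static const struct lttng_ust_ring_buffer_client_cb example_client_cb = {
	.ring_buffer_clock_read = example_clock_read,
	.record_header_size = example_record_header_size,
	.subbuffer_header_size = example_subbuffer_header_size,
	.buffer_begin = example_buffer_begin,
	.buffer_end = example_buffer_end,
	/* buffer_create, buffer_finalize, record_get, content_size_field
	 * and packet_size_field intentionally left NULL. */
};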
*/
#define LTTNG_UST_RING_BUFFER_CONFIG_PADDING 20
-enum lttng_ust_lib_ring_buffer_alloc_types {
+enum lttng_ust_ring_buffer_alloc_types {
RING_BUFFER_ALLOC_PER_CPU,
RING_BUFFER_ALLOC_GLOBAL,
};
-enum lttng_ust_lib_ring_buffer_sync_types {
+enum lttng_ust_ring_buffer_sync_types {
RING_BUFFER_SYNC_PER_CPU, /* Wait-free */
RING_BUFFER_SYNC_GLOBAL, /* Lock-free */
};
-enum lttng_ust_lib_ring_buffer_mode_types {
+enum lttng_ust_ring_buffer_mode_types {
RING_BUFFER_OVERWRITE = 0, /* Overwrite when buffer full */
RING_BUFFER_DISCARD = 1, /* Discard when buffer full */
};
-enum lttng_ust_lib_ring_buffer_output_types {
+enum lttng_ust_ring_buffer_output_types {
RING_BUFFER_SPLICE,
RING_BUFFER_MMAP,
RING_BUFFER_READ, /* TODO */
RING_BUFFER_NONE,
};
-enum lttng_ust_lib_ring_buffer_backend_types {
+enum lttng_ust_ring_buffer_backend_types {
RING_BUFFER_PAGE,
RING_BUFFER_VMAP, /* TODO */
RING_BUFFER_STATIC, /* TODO */
};
-enum lttng_ust_lib_ring_buffer_oops_types {
+enum lttng_ust_ring_buffer_oops_types {
RING_BUFFER_NO_OOPS_CONSISTENCY,
RING_BUFFER_OOPS_CONSISTENCY,
};
-enum lttng_ust_lib_ring_buffer_ipi_types {
+enum lttng_ust_ring_buffer_ipi_types {
RING_BUFFER_IPI_BARRIER,
RING_BUFFER_NO_IPI_BARRIER,
};
-enum lttng_ust_lib_ring_buffer_wakeup_types {
+enum lttng_ust_ring_buffer_wakeup_types {
RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
RING_BUFFER_WAKEUP_BY_WRITER, /*
* writer wakes up reader,
*/
};
-struct lttng_ust_lib_ring_buffer_config {
- enum lttng_ust_lib_ring_buffer_alloc_types alloc;
- enum lttng_ust_lib_ring_buffer_sync_types sync;
- enum lttng_ust_lib_ring_buffer_mode_types mode;
- enum lttng_ust_lib_ring_buffer_output_types output;
- enum lttng_ust_lib_ring_buffer_backend_types backend;
- enum lttng_ust_lib_ring_buffer_oops_types oops;
- enum lttng_ust_lib_ring_buffer_ipi_types ipi;
- enum lttng_ust_lib_ring_buffer_wakeup_types wakeup;
+struct lttng_ust_ring_buffer_config {
+ enum lttng_ust_ring_buffer_alloc_types alloc;
+ enum lttng_ust_ring_buffer_sync_types sync;
+ enum lttng_ust_ring_buffer_mode_types mode;
+ enum lttng_ust_ring_buffer_output_types output;
+ enum lttng_ust_ring_buffer_backend_types backend;
+ enum lttng_ust_ring_buffer_oops_types oops;
+ enum lttng_ust_ring_buffer_ipi_types ipi;
+ enum lttng_ust_ring_buffer_wakeup_types wakeup;
/*
* tsc_bits: timestamp bits saved at each record.
* 0 and 64 disable the timestamp compression scheme.
*/
unsigned int tsc_bits;
- struct lttng_ust_lib_ring_buffer_client_cb cb;
+ struct lttng_ust_ring_buffer_client_cb cb;
/*
* client_type is used by the consumer process (which is in a
* different address space) to lookup the appropriate client
*/
int client_type;
int _unused1;
- const struct lttng_ust_lib_ring_buffer_client_cb *cb_ptr;
+ const struct lttng_ust_ring_buffer_client_cb *cb_ptr;
char padding[LTTNG_UST_RING_BUFFER_CONFIG_PADDING];
};
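Editorial sketch (not part of this patch): an illustrative static client configuration built from the enums above: per-CPU allocation with global synchronization (per-CPU sync is rejected by the v_*() asserts further down), discard mode, mmap output, page backend, writer-driven wakeup. All values are examples only; .cb/.cb_ptr would reference the client callback table.
static const struct lttng_ust_ring_buffer_config example_client_config = {
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_GLOBAL,
	.mode = RING_BUFFER_DISCARD,
	.output = RING_BUFFER_MMAP,
	.backend = RING_BUFFER_PAGE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_NO_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
	.tsc_bits = 27,		/* illustrative compressed timestamp width */
	.client_type = 0,	/* looked up by the consumer process */
};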
* Used internally to check for valid configurations at channel creation.
*/
static inline
-int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
+int lib_ring_buffer_check_config(const struct lttng_ust_ring_buffer_config *config,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
lttng_ust_notrace;
static inline
-int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
+int lib_ring_buffer_check_config(const struct lttng_ust_ring_buffer_config *config,
unsigned int switch_timer_interval,
unsigned int read_timer_interval __attribute__((unused)))
{
#include <limits.h>
#include "shm_internal.h"
-struct lttng_ust_lib_ring_buffer_channel;
+struct lttng_ust_ring_buffer_channel;
enum shm_object_type {
SHM_OBJECT_SHM,
struct lttng_ust_shm_handle {
struct shm_object_table *table;
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_channel, chan);
+ DECLARE_SHMP(struct lttng_ust_ring_buffer_channel, chan);
};
#endif /* _LIBRINGBUFFER_SHM_TYPES_H */
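Editorial sketch (not part of this patch): shmp() resolves a shared-memory reference through the handle and can return NULL when the reference cannot be resolved, which is why every dereference in this code is followed by a NULL check.
static inline
struct lttng_ust_ring_buffer_channel *example_handle_to_chan(
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_ring_buffer_channel *chan = shmp(handle, handle->chan);
	if (!chan)
		return NULL;	/* reference could not be resolved */
	return chan;
}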
};
static inline
-long v_read(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a)
+long v_read(const struct lttng_ust_ring_buffer_config *config, union v_atomic *v_a)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
return uatomic_read(&v_a->a);
}
static inline
-void v_set(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a,
+void v_set(const struct lttng_ust_ring_buffer_config *config, union v_atomic *v_a,
long v)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
}
static inline
-void v_add(const struct lttng_ust_lib_ring_buffer_config *config, long v, union v_atomic *v_a)
+void v_add(const struct lttng_ust_ring_buffer_config *config, long v, union v_atomic *v_a)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
uatomic_add(&v_a->a, v);
}
static inline
-void v_inc(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a)
+void v_inc(const struct lttng_ust_ring_buffer_config *config, union v_atomic *v_a)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
uatomic_inc(&v_a->a);
* Non-atomic decrement. Only used by reader, apply to reader-owned subbuffer.
*/
static inline
-void _v_dec(const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)), union v_atomic *v_a)
+void _v_dec(const struct lttng_ust_ring_buffer_config *config __attribute__((unused)), union v_atomic *v_a)
{
--v_a->v;
}
static inline
-long v_cmpxchg(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a,
+long v_cmpxchg(const struct lttng_ust_ring_buffer_config *config, union v_atomic *v_a,
long old, long _new)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
const char *name;
size_t (*get_size)(void *priv, size_t offset);
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void (*record)(void *priv, struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan);
void (*get_value)(void *priv, struct lttng_ust_ctx_value *value);
void *priv;
* Stream representation within consumer.
*/
struct ustctl_consumer_stream {
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *chan;
int shm_fd, wait_fd, wakeup_fd;
int cpu;
const char *metadata_str, /* NOT null-terminated */
size_t len) /* metadata length */
{
- struct lttng_ust_lib_ring_buffer_ctx ctx;
+ struct lttng_ust_ring_buffer_ctx ctx;
struct lttng_ust_channel_buffer *lttng_chan_buf = channel->chan;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = lttng_chan_buf->priv->rb_chan;
+ struct lttng_ust_ring_buffer_channel *rb_chan = lttng_chan_buf->priv->rb_chan;
const char *str = metadata_str;
int ret = 0, waitret;
size_t reserve_len, pos;
reserve_len = min_t(size_t,
lttng_chan_buf->ops->priv->packet_avail_size(lttng_chan_buf),
len - pos);
- lttng_ust_lib_ring_buffer_ctx_init(&ctx, rb_chan, reserve_len, sizeof(char), NULL);
+ lttng_ust_ring_buffer_ctx_init(&ctx, rb_chan, reserve_len, sizeof(char), NULL);
/*
* We don't care about metadata buffer's records lost
* count, because we always retry here. Report error if
const char *metadata_str, /* NOT null-terminated */
size_t len) /* metadata length */
{
- struct lttng_ust_lib_ring_buffer_ctx ctx;
+ struct lttng_ust_ring_buffer_ctx ctx;
struct lttng_ust_channel_buffer *lttng_chan_buf = channel->chan;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = lttng_chan_buf->priv->rb_chan;
+ struct lttng_ust_ring_buffer_channel *rb_chan = lttng_chan_buf->priv->rb_chan;
const char *str = metadata_str;
ssize_t reserve_len;
int ret;
reserve_len = min_t(ssize_t,
lttng_chan_buf->ops->priv->packet_avail_size(lttng_chan_buf),
len);
- lttng_ust_lib_ring_buffer_ctx_init(&ctx, rb_chan, reserve_len, sizeof(char), NULL);
+ lttng_ust_ring_buffer_ctx_init(&ctx, rb_chan, reserve_len, sizeof(char), NULL);
ret = lttng_chan_buf->ops->event_reserve(&ctx);
if (ret != 0) {
DBG("LTTng: event reservation failed");
int ustctl_channel_close_wait_fd(struct ustctl_consumer_channel *consumer_chan)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
int ret;
chan = consumer_chan->chan->priv->rb_chan;
int ustctl_channel_close_wakeup_fd(struct ustctl_consumer_channel *consumer_chan)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
int ret;
chan = consumer_chan->chan->priv->rb_chan;
int ustctl_stream_close_wait_fd(struct ustctl_consumer_stream *stream)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
chan = stream->chan->chan->priv->rb_chan;
return ring_buffer_stream_close_wait_fd(&chan->backend.config,
int ustctl_stream_close_wakeup_fd(struct ustctl_consumer_stream *stream)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
chan = stream->chan->chan->priv->rb_chan;
return ring_buffer_stream_close_wakeup_fd(&chan->backend.config,
{
struct ustctl_consumer_stream *stream;
struct lttng_ust_shm_handle *handle;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan;
+ struct lttng_ust_ring_buffer_channel *rb_chan;
int shm_fd, wait_fd, wakeup_fd;
uint64_t memory_map_size;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
int ret;
if (!channel)
void ustctl_destroy_stream(struct ustctl_consumer_stream *stream)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
assert(stream);
int ustctl_stream_get_wait_fd(struct ustctl_consumer_stream *stream)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
if (!stream)
int ustctl_stream_get_wakeup_fd(struct ustctl_consumer_stream *stream)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
if (!stream)
void *ustctl_get_mmap_base(struct ustctl_consumer_stream *stream)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
if (!stream)
{
struct ustctl_consumer_channel *consumer_chan;
unsigned long mmap_buf_len;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan;
+ struct lttng_ust_ring_buffer_channel *rb_chan;
if (!stream)
return -EINVAL;
unsigned long *len)
{
struct ustctl_consumer_channel *consumer_chan;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan;
+ struct lttng_ust_ring_buffer_channel *rb_chan;
if (!stream)
return -EINVAL;
int ustctl_get_mmap_read_offset(struct ustctl_consumer_stream *stream,
unsigned long *off)
{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan;
+ struct lttng_ust_ring_buffer_channel *rb_chan;
unsigned long sb_bindex;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *barray_idx;
- struct lttng_ust_lib_ring_buffer_backend_pages *pages;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *barray_idx;
+ struct lttng_ust_ring_buffer_backend_pages *pages;
if (!stream)
return -EINVAL;
unsigned long *len)
{
struct ustctl_consumer_channel *consumer_chan;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *rb_chan;
+ struct lttng_ust_ring_buffer *buf;
if (!stream)
return -EINVAL;
unsigned long *len)
{
struct ustctl_consumer_channel *consumer_chan;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *rb_chan;
+ struct lttng_ust_ring_buffer *buf;
if (!stream)
return -EINVAL;
/* Get exclusive read access to the next sub-buffer that can be read. */
int ustctl_get_next_subbuf(struct ustctl_consumer_stream *stream)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
if (!stream)
/* Release exclusive sub-buffer access, move consumer forward. */
int ustctl_put_next_subbuf(struct ustctl_consumer_stream *stream)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
if (!stream)
/* Get a snapshot of the current ring buffer producer and consumer positions */
int ustctl_snapshot(struct ustctl_consumer_stream *stream)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
if (!stream)
*/
int ustctl_snapshot_sample_positions(struct ustctl_consumer_stream *stream)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
if (!stream)
int ustctl_snapshot_get_consumed(struct ustctl_consumer_stream *stream,
unsigned long *pos)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
if (!stream)
return -EINVAL;
int ustctl_snapshot_get_produced(struct ustctl_consumer_stream *stream,
unsigned long *pos)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
if (!stream)
return -EINVAL;
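/*
 * A hedged sketch of a consumer-side read loop built from the stream
 * accessors in this file. Error handling is minimal, the consumption of the
 * sub-buffer contents is left as a comment, and the stream is assumed to
 * have been obtained from ustctl_create_stream().
 */
static
void example_consume_stream(struct ustctl_consumer_stream *stream)
{
	while (ustctl_get_next_subbuf(stream) == 0) {
		unsigned long read_offset = 0, len = 0;
		void *base = ustctl_get_mmap_base(stream);

		if (base
				&& !ustctl_get_mmap_read_offset(stream, &read_offset)
				&& !ustctl_get_subbuf_size(stream, &len)) {
			/* ... consume len bytes starting at base + read_offset ... */
		}
		if (ustctl_put_next_subbuf(stream))
			break;
	}
}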
int ustctl_get_subbuf(struct ustctl_consumer_stream *stream,
unsigned long *pos)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
if (!stream)
/* Release exclusive sub-buffer access */
int ustctl_put_subbuf(struct ustctl_consumer_stream *stream)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
if (!stream)
void ustctl_flush_buffer(struct ustctl_consumer_stream *stream,
int producer_active)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
assert(stream);
void ustctl_clear_buffer(struct ustctl_consumer_stream *stream)
{
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
struct ustctl_consumer_channel *consumer_chan;
assert(stream);
static
struct lttng_ust_client_lib_ring_buffer_client_cb *get_client_cb(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan)
+ struct lttng_ust_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan)
{
- const struct lttng_ust_lib_ring_buffer_config *config;
+ const struct lttng_ust_ring_buffer_config *config;
struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
config = &chan->backend.config;
uint64_t *timestamp_begin)
{
struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer *buf;
if (!stream || !timestamp_begin)
return -EINVAL;
uint64_t *timestamp_end)
{
struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer *buf;
if (!stream || !timestamp_end)
return -EINVAL;
uint64_t *events_discarded)
{
struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer *buf;
if (!stream || !events_discarded)
return -EINVAL;
uint64_t *content_size)
{
struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer *buf;
if (!stream || !content_size)
return -EINVAL;
uint64_t *packet_size)
{
struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer *buf;
if (!stream || !packet_size)
return -EINVAL;
uint64_t *stream_id)
{
struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer *buf;
if (!stream || !stream_id)
return -EINVAL;
uint64_t *ts)
{
struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer *buf;
if (!stream || !ts)
return -EINVAL;
uint64_t *seq)
{
struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer *buf;
if (!stream || !seq)
return -EINVAL;
uint64_t *id)
{
struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer *buf;
if (!stream || !id)
return -EINVAL;
const char *ctx_name = jni_provider->name;
enum lttng_ust_jni_type jni_type;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
size += sizeof(char); /* tag */
jctx = lookup_ctx_by_name(ctx_name);
if (!jctx) {
case JNI_TYPE_NULL:
break;
case JNI_TYPE_INTEGER:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int32_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(int32_t));
size += sizeof(int32_t); /* variant */
break;
case JNI_TYPE_LONG:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int64_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(int64_t));
size += sizeof(int64_t); /* variant */
break;
case JNI_TYPE_DOUBLE:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(double));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(double));
size += sizeof(double); /* variant */
break;
case JNI_TYPE_FLOAT:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(float));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(float));
size += sizeof(float); /* variant */
break;
case JNI_TYPE_SHORT:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int16_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(int16_t));
size += sizeof(int16_t); /* variant */
break;
case JNI_TYPE_BYTE: /* Fall-through. */
case JNI_TYPE_BOOLEAN:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
size += sizeof(char); /* variant */
break;
case JNI_TYPE_STRING:
}
static void record_cb(void *priv,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *lttng_chan_buf)
{
struct lttng_ust_jni_ctx_entry *jctx;
void lttng_ust_context_set_event_notifier_group_provider(const char *name,
size_t (*get_size)(void *priv, size_t offset),
void (*record)(void *priv,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan),
void (*get_value)(void *priv,
struct lttng_ust_ctx_value *value),
int lttng_ust_context_set_provider_rcu(struct lttng_ust_ctx **_ctx,
const char *name,
size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void (*record)(void *priv, struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan),
void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
void *priv)
void lttng_ust_context_set_session_provider(const char *name,
size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void (*record)(void *priv, struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan),
void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
void *priv)
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
size += sizeof(ino_t);
return size;
}
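/*
 * Worked example of the size computation above (illustrative numbers):
 * with offset == 5 and an 8-byte, 8-byte-aligned ino_t,
 * lttng_ust_ring_buffer_align(5, 8) returns 3 bytes of padding, so the
 * field starts at offset 8 and the callback reports 3 + sizeof(ino_t) == 11.
 */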
static
void cgroup_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
ino_t cgroup_ns;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(int));
size += sizeof(int);
return size;
}
static
void cpu_id_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
int cpu;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(void *));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(void *));
size += sizeof(void *);
return size;
}
static
void ip_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
void *ip;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
size += sizeof(ino_t);
return size;
}
static
void ipc_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
ino_t ipc_ns;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
size += sizeof(ino_t);
return size;
}
static
void mnt_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
ino_t mnt_ns;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
size += sizeof(ino_t);
return size;
}
static
void net_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
ino_t net_ns;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
size += sizeof(uint64_t);
return size;
}
static
void perf_counter_record(void *priv,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
uint64_t value;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
size += sizeof(ino_t);
return size;
}
static
void pid_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
ino_t pid_ns;
static
void procname_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
const char *procname;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(unsigned long));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(unsigned long));
size += sizeof(unsigned long);
return size;
}
static
void pthread_id_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
unsigned long pthread_id;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
size += sizeof(ino_t);
return size;
}
static
void time_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
ino_t time_ns;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
size += sizeof(ino_t);
return size;
}
static
void user_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
ino_t user_ns;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
size += sizeof(ino_t);
return size;
}
static
void uts_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
ino_t uts_ns;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
size += sizeof(gid_t);
return size;
}
static
void vegid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
gid_t vegid;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
size += sizeof(uid_t);
return size;
}
static
void veuid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
uid_t veuid;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
size += sizeof(gid_t);
return size;
}
static
void vgid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
gid_t vgid;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(pid_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(pid_t));
size += sizeof(pid_t);
return size;
}
static
void vpid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
pid_t vpid = wrapper_getvpid();
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
size += sizeof(gid_t);
return size;
}
static
void vsgid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
gid_t vsgid;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
size += sizeof(uid_t);
return size;
}
static
void vsuid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
uid_t vsuid;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(pid_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(pid_t));
size += sizeof(pid_t);
return size;
}
static
void vtid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
pid_t vtid = wrapper_getvtid();
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
size += sizeof(uid_t);
return size;
}
static
void vuid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
uid_t vuid;
int lttng_ust_context_set_provider_rcu(struct lttng_ust_ctx **_ctx,
const char *name,
size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void (*record)(void *priv, struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan),
void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
void *priv)
static
void _lttng_channel_unmap(struct lttng_ust_channel_buffer *lttng_chan)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
struct lttng_ust_shm_handle *handle;
cds_list_del(&lttng_chan->priv->node);
*/
void lttng_ust_context_set_session_provider(const char *name,
size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void (*record)(void *priv, struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan),
void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
void *priv)
*/
void lttng_ust_context_set_event_notifier_group_provider(const char *name,
size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void (*record)(void *priv, struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan),
void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
void *priv)
#include "common/ringbuffer/ringbuffer-config.h"
struct lttng_ust_client_lib_ring_buffer_client_cb {
- struct lttng_ust_lib_ring_buffer_client_cb parent;
+ struct lttng_ust_ring_buffer_client_cb parent;
- int (*timestamp_begin) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ int (*timestamp_begin) (struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *timestamp_begin);
- int (*timestamp_end) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ int (*timestamp_end) (struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *timestamp_end);
- int (*events_discarded) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ int (*events_discarded) (struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *events_discarded);
- int (*content_size) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ int (*content_size) (struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *content_size);
- int (*packet_size) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ int (*packet_size) (struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *packet_size);
- int (*stream_id) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ int (*stream_id) (struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *stream_id);
- int (*current_timestamp) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ int (*current_timestamp) (struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *ts);
- int (*sequence_number) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan, uint64_t *seq);
- int (*instance_id) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan, uint64_t *id);
+ int (*sequence_number) (struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan, uint64_t *seq);
+ int (*instance_id) (struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan, uint64_t *id);
};
/*
/*
* Indexed by lib_ring_buffer_nesting_count().
*/
-typedef struct lttng_ust_lib_ring_buffer_ctx_private private_ctx_stack_t[LIB_RING_BUFFER_MAX_NESTING];
+typedef struct lttng_ust_ring_buffer_ctx_private private_ctx_stack_t[LIB_RING_BUFFER_MAX_NESTING];
static DEFINE_URCU_TLS(private_ctx_stack_t, private_ctx_stack);
/*
}
static inline uint64_t lib_ring_buffer_clock_read(
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
+ struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)))
{
return trace_clock_read64();
}
if (caa_likely(!ctx))
return 0;
- offset += lttng_ust_lib_ring_buffer_align(offset, ctx->largest_align);
+ offset += lttng_ust_ring_buffer_align(offset, ctx->largest_align);
offset += ctx_len;
return offset - orig_offset;
}
}
static inline
-void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
+void ctx_record(struct lttng_ust_ring_buffer_ctx *bufctx,
struct lttng_ust_channel_buffer *chan,
struct lttng_ust_ctx *ctx)
{
if (caa_likely(!ctx))
return;
- lttng_ust_lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
+ lttng_ust_ring_buffer_align_ctx(bufctx, ctx->largest_align);
for (i = 0; i < ctx->nr_fields; i++)
ctx->fields[i].record(ctx->fields[i].priv, bufctx, chan);
}
*/
static __inline__
size_t record_header_size(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan,
size_t offset,
size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_client_ctx *client_ctx)
{
struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
switch (lttng_chan->priv->header_type) {
case 1: /* compact */
- padding = lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
+ padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
offset += padding;
if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
offset += sizeof(uint32_t); /* id and timestamp */
/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
/* Align extended struct on largest member */
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ offset += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
offset += sizeof(uint32_t); /* id */
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ offset += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
offset += sizeof(uint64_t); /* timestamp */
}
break;
case 2: /* large */
- padding = lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
+ padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
offset += padding;
offset += sizeof(uint16_t);
if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
+ offset += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
offset += sizeof(uint32_t); /* timestamp */
} else {
/* Align extended struct on largest member */
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ offset += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
offset += sizeof(uint32_t); /* id */
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ offset += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
offset += sizeof(uint64_t); /* timestamp */
}
break;
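/*
 * Layout summary derived from the offsets computed above:
 *
 *   compact: a single 32-bit word packing the event id
 *            (LTTNG_COMPACT_EVENT_BITS bits) together with the timestamp;
 *            the extended form keeps the compact id bits and appends a
 *            uint64_t-aligned uint32_t id plus a uint64_t timestamp.
 *   large:   a uint16_t id followed by a uint32_t timestamp; the extended
 *            form instead appends a uint32_t id and a uint64_t timestamp,
 *            each aligned on uint64_t.
 */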
#include "lttng-rb-clients.h"
static
-void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_client_ctx *client_ctx,
uint32_t event_id);
* @event_id: event ID
*/
static __inline__
-void lttng_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lttng_write_event_header(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_client_ctx *client_ctx,
uint32_t event_id)
{
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint32_t));
+ lttng_ust_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint32_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
break;
}
ctx_record(ctx, lttng_chan, client_ctx->chan_ctx);
ctx_record(ctx, lttng_chan, client_ctx->event_ctx);
- lttng_ust_lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
+ lttng_ust_ring_buffer_align_ctx(ctx, ctx->largest_align);
return;
}
static
-void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_client_ctx *client_ctx,
uint32_t event_id)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->priv->chan);
switch (lttng_chan->priv->header_type) {
31);
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
/* Align extended struct on largest member */
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
+ lttng_ust_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
+ lttng_ust_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
}
break;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint32_t));
+ lttng_ust_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint32_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
} else {
uint16_t id = 65535;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
/* Align extended struct on largest member */
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
+ lttng_ust_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
+ lttng_ust_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
}
break;
}
ctx_record(ctx, lttng_chan, client_ctx->chan_ctx);
ctx_record(ctx, lttng_chan, client_ctx->event_ctx);
- lttng_ust_lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
+ lttng_ust_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
-static const struct lttng_ust_lib_ring_buffer_config client_config;
+static const struct lttng_ust_ring_buffer_config client_config;
-static uint64_t client_ring_buffer_clock_read(struct lttng_ust_lib_ring_buffer_channel *chan)
+static uint64_t client_ring_buffer_clock_read(struct lttng_ust_ring_buffer_channel *chan)
{
return lib_ring_buffer_clock_read(chan);
}
static
-size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+size_t client_record_header_size(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan,
size_t offset,
size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
void *client_ctx)
{
return record_header_size(config, chan, offset,
return offsetof(struct packet_header, ctx.header_end);
}
-static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
+static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct lttng_ust_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
struct packet_header *header =
(struct packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
* offset is assumed to never be 0 here : never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
+static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct lttng_ust_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
struct packet_header *header =
(struct packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
}
static int client_buffer_create(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf __attribute__((unused)),
void *priv __attribute__((unused)),
int cpu __attribute__((unused)),
const char *name __attribute__((unused)),
}
static void client_buffer_finalize(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf __attribute__((unused)),
void *priv __attribute__((unused)),
int cpu __attribute__((unused)),
struct lttng_ust_shm_handle *handle __attribute__((unused)))
}
static void client_content_size_field(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
size_t *offset, size_t *length)
{
*offset = offsetof(struct packet_header, ctx.content_size);
}
static void client_packet_size_field(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
size_t *offset, size_t *length)
{
*offset = offsetof(struct packet_header, ctx.packet_size);
*length = sizeof(((struct packet_header *) NULL)->ctx.packet_size);
}
-static struct packet_header *client_packet_header(struct lttng_ust_lib_ring_buffer *buf,
+static struct packet_header *client_packet_header(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
return lib_ring_buffer_read_offset_address(&buf->backend, 0, handle);
}
-static int client_timestamp_begin(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+static int client_timestamp_begin(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *timestamp_begin)
{
struct lttng_ust_shm_handle *handle = chan->handle;
return 0;
}
-static int client_timestamp_end(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+static int client_timestamp_end(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *timestamp_end)
{
struct lttng_ust_shm_handle *handle = chan->handle;
return 0;
}
-static int client_events_discarded(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+static int client_events_discarded(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *events_discarded)
{
struct lttng_ust_shm_handle *handle = chan->handle;
return 0;
}
-static int client_content_size(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+static int client_content_size(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *content_size)
{
struct lttng_ust_shm_handle *handle = chan->handle;
return 0;
}
-static int client_packet_size(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+static int client_packet_size(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *packet_size)
{
struct lttng_ust_shm_handle *handle = chan->handle;
return 0;
}
-static int client_stream_id(struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan,
+static int client_stream_id(struct lttng_ust_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *stream_id)
{
struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
}
static int client_current_timestamp(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *ts)
{
*ts = client_ring_buffer_clock_read(chan);
return 0;
}
-static int client_sequence_number(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+static int client_sequence_number(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
uint64_t *seq)
{
struct lttng_ust_shm_handle *handle = chan->handle;
return 0;
}
-static int client_instance_id(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+static int client_instance_id(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)),
uint64_t *id)
{
*id = buf->backend.cpu;
.instance_id = client_instance_id,
};
-static const struct lttng_ust_lib_ring_buffer_config client_config = {
+static const struct lttng_ust_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
.cb.subbuffer_header_size = client_packet_header_size,
}
static
-int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+int lttng_event_reserve(struct lttng_ust_ring_buffer_ctx *ctx)
{
struct lttng_ust_event_recorder *event_recorder = ctx->client_priv;
struct lttng_ust_channel_buffer *lttng_chan = event_recorder->chan;
struct lttng_client_ctx client_ctx;
int ret, nesting;
- struct lttng_ust_lib_ring_buffer_ctx_private *private_ctx;
+ struct lttng_ust_ring_buffer_ctx_private *private_ctx;
uint32_t event_id;
event_id = event_recorder->priv->id;
}
static
-void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+void lttng_event_commit(struct lttng_ust_ring_buffer_ctx *ctx)
{
lib_ring_buffer_commit(&client_config, ctx);
lib_ring_buffer_nesting_dec(&client_config);
}
static
-void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lttng_event_write(struct lttng_ust_ring_buffer_ctx *ctx,
const void *src, size_t len, size_t alignment)
{
- lttng_ust_lib_ring_buffer_align_ctx(ctx, alignment);
+ lttng_ust_ring_buffer_align_ctx(ctx, alignment);
lib_ring_buffer_write(&client_config, ctx, src, len);
}
static
-void lttng_event_strcpy(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lttng_event_strcpy(struct lttng_ust_ring_buffer_ctx *ctx,
const char *src, size_t len)
{
lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
}
static
-void lttng_event_pstrcpy_pad(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lttng_event_pstrcpy_pad(struct lttng_ust_ring_buffer_ctx *ctx,
const char *src, size_t len)
{
lib_ring_buffer_pstrcpy(&client_config, ctx, src, len, '\0');
static
int lttng_is_finalized(struct lttng_ust_channel_buffer *chan)
{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ struct lttng_ust_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
return lib_ring_buffer_channel_is_finalized(rb_chan);
}
static
int lttng_is_disabled(struct lttng_ust_channel_buffer *chan)
{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ struct lttng_ust_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
return lib_ring_buffer_channel_is_disabled(rb_chan);
}
static
int lttng_flush_buffer(struct lttng_ust_channel_buffer *chan)
{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ struct lttng_ust_ring_buffer *buf;
int cpu;
for_each_channel_cpu(cpu, rb_chan) {
uint8_t header_end[0]; /* End of header */
};
-static const struct lttng_ust_lib_ring_buffer_config client_config;
+static const struct lttng_ust_ring_buffer_config client_config;
/* No nested use supported for metadata ring buffer. */
-static DEFINE_URCU_TLS(struct lttng_ust_lib_ring_buffer_ctx_private, private_ctx);
+static DEFINE_URCU_TLS(struct lttng_ust_ring_buffer_ctx_private, private_ctx);
static inline uint64_t lib_ring_buffer_clock_read(
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
+ struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)))
{
return 0;
}
static inline
size_t record_header_size(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)),
size_t offset __attribute__((unused)),
size_t *pre_header_padding __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
+ struct lttng_ust_ring_buffer_ctx *ctx __attribute__((unused)),
void *client_ctx __attribute__((unused)))
{
return 0;
#include "lttng-rb-clients.h"
static uint64_t client_ring_buffer_clock_read(
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
+ struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)))
{
return 0;
}
static
size_t client_record_header_size(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)),
size_t offset __attribute__((unused)),
size_t *pre_header_padding __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
+ struct lttng_ust_ring_buffer_ctx *ctx __attribute__((unused)),
void *client_ctx __attribute__((unused)))
{
return 0;
return offsetof(struct metadata_packet_header, header_end);
}
-static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf,
+static void client_buffer_begin(struct lttng_ust_ring_buffer *buf,
uint64_t tsc __attribute__((unused)),
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct lttng_ust_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
struct metadata_packet_header *header =
(struct metadata_packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
* offset is assumed to never be 0 here : never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf,
+static void client_buffer_end(struct lttng_ust_ring_buffer *buf,
uint64_t tsc __attribute__((unused)),
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct lttng_ust_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
struct metadata_packet_header *header =
(struct metadata_packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
}
static int client_buffer_create(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf __attribute__((unused)),
void *priv __attribute__((unused)),
int cpu __attribute__((unused)),
const char *name __attribute__((unused)),
}
static void client_buffer_finalize(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf __attribute__((unused)),
void *priv __attribute__((unused)),
int cpu __attribute__((unused)),
struct lttng_ust_shm_handle *handle __attribute__((unused)))
},
};
-static const struct lttng_ust_lib_ring_buffer_config client_config = {
+static const struct lttng_ust_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
.cb.subbuffer_header_size = client_packet_header_size,
}
static
-int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+int lttng_event_reserve(struct lttng_ust_ring_buffer_ctx *ctx)
{
int ret;
- memset(&URCU_TLS(private_ctx), 0, sizeof(struct lttng_ust_lib_ring_buffer_ctx_private));
+ memset(&URCU_TLS(private_ctx), 0, sizeof(struct lttng_ust_ring_buffer_ctx_private));
URCU_TLS(private_ctx).pub = ctx;
URCU_TLS(private_ctx).chan = ctx->client_priv;
ctx->priv = &URCU_TLS(private_ctx);
}
static
-void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+void lttng_event_commit(struct lttng_ust_ring_buffer_ctx *ctx)
{
lib_ring_buffer_commit(&client_config, ctx);
}
static
-void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lttng_event_write(struct lttng_ust_ring_buffer_ctx *ctx,
const void *src, size_t len, size_t alignment)
{
- lttng_ust_lib_ring_buffer_align_ctx(ctx, alignment);
+ lttng_ust_ring_buffer_align_ctx(ctx, alignment);
lib_ring_buffer_write(&client_config, ctx, src, len);
}
static
size_t lttng_packet_avail_size(struct lttng_ust_channel_buffer *chan)
{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ struct lttng_ust_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
unsigned long o_begin;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
buf = shmp(rb_chan->handle, rb_chan->backend.buf[0].shmp); /* Only for global buffer ! */
o_begin = v_read(&client_config, &buf->offset);
static
int lttng_is_finalized(struct lttng_ust_channel_buffer *chan)
{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ struct lttng_ust_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
return lib_ring_buffer_channel_is_finalized(rb_chan);
}
static
int lttng_is_disabled(struct lttng_ust_channel_buffer *chan)
{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ struct lttng_ust_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
return lib_ring_buffer_channel_is_disabled(rb_chan);
}
static
int lttng_flush_buffer(struct lttng_ust_channel_buffer *chan)
{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ struct lttng_ust_ring_buffer *buf;
int shm_fd, wait_fd, wakeup_fd;
uint64_t memory_map_size;
struct lttng_ust_session;
struct lttng_ust_channel_buffer;
struct lttng_ust_ctx_field;
-struct lttng_ust_lib_ring_buffer_ctx;
+struct lttng_ust_ring_buffer_ctx;
struct lttng_ust_ctx_value;
struct lttng_ust_event_recorder;
struct lttng_ust_event_notifier;
size_t lttng_ust_dummy_get_size(void *priv, size_t offset)
__attribute__((visibility("hidden")));
-void lttng_ust_dummy_record(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+void lttng_ust_dummy_record(void *priv, struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
__attribute__((visibility("hidden")));
static
int lttng_is_channel_ready(struct lttng_ust_channel_buffer *lttng_chan)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
unsigned int nr_streams, exp_streams;
chan = lttng_chan->priv->rb_chan;
struct lttng_ust_shm_handle *channel_handle;
struct lttng_ust_abi_channel_config *lttng_chan_config;
struct lttng_ust_channel_buffer *lttng_chan_buf;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_config *config;
void *chan_data;
int wakeup_fd;
uint64_t len;
{
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
size += sizeof(char); /* tag */
return size;
}
void lttng_ust_dummy_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *chan)
{
char sel_char = (char) LTTNG_UST_DYNAMIC_TYPE_NONE;
int sel = test_count % _NR_LTTNG_UST_DYNAMIC_TYPES;
size_t size = 0;
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
size += sizeof(char); /* tag */
switch (sel) {
case LTTNG_UST_DYNAMIC_TYPE_NONE:
break;
case LTTNG_UST_DYNAMIC_TYPE_S8:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int8_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(int8_t));
size += sizeof(int8_t); /* variant */
break;
case LTTNG_UST_DYNAMIC_TYPE_S16:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int16_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(int16_t));
size += sizeof(int16_t); /* variant */
break;
case LTTNG_UST_DYNAMIC_TYPE_S32:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int32_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(int32_t));
size += sizeof(int32_t); /* variant */
break;
case LTTNG_UST_DYNAMIC_TYPE_S64:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int64_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(int64_t));
size += sizeof(int64_t); /* variant */
break;
case LTTNG_UST_DYNAMIC_TYPE_U8:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint8_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint8_t));
size += sizeof(uint8_t); /* variant */
break;
case LTTNG_UST_DYNAMIC_TYPE_U16:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
size += sizeof(uint16_t); /* variant */
break;
case LTTNG_UST_DYNAMIC_TYPE_U32:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
size += sizeof(uint32_t); /* variant */
break;
case LTTNG_UST_DYNAMIC_TYPE_U64:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
size += sizeof(uint64_t); /* variant */
break;
case LTTNG_UST_DYNAMIC_TYPE_FLOAT:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(float));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(float));
size += sizeof(float); /* variant */
break;
case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(double));
+ size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(double));
size += sizeof(double); /* variant */
break;
case LTTNG_UST_DYNAMIC_TYPE_STRING:
static
void test_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_channel_buffer *lttng_chan_buf)
{
int sel = test_count % _NR_LTTNG_UST_DYNAMIC_TYPES;