}
static void client_buffer_begin(struct lttng_ust_ring_buffer *buf,
- uint64_t tsc __attribute__((unused)),
+ uint64_t timestamp __attribute__((unused)),
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle)
{
* subbuffer. data_size is between 1 and subbuf_size.
*/
static void client_buffer_end(struct lttng_ust_ring_buffer *buf,
- uint64_t tsc __attribute__((unused)),
+ uint64_t timestamp __attribute__((unused)),
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle,
const struct lttng_ust_ring_buffer_ctx *ctx)
.cb.buffer_create = client_buffer_create,
.cb.buffer_finalize = client_buffer_finalize,
- .tsc_bits = 0,
+ .timestamp_bits = 0,
.alloc = RING_BUFFER_ALLOC_GLOBAL,
.sync = RING_BUFFER_SYNC_GLOBAL,
.mode = RING_BUFFER_MODE_TEMPLATE,
#include "common/clock.h"
#include "common/ringbuffer/frontend_types.h"
-#define LTTNG_COMPACT_EVENT_BITS 5
-#define LTTNG_COMPACT_TSC_BITS 27
+#define LTTNG_COMPACT_EVENT_BITS 5
+#define LTTNG_COMPACT_TIMESTAMP_BITS 27
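For context, the compact event header defined by these constants packs the event ID and the compressed timestamp into a single 32-bit word (5 + 27 = 32 bits). A minimal sketch of that layout, assuming LSB-first packing as produced by bt_bitfield_write() on a little-endian trace; the helper name is illustrative only and not part of this patch:

#include <stdint.h>

#define LTTNG_COMPACT_EVENT_BITS	5
#define LTTNG_COMPACT_TIMESTAMP_BITS	27

/* Illustration only: pack a compact event header word. */
static inline uint32_t compact_header_pack(uint32_t event_id, uint64_t timestamp)
{
	uint32_t id_time = 0;

	/* Bits [0, 5): event ID; larger IDs take the extended header path. */
	id_time |= event_id & ((1U << LTTNG_COMPACT_EVENT_BITS) - 1);
	/* Bits [5, 32): low 27 bits of the record timestamp. */
	id_time |= ((uint32_t) timestamp & ((1U << LTTNG_COMPACT_TIMESTAMP_BITS) - 1))
			<< LTTNG_COMPACT_EVENT_BITS;
	return id_time;
}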
/*
* Keep the natural field alignment for _each field_ within this structure if
case 1: /* compact */
padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
offset += padding;
- if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
offset += sizeof(uint32_t); /* id and timestamp */
} else {
/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
offset += padding;
offset += sizeof(uint16_t);
- if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
offset += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
offset += sizeof(uint32_t); /* timestamp */
} else {
event_id);
bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS,
- ctx->priv->tsc);
+ LTTNG_COMPACT_TIMESTAMP_BITS,
+ ctx->priv->timestamp);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
break;
}
case 2: /* large */
{
- uint32_t timestamp = (uint32_t) ctx->priv->tsc;
+ uint32_t timestamp = (uint32_t) ctx->priv->timestamp;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
switch (lttng_chan->priv->header_type) {
case 1: /* compact */
- if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
uint32_t id_time = 0;
bt_bitfield_write(&id_time, uint32_t,
event_id);
bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS,
- ctx_private->tsc);
+ LTTNG_COMPACT_TIMESTAMP_BITS,
+ ctx_private->timestamp);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
} else {
uint8_t id = 0;
- uint64_t timestamp = ctx_private->tsc;
+ uint64_t timestamp = ctx_private->timestamp;
bt_bitfield_write(&id, uint8_t,
0,
break;
case 2: /* large */
{
- if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- uint32_t timestamp = (uint32_t) ctx_private->tsc;
+ if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
+ uint32_t timestamp = (uint32_t) ctx_private->timestamp;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
} else {
uint16_t id = 65535;
- uint64_t timestamp = ctx_private->tsc;
+ uint64_t timestamp = ctx_private->timestamp;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
/* Align extended struct on largest member */
return offsetof(struct packet_header, ctx.header_end);
}
-static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle)
{
memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
header->stream_id = lttng_chan->priv->id;
header->stream_instance_id = buf->backend.cpu;
- header->ctx.timestamp_begin = tsc;
+ header->ctx.timestamp_begin = timestamp;
header->ctx.timestamp_end = 0;
header->ctx.content_size = ~0ULL; /* for debugging */
header->ctx.packet_size = ~0ULL;
* offset is assumed to never be 0 here : never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle,
const struct lttng_ust_ring_buffer_ctx *ctx)
assert(header);
if (!header)
return;
- header->ctx.timestamp_end = tsc;
+ header->ctx.timestamp_end = timestamp;
header->ctx.content_size =
(uint64_t) data_size * CHAR_BIT; /* in bits */
header->ctx.packet_size =
.cb.content_size_field = client_content_size_field,
.cb.packet_size_field = client_packet_size_field,
- .tsc_bits = LTTNG_COMPACT_TSC_BITS,
+ .timestamp_bits = LTTNG_COMPACT_TIMESTAMP_BITS,
.alloc = RING_BUFFER_ALLOC_PER_CPU,
.sync = RING_BUFFER_SYNC_GLOBAL,
.mode = RING_BUFFER_MODE_TEMPLATE,
unsigned int buf_size_order; /* Order of buffer size */
unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? */
unsigned long num_subbuf; /* Number of sub-buffers for writer */
- uint64_t start_tsc; /* Channel creation TSC value */
+ uint64_t start_timestamp; /* Channel creation timestamp value */
DECLARE_SHMP(void *, priv_data);/* Client-specific information */
struct lttng_ust_ring_buffer_config config; /* Ring buffer configuration */
char name[NAME_MAX]; /* Channel name */
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
- ctx_private->tsc = lib_ring_buffer_clock_read(chan);
- if ((int64_t) ctx_private->tsc == -EIO)
+ ctx_private->timestamp = lib_ring_buffer_clock_read(chan);
+ if ((int64_t) ctx_private->timestamp == -EIO)
return 1;
/*
*/
//prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
- if (last_tsc_overflow(config, buf, ctx_private->tsc))
- ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_timestamp_overflow(config, buf, ctx_private->timestamp))
+ ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
* @ctx: ring buffer context. (input and output) Must be already initialized.
*
* Atomic wait-free slot reservation. The reserved space starts at the context
- * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
+ * "pre_offset". Its length is "slot_size". The associated time-stamp is
+ * "timestamp".
*
* Return :
* 0 on success.
goto slow_path;
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * record headers, never the opposite (missing a full TSC record header
- * when it would be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full
+ * timestamp record headers, never the opposite (missing a full
+ * timestamp record header when it would be needed).
*/
- save_last_tsc(config, buf, ctx_private->tsc);
+ save_last_timestamp(config, buf, ctx_private->timestamp);
/*
* Push the reader if necessary
/*
* We need to ensure that if the cmpxchg succeeds and discards the
- * record, the next record will record a full TSC, because it cannot
- * rely on the last_tsc associated with the discarded record to detect
- * overflows. The only way to ensure this is to set the last_tsc to 0
- * (assuming no 64-bit TSC overflow), which forces to write a 64-bit
+ * record, the next record will record a full timestamp, because it cannot
+ * rely on the last_timestamp associated with the discarded record to detect
+ * overflows. The only way to ensure this is to set the last_timestamp to 0
+ * (assuming no 64-bit timestamp overflow), which forces writing a 64-bit
* timestamp in the next record.
*
- * Note: if discard fails, we must leave the TSC in the record header.
- * It is needed to keep track of TSC overflows for the following
+ * Note: if discard fails, we must leave the timestamp in the record header.
+ * It is needed to keep track of timestamp overflows for the following
* records.
*/
- save_last_tsc(config, buf, 0ULL);
+ save_last_timestamp(config, buf, 0ULL);
if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset)
!= end_offset))
}
/*
- * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
- * bits from the last TSC read. When overflows are detected, the full 64-bit
- * timestamp counter should be written in the record header. Reads and writes
- * last_tsc atomically.
+ * Last timestamp comparison functions. Check if the current timestamp overflows
+ * timestamp_bits bits from the last timestamp read. When overflows are
+ * detected, the full 64-bit timestamp counter should be written in the record
+ * header. Reads and writes last_timestamp atomically.
*/
#if (CAA_BITS_PER_LONG == 32)
static inline
-void save_last_tsc(const struct lttng_ust_ring_buffer_config *config,
- struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+void save_last_timestamp(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return;
/*
* Ensure the compiler performs this update in a single instruction.
*/
- v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
+ v_set(config, &buf->last_timestamp, (unsigned long)(timestamp >> config->timestamp_bits));
}
static inline
-int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config,
- struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+int last_timestamp_overflow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
{
- unsigned long tsc_shifted;
+ unsigned long timestamp_shifted;
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return 0;
- tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
- if (caa_unlikely(tsc_shifted
- - (unsigned long)v_read(config, &buf->last_tsc)))
+ timestamp_shifted = (unsigned long)(timestamp >> config->timestamp_bits);
+ if (caa_unlikely(timestamp_shifted
+ - (unsigned long)v_read(config, &buf->last_timestamp)))
return 1;
else
return 0;
}
#else
static inline
-void save_last_tsc(const struct lttng_ust_ring_buffer_config *config,
- struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+void save_last_timestamp(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return;
- v_set(config, &buf->last_tsc, (unsigned long)tsc);
+ v_set(config, &buf->last_timestamp, (unsigned long)timestamp);
}
static inline
-int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config,
- struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+int last_timestamp_overflow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return 0;
- if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
- >> config->tsc_bits))
+ if (caa_unlikely((timestamp - v_read(config, &buf->last_timestamp))
+ >> config->timestamp_bits))
return 1;
else
return 0;
}
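To make the compression scheme above concrete: with timestamp_bits = 27 (the compact client), a record only needs the full 64-bit timestamp when the delta since the last saved timestamp no longer fits in the low 27 bits. A self-contained sketch of that check, mirroring the 64-bit variant above; the helper and the sample values are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: same test as last_timestamp_overflow() on 64-bit. */
static int needs_full_timestamp(uint64_t last, uint64_t now, unsigned int bits)
{
	return !!((now - last) >> bits);
}

int main(void)
{
	uint64_t last = 0x12345678ULL;

	/* Delta fits in 27 bits: the compact header's timestamp bits suffice. */
	printf("%d\n", needs_full_timestamp(last, last + 100, 27));		/* 0 */
	/* Delta overflows 27 bits: set RING_BUFFER_RFLAG_FULL_TIMESTAMP. */
	printf("%d\n", needs_full_timestamp(last, last + (1ULL << 27), 27));	/* 1 */
	return 0;
}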
/*
- * Receive end of subbuffer TSC as parameter. It has been read in the
+ * Receive end of subbuffer timestamp as parameter. It has been read in the
* space reservation loop of either reserve or switch, which ensures it
* progresses monotonically with event records in the buffer. Therefore,
* it ensures that the end timestamp of a subbuffer is <= begin
int record_disabled;
/* End of cache-hot 32 bytes cacheline */
- union v_atomic last_tsc; /*
+ union v_atomic last_timestamp; /*
* Last timestamp written in the buffer.
*/
* prior to record header alignment
* padding.
*/
- uint64_t tsc; /* time-stamp counter value */
+ uint64_t timestamp; /* record timestamp value */
unsigned int rflags; /* reservation flags */
struct lttng_ust_ring_buffer *buf; /*
* buffer corresponding to processor id
* num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
* priv, notifiers, config, cpumask and name.
*/
- chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+ chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan);
}
/**
if (ret)
goto free_bufs;
}
- chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+ chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan);
return 0;
}
uatomic_set(&buf->consumed, 0);
uatomic_set(&buf->record_disabled, 0);
- v_set(config, &buf->last_tsc, 0);
+ v_set(config, &buf->last_timestamp, 0);
lib_ring_buffer_backend_reset(&buf->backend, handle);
/* Don't reset number of active readers */
v_set(config, &buf->records_lost_full, 0);
struct commit_counters_hot *cc_hot;
void *priv = channel_get_private_config(chan);
size_t subbuf_header_size;
- uint64_t tsc;
+ uint64_t timestamp;
int ret;
/* Test for cpu hotplug */
ret = -EPERM;
goto free_chanbuf;
}
- tsc = config->cb.ring_buffer_clock_read(shmp_chan);
- config->cb.buffer_begin(buf, tsc, 0, handle);
+ timestamp = config->cb.ring_buffer_clock_read(shmp_chan);
+ config->cb.buffer_begin(buf, timestamp, 0, handle);
cc_hot = shmp_index(handle, buf->commit_hot, 0);
if (!cc_hot) {
ret = -EPERM;
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
- config->cb.buffer_begin(buf, ctx->priv->tsc, oldidx, handle);
+ config->cb.buffer_begin(buf, ctx->priv->timestamp, oldidx, handle);
/*
* Order all writes to buffer before the commit count update that will
* postponed until the commit counter is incremented for the
* current space reservation.
*/
- *ts_end = ctx->priv->tsc;
+ *ts_end = ctx->priv->timestamp;
/*
* Order all writes to buffer and store to ts_end before the commit
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
- config->cb.buffer_begin(buf, ctx->priv->tsc, beginidx, handle);
+ config->cb.buffer_begin(buf, ctx->priv->timestamp, beginidx, handle);
/*
* Order all writes to buffer before the commit count update that will
* postponed until the commit counter is incremented for the
* current space reservation.
*/
- *ts_end = ctx->priv->tsc;
+ *ts_end = ctx->priv->timestamp;
}
/*
offsets->switch_old_start = 0;
off = subbuf_offset(offsets->begin, chan);
- ctx->priv->tsc = config->cb.ring_buffer_clock_read(chan);
+ ctx->priv->timestamp = config->cb.ring_buffer_clock_read(chan);
/*
* Ensure we flush the header of an empty subbuffer when doing the
!= offsets.old);
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * records, never the opposite (missing a full TSC record when it would
- * be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full
+ * timestamp records, never the opposite (missing a full timestamp
+ * record when it would be needed).
*/
- save_last_tsc(config, buf, ctx.priv->tsc);
+ save_last_timestamp(config, buf, ctx.priv->timestamp);
/*
* Push the reader if necessary
offsets->switch_old_end = 0;
offsets->pre_header_padding = 0;
- ctx_private->tsc = config->cb.ring_buffer_clock_read(chan);
- if ((int64_t) ctx_private->tsc == -EIO)
+ ctx_private->timestamp = config->cb.ring_buffer_clock_read(chan);
+ if ((int64_t) ctx_private->timestamp == -EIO)
return -EIO;
- if (last_tsc_overflow(config, buf, ctx_private->tsc))
- ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_timestamp_overflow(config, buf, ctx_private->timestamp))
+ ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
!= offsets.old));
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * records, never the opposite (missing a full TSC record when it would
- * be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full
+ * timestamp records, never the opposite (missing a full timestamp
+ * record when it would be needed).
*/
- save_last_tsc(config, buf, ctx_private->tsc);
+ save_last_timestamp(config, buf, ctx_private->timestamp);
/*
* Push the reader if necessary
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
- void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+ void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle);
- void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+ void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle,
const struct lttng_ust_ring_buffer_ctx *ctx);
enum lttng_ust_ring_buffer_ipi_types ipi;
enum lttng_ust_ring_buffer_wakeup_types wakeup;
/*
- * tsc_bits: timestamp bits saved at each record.
+ * timestamp_bits: timestamp bits saved at each record.
* 0 and 64 disable the timestamp compression scheme.
*/
- unsigned int tsc_bits;
+ unsigned int timestamp_bits;
struct lttng_ust_ring_buffer_client_cb cb;
/*
* client_type is used by the consumer process (which is in a
/*
* Reservation flags.
*
- * RING_BUFFER_RFLAG_FULL_TSC
+ * RING_BUFFER_RFLAG_FULL_TIMESTAMP
*
* This flag is passed to record_header_size() and to the primitive used to
* write the record header. It indicates that the full 64-bit time value is
* needed in the record header. If this flag is not set, the record header needs
- * only to contain "tsc_bits" bit of time value.
+ * only to contain "timestamp_bits" bit of time value.
*
* Reservation flags can be added by the client, starting from
* "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
* record_header_size() to lib_ring_buffer_write_record_header().
*/
-#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
+#define RING_BUFFER_RFLAG_FULL_TIMESTAMP (1U << 0)
#define RING_BUFFER_RFLAG_END (1U << 1)
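Clients allocate their own reservation flags starting at RING_BUFFER_RFLAG_END. As a hedged illustration (the exact definitions live in the UST ring buffer client, not in this header), the LTTNG_RFLAG_EXTENDED flag tested alongside RING_BUFFER_RFLAG_FULL_TIMESTAMP in the client hunks above could be defined along these lines:

/* Illustration only: client-defined reservation flags, allocated after
 * the ring buffer's own flags. */
#define LTTNG_RFLAG_EXTENDED	(RING_BUFFER_RFLAG_END << 0)
#define LTTNG_RFLAG_END		(RING_BUFFER_RFLAG_END << 1)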
/*