struct lttng_kernel_ring_buffer *buf; /* Channel per-cpu buffers */
unsigned long num_subbuf; /* Number of sub-buffers for writer */
- u64 start_tsc; /* Channel creation TSC value */
+ u64 start_timestamp; /* Channel creation timestamp value */
void *priv; /* Client-specific information */
void *priv_ops; /* Client-specific ops pointer */
void (*release_priv_ops)(void *priv_ops);
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
- void (*buffer_begin) (struct lttng_kernel_ring_buffer *buf, u64 tsc,
+ void (*buffer_begin) (struct lttng_kernel_ring_buffer *buf, u64 timestamp,
unsigned int subbuf_idx);
- void (*buffer_end) (struct lttng_kernel_ring_buffer *buf, u64 tsc,
+ void (*buffer_end) (struct lttng_kernel_ring_buffer *buf, u64 timestamp,
unsigned int subbuf_idx, unsigned long data_size,
const struct lttng_kernel_ring_buffer_ctx *ctx);
*/
} wakeup;
/*
- * tsc_bits: timestamp bits saved at each record.
+ * timestamp_bits: timestamp bits saved at each record.
* 0 and 64 disable the timestamp compression scheme.
*/
- unsigned int tsc_bits;
+ unsigned int timestamp_bits;
struct lttng_kernel_ring_buffer_client_cb cb;
};
* prior to record header alignment
* padding.
*/
- u64 tsc; /* time-stamp counter value */
+ u64 timestamp; /* timestamp value */
unsigned int rflags; /* reservation flags */
struct lttng_kernel_ring_buffer *buf; /*
/*
* Reservation flags.
*
- * RING_BUFFER_RFLAG_FULL_TSC
+ * RING_BUFFER_RFLAG_FULL_TIMESTAMP
*
* This flag is passed to record_header_size() and to the primitive used to
* write the record header. It indicates that the full 64-bit time value is
* needed in the record header. If this flag is not set, the record header needs
- * only to contain "tsc_bits" bit of time value.
+ * only to contain "timestamp_bits" bit of time value.
*
* Reservation flags can be added by the client, starting from
* "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
* record_header_size() to lib_ring_buffer_write_record_header().
*/
-#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
+#define RING_BUFFER_RFLAG_FULL_TIMESTAMP (1U << 0)
#define RING_BUFFER_RFLAG_END (1U << 1)
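As an illustration of how a client consumes this flag, here is a minimal sketch of a record-header sizing callback; the function name and the header layout are hypothetical, not taken from this patch:

static size_t sketch_record_header_size(const struct lttng_kernel_ring_buffer_ctx *ctx)
{
	/* Full 64-bit time value requested: store id and timestamp separately. */
	if (ctx->priv.rflags & RING_BUFFER_RFLAG_FULL_TIMESTAMP)
		return sizeof(uint16_t) + sizeof(uint64_t);
	/* Compact form: id and "timestamp_bits" of time packed into one word. */
	return sizeof(uint32_t);
}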
#ifndef LTTNG_TRACER_CORE_H
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
- ctx->priv.tsc = lib_ring_buffer_clock_read(chan);
- if ((int64_t) ctx->priv.tsc == -EIO)
+ ctx->priv.timestamp = lib_ring_buffer_clock_read(chan);
+ if ((int64_t) ctx->priv.timestamp == -EIO)
return 1;
/*
*/
prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
- if (last_tsc_overflow(config, buf, ctx->priv.tsc))
- ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_timestamp_overflow(config, buf, ctx->priv.timestamp))
+ ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
if (unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
* @ctx: ring buffer context. (input and output) Must be already initialized.
*
* Atomic wait-free slot reservation. The reserved space starts at the context
- * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
+ * "pre_offset". Its length is "slot_size". The associated time-stamp is
+ * "timestamp".
*
* Return :
* 0 on success.
goto slow_path;
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * record headers, never the opposite (missing a full TSC record header
- * when it would be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary
+ * full timestamp record headers, never the opposite (missing a
+ * full timestamp record header when it would be needed).
*/
- save_last_tsc(config, ctx->priv.buf, ctx->priv.tsc);
+ save_last_timestamp(config, ctx->priv.buf, ctx->priv.timestamp);
/*
* Push the reader if necessary
/*
* We need to ensure that if the cmpxchg succeeds and discards the
- * record, the next record will record a full TSC, because it cannot
- * rely on the last_tsc associated with the discarded record to detect
- * overflows. The only way to ensure this is to set the last_tsc to 0
- * (assuming no 64-bit TSC overflow), which forces to write a 64-bit
- * timestamp in the next record.
+ * record, the next record will carry a full timestamp, because
+ * it cannot rely on the last_timestamp associated with the
+ * discarded record to detect overflows. The only way to ensure
+ * this is to set the last_timestamp to 0 (assuming no 64-bit
+ * timestamp overflow), which forces writing a 64-bit timestamp in
+ * the next record.
*
- * Note: if discard fails, we must leave the TSC in the record header.
- * It is needed to keep track of TSC overflows for the following
- * records.
+ * Note: if discard fails, we must leave the timestamp in the
+ * record header. It is needed to keep track of timestamp
+ * overflows for the following records.
*/
- save_last_tsc(config, buf, 0ULL);
+ save_last_timestamp(config, buf, 0ULL);
if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->priv.pre_offset)
!= end_offset))
}
/*
- * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
- * bits from the last TSC read. When overflows are detected, the full 64-bit
- * timestamp counter should be written in the record header. Reads and writes
- * last_tsc atomically.
+ * Last timestamp comparison functions. Check if the current timestamp
+ * overflows timestamp_bits bits from the last timestamp read. When
+ * overflows are detected, the full 64-bit timestamp counter should be
+ * written in the record header. Reads and writes last_timestamp
+ * atomically.
*/
#if (BITS_PER_LONG == 32)
static inline
-void save_last_tsc(const struct lttng_kernel_ring_buffer_config *config,
- struct lttng_kernel_ring_buffer *buf, u64 tsc)
+void save_last_timestamp(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf, u64 timestamp)
{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return;
/*
* Ensure the compiler performs this update in a single instruction.
*/
- v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
+ v_set(config, &buf->last_timestamp, (unsigned long)(timestamp >> config->timestamp_bits));
}
static inline
-int last_tsc_overflow(const struct lttng_kernel_ring_buffer_config *config,
- struct lttng_kernel_ring_buffer *buf, u64 tsc)
+int last_timestamp_overflow(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf, u64 timestamp)
{
- unsigned long tsc_shifted;
+ unsigned long timestamp_shifted;
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return 0;
- tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
- if (unlikely(tsc_shifted
- - (unsigned long)v_read(config, &buf->last_tsc)))
+ timestamp_shifted = (unsigned long)(timestamp >> config->timestamp_bits);
+ if (unlikely(timestamp_shifted
+ - (unsigned long)v_read(config, &buf->last_timestamp)))
return 1;
else
return 0;
}
#else
static inline
-void save_last_tsc(const struct lttng_kernel_ring_buffer_config *config,
- struct lttng_kernel_ring_buffer *buf, u64 tsc)
+void save_last_timestamp(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf, u64 timestamp)
{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return;
- v_set(config, &buf->last_tsc, (unsigned long)tsc);
+ v_set(config, &buf->last_timestamp, (unsigned long)timestamp);
}
static inline
-int last_tsc_overflow(const struct lttng_kernel_ring_buffer_config *config,
- struct lttng_kernel_ring_buffer *buf, u64 tsc)
+int last_timestamp_overflow(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf, u64 timestamp)
{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return 0;
- if (unlikely((tsc - v_read(config, &buf->last_tsc))
- >> config->tsc_bits))
+ if (unlikely((timestamp - v_read(config, &buf->last_timestamp))
+ >> config->timestamp_bits))
return 1;
else
return 0;
}
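To make the 64-bit overflow check above concrete, here is a worked example assuming a nanosecond clock and timestamp_bits = 27 (the numbers are illustrative only):

/*
 * delta = timestamp - last_timestamp = 200000000 ns (200 ms)
 * delta >> 27 = 200000000 / 134217728 = 1, which is non-zero,
 * so last_timestamp_overflow() returns 1 and the record header
 * must carry the full 64-bit timestamp. Records spaced less than
 * 2^27 ns ~= 134 ms apart keep the compressed time field.
 */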
/*
- * Receive end of subbuffer TSC as parameter. It has been read in the
+ * Receive end of subbuffer timestamp as parameter. It has been read in the
* space reservation loop of either reserve or switch, which ensures it
* progresses monotonically with event records in the buffer. Therefore,
* it ensures that the end timestamp of a subbuffer is <= begin
*/
atomic_t record_disabled;
/* End of first 32 bytes cacheline */
- union v_atomic last_tsc; /*
+ union v_atomic last_timestamp; /*
* Last timestamp written in the buffer.
*/
#ifdef LTTNG_USE_NMI_SAFE_CLOCK
-DECLARE_PER_CPU(u64, lttng_last_tsc);
+DECLARE_PER_CPU(u64, lttng_last_timestamp);
/*
* Sometimes called with preemption enabled. Can be interrupted.
static inline u64 trace_clock_monotonic_wrapper(void)
{
u64 now, last, result;
- u64 *last_tsc_ptr;
+ u64 *last_timestamp_ptr;
/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
preempt_disable();
- last_tsc_ptr = this_cpu_ptr(&lttng_last_tsc);
- last = *last_tsc_ptr;
+ last_timestamp_ptr = this_cpu_ptr(&lttng_last_timestamp);
+ last = *last_timestamp_ptr;
/*
* Read "last" before "now". It is not strictly required, but it ensures
* that an interrupt coming in won't artificially trigger a case where
now = ktime_get_mono_fast_ns();
if (U64_MAX / 2 < now - last)
now = last;
- result = cmpxchg64_local(last_tsc_ptr, last, now);
+ result = cmpxchg64_local(last_timestamp_ptr, last, now);
preempt_enable();
if (result == last) {
/* Update done. */
* num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
* priv, notifiers, config, cpumask and name.
*/
- chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+ chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan);
}
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
if (ret)
goto free_bufs;
}
- chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+ chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan);
return 0;
}
atomic_long_set(&buf->consumed, 0);
atomic_set(&buf->record_disabled, 0);
- v_set(config, &buf->last_tsc, 0);
+ v_set(config, &buf->last_timestamp, 0);
lib_ring_buffer_backend_reset(&buf->backend);
/* Don't reset number of active readers */
v_set(config, &buf->records_lost_full, 0);
struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
void *priv = chanb->priv;
size_t subbuf_header_size;
- u64 tsc;
+ u64 timestamp;
int ret;
/* Test for cpu hotplug */
subbuf_header_size = config->cb.subbuffer_header_size();
v_set(config, &buf->offset, subbuf_header_size);
subbuffer_id_clear_noref(config, &buf->backend.buf_wsb[0].id);
- tsc = config->cb.ring_buffer_clock_read(buf->backend.chan);
- config->cb.buffer_begin(buf, tsc, 0);
+ timestamp = config->cb.ring_buffer_clock_read(buf->backend.chan);
+ config->cb.buffer_begin(buf, timestamp, 0);
v_add(config, subbuf_header_size, &buf->commit_hot[0].cc);
if (config->cb.buffer_create) {
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
- config->cb.buffer_begin(buf, ctx->priv.tsc, oldidx);
+ config->cb.buffer_begin(buf, ctx->priv.timestamp, oldidx);
/*
* Order all writes to buffer before the commit count update that will
* postponed until the commit counter is incremented for the
* current space reservation.
*/
- *ts_end = ctx->priv.tsc;
+ *ts_end = ctx->priv.timestamp;
/*
* Order all writes to buffer and store to ts_end before the commit
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
- config->cb.buffer_begin(buf, ctx->priv.tsc, beginidx);
+ config->cb.buffer_begin(buf, ctx->priv.timestamp, beginidx);
/*
* Order all writes to buffer before the commit count update that will
* postponed until the commit counter is incremented for the
* current space reservation.
*/
- *ts_end = ctx->priv.tsc;
+ *ts_end = ctx->priv.timestamp;
}
/*
offsets->switch_old_start = 0;
off = subbuf_offset(offsets->begin, chan);
- ctx->priv.tsc = config->cb.ring_buffer_clock_read(chan);
+ ctx->priv.timestamp = config->cb.ring_buffer_clock_read(chan);
/*
* Ensure we flush the header of an empty subbuffer when doing the
!= offsets.old);
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * records, never the opposite (missing a full TSC record when it would
- * be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary
+ * full timestamp records, never the opposite (missing a full
+ * timestamp record when it would be needed).
*/
- save_last_tsc(config, buf, ctx.priv.tsc);
+ save_last_timestamp(config, buf, ctx.priv.timestamp);
/*
* Push the reader if necessary
offsets->switch_old_end = 0;
offsets->pre_header_padding = 0;
- ctx->priv.tsc = config->cb.ring_buffer_clock_read(chan);
- if ((int64_t) ctx->priv.tsc == -EIO)
+ ctx->priv.timestamp = config->cb.ring_buffer_clock_read(chan);
+ if ((int64_t) ctx->priv.timestamp == -EIO)
return -EIO;
- if (last_tsc_overflow(config, buf, ctx->priv.tsc))
- ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_timestamp_overflow(config, buf, ctx->priv.timestamp))
+ ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
if (unlikely(subbuf_offset(offsets->begin, ctx->priv.chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
!= offsets.old));
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * records, never the opposite (missing a full TSC record when it would
- * be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary
+ * full timestamp records, never the opposite (missing a full
+ * timestamp record when it would be needed).
*/
- save_last_tsc(config, buf, ctx->priv.tsc);
+ save_last_timestamp(config, buf, ctx->priv.timestamp);
/*
* Push the reader if necessary
#include <ringbuffer/frontend_types.h>
#define LTTNG_COMPACT_EVENT_BITS 5
-#define LTTNG_COMPACT_TSC_BITS 27
+#define LTTNG_COMPACT_TIMESTAMP_BITS 27
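These two widths are chosen so that the event id and the compressed timestamp pack into a single 32-bit word, matching the bt_bitfield_write() calls below. The layout is sketched here for illustration; the exact bit numbering depends on the bitfield byte order:

/*
 * bits  0..4  : event id   (LTTNG_COMPACT_EVENT_BITS = 5)
 * bits  5..31 : timestamp  (LTTNG_COMPACT_TIMESTAMP_BITS = 27)
 *
 * 5 + 27 = 32 bits, i.e. one uint32_t per compact header, and
 * 27 timestamp bits cover 2^27 ns ~= 134 ms between records
 * before a full 64-bit timestamp is forced.
 */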
static struct lttng_transport lttng_relay_transport;
case 1: /* compact */
padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
offset += padding;
- if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
offset += sizeof(uint32_t); /* id and timestamp */
} else {
/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
offset += padding;
offset += sizeof(uint16_t);
- if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
offset += sizeof(uint32_t); /* timestamp */
} else {
event_id);
bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS,
- ctx->priv.tsc);
+ LTTNG_COMPACT_TIMESTAMP_BITS,
+ ctx->priv.timestamp);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
break;
}
case 2: /* large */
{
- uint32_t timestamp = (uint32_t) ctx->priv.tsc;
+ uint32_t timestamp = (uint32_t) ctx->priv.timestamp;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
switch (lttng_chan->priv->header_type) {
case 1: /* compact */
- if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
uint32_t id_time = 0;
bt_bitfield_write(&id_time, uint32_t,
event_id);
bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS, ctx->priv.tsc);
+ LTTNG_COMPACT_TIMESTAMP_BITS, ctx->priv.timestamp);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
} else {
uint8_t id = 0;
- uint64_t timestamp = ctx->priv.tsc;
+ uint64_t timestamp = ctx->priv.timestamp;
bt_bitfield_write(&id, uint8_t,
0,
break;
case 2: /* large */
{
- if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- uint32_t timestamp = (uint32_t) ctx->priv.tsc;
+ if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
+ uint32_t timestamp = (uint32_t) ctx->priv.timestamp;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
} else {
uint16_t id = 65535;
- uint64_t timestamp = ctx->priv.tsc;
+ uint64_t timestamp = ctx->priv.timestamp;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
/* Align extended struct on largest member */
return offsetof(struct packet_header, ctx.header_end);
}
-static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
+static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 timestamp,
unsigned int subbuf_idx)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
memcpy(header->uuid, session->priv->uuid.b, sizeof(session->priv->uuid));
header->stream_id = lttng_chan->priv->id;
header->stream_instance_id = buf->backend.cpu;
- header->ctx.timestamp_begin = tsc;
+ header->ctx.timestamp_begin = timestamp;
header->ctx.timestamp_end = 0;
header->ctx.content_size = ~0ULL; /* for debugging */
header->ctx.packet_size = ~0ULL;
* offset is assumed to never be 0 here : never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 tsc,
+static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 timestamp,
unsigned int subbuf_idx, unsigned long data_size,
const struct lttng_kernel_ring_buffer_ctx *ctx)
{
subbuf_idx * chan->backend.subbuf_size);
unsigned long records_lost = 0;
- header->ctx.timestamp_end = tsc;
+ header->ctx.timestamp_end = timestamp;
header->ctx.content_size =
(uint64_t) data_size * CHAR_BIT; /* in bits */
header->ctx.packet_size =
.cb.buffer_create = client_buffer_create,
.cb.buffer_finalize = client_buffer_finalize,
- .tsc_bits = LTTNG_COMPACT_TSC_BITS,
+ .timestamp_bits = LTTNG_COMPACT_TIMESTAMP_BITS,
.alloc = RING_BUFFER_ALLOC_PER_CPU,
.sync = RING_BUFFER_SYNC_PER_CPU,
.mode = RING_BUFFER_MODE_TEMPLATE,
return offsetof(struct event_notifier_packet_header, header_end);
}
-static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
+static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 timestamp,
unsigned int subbuf_idx)
{
}
* offset is assumed to never be 0 here : never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 tsc,
+static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 timestamp,
unsigned int subbuf_idx, unsigned long data_size,
const struct lttng_kernel_ring_buffer_ctx *ctx)
{
.cb.buffer_finalize = client_buffer_finalize,
.cb.record_get = client_record_get,
- .tsc_bits = 0,
+ .timestamp_bits = 0,
.alloc = RING_BUFFER_ALLOC_GLOBAL,
.sync = RING_BUFFER_SYNC_GLOBAL,
.mode = RING_BUFFER_MODE_TEMPLATE,
return offsetof(struct metadata_packet_header, header_end);
}
-static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
+static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 timestamp,
unsigned int subbuf_idx)
{
struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
* offset is assumed to never be 0 here : never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 tsc,
+static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 timestamp,
unsigned int subbuf_idx, unsigned long data_size,
const struct lttng_kernel_ring_buffer_ctx *ctx)
{
.cb.buffer_create = client_buffer_create,
.cb.buffer_finalize = client_buffer_finalize,
- .tsc_bits = 0,
+ .timestamp_bits = 0,
.alloc = RING_BUFFER_ALLOC_GLOBAL,
.sync = RING_BUFFER_SYNC_GLOBAL,
.mode = RING_BUFFER_MODE_TEMPLATE,
#include <wrapper/trace-clock.h>
#ifdef LTTNG_USE_NMI_SAFE_CLOCK
-DEFINE_PER_CPU(u64, lttng_last_tsc);
-EXPORT_PER_CPU_SYMBOL(lttng_last_tsc);
+DEFINE_PER_CPU(u64, lttng_last_timestamp);
+EXPORT_PER_CPU_SYMBOL(lttng_last_timestamp);
#endif /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
#ifdef LTTNG_CLOCK_NMI_SAFE_BROKEN