Rename "tsc" to "timestamp"
author: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Thu, 2 May 2024 21:06:29 +0000 (17:06 -0400)
committer: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Fri, 3 May 2024 19:33:13 +0000 (15:33 -0400)
Naming timestamps "TSC" or "tsc" is an historical artefact dating from
the implementation of libringbuffer, where the initial intent was to use
the x86 "rdtsc" instruction directly, which ended up not being what was
done in reality.

Rename uses of "TSC" and "tsc" to "timestamp" to clarify things and
avoid requiring reviewers to be fluent in the x86 instruction set.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Change-Id: I09437b0cf6db79600ed1a423d1f6df0344f5c9f7

12 files changed:
include/ringbuffer/backend_types.h
include/ringbuffer/config.h
include/ringbuffer/frontend_api.h
include/ringbuffer/frontend_internal.h
include/ringbuffer/frontend_types.h
include/wrapper/trace-clock.h
src/lib/ringbuffer/ring_buffer_backend.c
src/lib/ringbuffer/ring_buffer_frontend.c
src/lttng-ring-buffer-client.h
src/lttng-ring-buffer-event-notifier-client.h
src/lttng-ring-buffer-metadata-client.h
src/wrapper/trace-clock.c

index 337f1cda9ba26219a34f01065b5b4a3c14a31a1f..c23889ea63e616800f286a4ffdb6b3ebd5bd3158 100644 (file)
@@ -82,7 +82,7 @@ struct channel_backend {
        struct lttng_kernel_ring_buffer *buf;   /* Channel per-cpu buffers */
 
        unsigned long num_subbuf;       /* Number of sub-buffers for writer */
-       u64 start_tsc;                  /* Channel creation TSC value */
+       u64 start_timestamp;            /* Channel creation timestamp value */
        void *priv;                     /* Client-specific information */
        void *priv_ops;                 /* Client-specific ops pointer */
        void (*release_priv_ops)(void *priv_ops);
index 43523de3055b55414a969d1ac30bee9610615e2c..e63463b08ef45b8fd6540681f636630ef6763db5 100644 (file)
@@ -41,9 +41,9 @@ struct lttng_kernel_ring_buffer_client_cb {
 
        /* Slow path only, at subbuffer switch */
        size_t (*subbuffer_header_size) (void);
-       void (*buffer_begin) (struct lttng_kernel_ring_buffer *buf, u64 tsc,
+       void (*buffer_begin) (struct lttng_kernel_ring_buffer *buf, u64 timestamp,
                              unsigned int subbuf_idx);
-       void (*buffer_end) (struct lttng_kernel_ring_buffer *buf, u64 tsc,
+       void (*buffer_end) (struct lttng_kernel_ring_buffer *buf, u64 timestamp,
                            unsigned int subbuf_idx, unsigned long data_size,
                            const struct lttng_kernel_ring_buffer_ctx *ctx);
 
@@ -151,10 +151,10 @@ struct lttng_kernel_ring_buffer_config {
                                                 */
        } wakeup;
        /*
-        * tsc_bits: timestamp bits saved at each record.
+        * timestamp_bits: timestamp bits saved at each record.
         *   0 and 64 disable the timestamp compression scheme.
         */
-       unsigned int tsc_bits;
+       unsigned int timestamp_bits;
        struct lttng_kernel_ring_buffer_client_cb cb;
 };
 
@@ -182,7 +182,7 @@ struct lttng_kernel_ring_buffer_ctx_private {
                                                 * prior to record header alignment
                                                 * padding.
                                                 */
-       u64 tsc;                                /* time-stamp counter value */
+       u64 timestamp;                          /* time-stamp counter value */
        unsigned int rflags;                    /* reservation flags */
 
        struct lttng_kernel_ring_buffer *buf;   /*
@@ -244,18 +244,18 @@ void lib_ring_buffer_ctx_init(struct lttng_kernel_ring_buffer_ctx *ctx,
 /*
  * Reservation flags.
  *
- * RING_BUFFER_RFLAG_FULL_TSC
+ * RING_BUFFER_RFLAG_FULL_TIMESTAMP
  *
  * This flag is passed to record_header_size() and to the primitive used to
  * write the record header. It indicates that the full 64-bit time value is
  * needed in the record header. If this flag is not set, the record header needs
- * only to contain "tsc_bits" bit of time value.
+ * only to contain "timestamp_bits" bit of time value.
  *
  * Reservation flags can be added by the client, starting from
  * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
  * record_header_size() to lib_ring_buffer_write_record_header().
  */
-#define        RING_BUFFER_RFLAG_FULL_TSC              (1U << 0)
+#define        RING_BUFFER_RFLAG_FULL_TIMESTAMP        (1U << 0)
 #define RING_BUFFER_RFLAG_END                  (1U << 1)
 
 #ifndef LTTNG_TRACER_CORE_H
index b473a61b60407859366fd7965a2e548f23357a70..e8d77d9588168d48fb6502c43093bd271b5770c5 100644 (file)
@@ -80,8 +80,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_kernel_ring_buffer_config *co
        *o_begin = v_read(config, &buf->offset);
        *o_old = *o_begin;
 
-       ctx->priv.tsc = lib_ring_buffer_clock_read(chan);
-       if ((int64_t) ctx->priv.tsc == -EIO)
+       ctx->priv.timestamp = lib_ring_buffer_clock_read(chan);
+       if ((int64_t) ctx->priv.timestamp == -EIO)
                return 1;
 
        /*
@@ -91,8 +91,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_kernel_ring_buffer_config *co
         */
        prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
 
-       if (last_tsc_overflow(config, buf, ctx->priv.tsc))
-               ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+       if (last_timestamp_overflow(config, buf, ctx->priv.timestamp))
+               ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
 
        if (unlikely(subbuf_offset(*o_begin, chan) == 0))
                return 1;
@@ -128,7 +128,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_kernel_ring_buffer_config *co
  * @ctx: ring buffer context. (input and output) Must be already initialized.
  *
  * Atomic wait-free slot reservation. The reserved space starts at the context
- * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
+ * "pre_offset". Its length is "slot_size". The associated time-stamp is
+ * "timestamp".
  *
  * Return :
  *  0 on success.
@@ -171,12 +172,12 @@ int lib_ring_buffer_reserve(const struct lttng_kernel_ring_buffer_config *config
                goto slow_path;
 
        /*
-        * Atomically update last_tsc. This update races against concurrent
-        * atomic updates, but the race will always cause supplementary full TSC
-        * record headers, never the opposite (missing a full TSC record header
-        * when it would be needed).
+        * Atomically update last_timestamp. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary
+        * full timestamp record headers, never the opposite (missing a
+        * full timestamp record header when it would be needed).
         */
-       save_last_tsc(config, ctx->priv.buf, ctx->priv.tsc);
+       save_last_timestamp(config, ctx->priv.buf, ctx->priv.timestamp);
 
        /*
         * Push the reader if necessary
@@ -308,17 +309,18 @@ int lib_ring_buffer_try_discard_reserve(const struct lttng_kernel_ring_buffer_co
 
        /*
         * We need to ensure that if the cmpxchg succeeds and discards the
-        * record, the next record will record a full TSC, because it cannot
-        * rely on the last_tsc associated with the discarded record to detect
-        * overflows. The only way to ensure this is to set the last_tsc to 0
-        * (assuming no 64-bit TSC overflow), which forces to write a 64-bit
-        * timestamp in the next record.
+        * record, the next record will record a full timestamp, because
+        * it cannot rely on the last_timestamp associated with the
+        * discarded record to detect overflows. The only way to ensure
+        * this is to set the last_timestamp to 0 (assuming no 64-bit
+        * timestamp overflow), which forces to write a 64-bit timestamp in
+        * the next record.
         *
-        * Note: if discard fails, we must leave the TSC in the record header.
-        * It is needed to keep track of TSC overflows for the following
-        * records.
+        * Note: if discard fails, we must leave the timestamp in the
+        * record header. It is needed to keep track of timestamp
+        * overflows for the following records.
         */
-       save_last_tsc(config, buf, 0ULL);
+       save_last_timestamp(config, buf, 0ULL);
 
        if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->priv.pre_offset)
                   != end_offset))
index 999a7bb9993273a94c18dd2f7e2f861e04bd72a9..39dbbf080cc9d23558607727d62ca8af12d270ae 100644 (file)
@@ -71,62 +71,63 @@ unsigned long subbuf_index(unsigned long offset, struct lttng_kernel_ring_buffer
 }
 
 /*
- * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
- * bits from the last TSC read. When overflows are detected, the full 64-bit
- * timestamp counter should be written in the record header. Reads and writes
- * last_tsc atomically.
+ * Last timestamp comparison functions. Check if the current timestamp
+ * overflows timestamp_bits bits from the last timestamp read. When
+ * overflows are detected, the full 64-bit timestamp counter should be
+ * written in the record header. Reads and writes last_timestamp
+ * atomically.
  */
 
 #if (BITS_PER_LONG == 32)
 static inline
-void save_last_tsc(const struct lttng_kernel_ring_buffer_config *config,
-                  struct lttng_kernel_ring_buffer *buf, u64 tsc)
+void save_last_timestamp(const struct lttng_kernel_ring_buffer_config *config,
+                  struct lttng_kernel_ring_buffer *buf, u64 timestamp)
 {
-       if (config->tsc_bits == 0 || config->tsc_bits == 64)
+       if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
                return;
 
        /*
         * Ensure the compiler performs this update in a single instruction.
         */
-       v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
+       v_set(config, &buf->last_timestamp, (unsigned long)(timestamp >> config->timestamp_bits));
 }
 
 static inline
-int last_tsc_overflow(const struct lttng_kernel_ring_buffer_config *config,
-                     struct lttng_kernel_ring_buffer *buf, u64 tsc)
+int last_timestamp_overflow(const struct lttng_kernel_ring_buffer_config *config,
+                     struct lttng_kernel_ring_buffer *buf, u64 timestamp)
 {
-       unsigned long tsc_shifted;
+       unsigned long timestamp_shifted;
 
-       if (config->tsc_bits == 0 || config->tsc_bits == 64)
+       if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
                return 0;
 
-       tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
-       if (unlikely(tsc_shifted
-                    - (unsigned long)v_read(config, &buf->last_tsc)))
+       timestamp_shifted = (unsigned long)(timestamp >> config->timestamp_bits);
+       if (unlikely(timestamp_shifted
+                    - (unsigned long)v_read(config, &buf->last_timestamp)))
                return 1;
        else
                return 0;
 }
 #else
 static inline
-void save_last_tsc(const struct lttng_kernel_ring_buffer_config *config,
-                  struct lttng_kernel_ring_buffer *buf, u64 tsc)
+void save_last_timestamp(const struct lttng_kernel_ring_buffer_config *config,
+                  struct lttng_kernel_ring_buffer *buf, u64 timestamp)
 {
-       if (config->tsc_bits == 0 || config->tsc_bits == 64)
+       if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
                return;
 
-       v_set(config, &buf->last_tsc, (unsigned long)tsc);
+       v_set(config, &buf->last_timestamp, (unsigned long)timestamp);
 }
 
 static inline
-int last_tsc_overflow(const struct lttng_kernel_ring_buffer_config *config,
-                     struct lttng_kernel_ring_buffer *buf, u64 tsc)
+int last_timestamp_overflow(const struct lttng_kernel_ring_buffer_config *config,
+                     struct lttng_kernel_ring_buffer *buf, u64 timestamp)
 {
-       if (config->tsc_bits == 0 || config->tsc_bits == 64)
+       if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
                return 0;
 
-       if (unlikely((tsc - v_read(config, &buf->last_tsc))
-                    >> config->tsc_bits))
+       if (unlikely((timestamp - v_read(config, &buf->last_timestamp))
+                    >> config->timestamp_bits))
                return 1;
        else
                return 0;
@@ -264,7 +265,7 @@ int lib_ring_buffer_reserve_committed(const struct lttng_kernel_ring_buffer_conf
 }
 
 /*
- * Receive end of subbuffer TSC as parameter. It has been read in the
+ * Receive end of subbuffer timestamp as parameter. It has been read in the
  * space reservation loop of either reserve or switch, which ensures it
  * progresses monotonically with event records in the buffer. Therefore,
  * it ensures that the end timestamp of a subbuffer is <= begin
index 07d23be260147ae8c56a233f599b30cc0c996c7b..65782ea776cefd67d44d7eaead57181048c33a97 100644 (file)
@@ -114,7 +114,7 @@ struct lttng_kernel_ring_buffer {
                                         */
        atomic_t record_disabled;
        /* End of first 32 bytes cacheline */
-       union v_atomic last_tsc;        /*
+       union v_atomic last_timestamp;  /*
                                         * Last timestamp written in the buffer.
                                         */
 
index adb1101f2d0c8c6836fe1e9fc63de9dd7552fec2..18578a6168681d5383f8140292d93a811312effd 100644 (file)
@@ -56,7 +56,7 @@ extern struct lttng_trace_clock *lttng_trace_clock;
 
 #ifdef LTTNG_USE_NMI_SAFE_CLOCK
 
-DECLARE_PER_CPU(u64, lttng_last_tsc);
+DECLARE_PER_CPU(u64, lttng_last_timestamp);
 
 /*
  * Sometimes called with preemption enabled. Can be interrupted.
@@ -64,12 +64,12 @@ DECLARE_PER_CPU(u64, lttng_last_tsc);
 static inline u64 trace_clock_monotonic_wrapper(void)
 {
        u64 now, last, result;
-       u64 *last_tsc_ptr;
+       u64 *last_timestamp_ptr;
 
        /* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
        preempt_disable();
-       last_tsc_ptr = this_cpu_ptr(&lttng_last_tsc);
-       last = *last_tsc_ptr;
+       last_timestamp_ptr = this_cpu_ptr(&lttng_last_timestamp);
+       last = *last_timestamp_ptr;
        /*
         * Read "last" before "now". It is not strictly required, but it ensures
         * that an interrupt coming in won't artificially trigger a case where
@@ -80,7 +80,7 @@ static inline u64 trace_clock_monotonic_wrapper(void)
        now = ktime_get_mono_fast_ns();
        if (U64_MAX / 2 < now - last)
                now = last;
-       result = cmpxchg64_local(last_tsc_ptr, last, now);
+       result = cmpxchg64_local(last_timestamp_ptr, last, now);
        preempt_enable();
        if (result == last) {
                /* Update done. */
index 02a9d4870f5289e5cc5576228759469111440498..74d249f30cead50fcd6daab2d00ddf81f621e307 100644 (file)
@@ -256,7 +256,7 @@ void channel_backend_reset(struct channel_backend *chanb)
         * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
         * priv, notifiers, config, cpumask and name.
         */
-       chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+       chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan);
 }
 
 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
@@ -471,7 +471,7 @@ int channel_backend_init(struct channel_backend *chanb,
                if (ret)
                        goto free_bufs;
        }
-       chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+       chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan);
 
        return 0;
 
index fbf3a16837c8574ebf6b90a2986d804626ba2d97..5e0728835f5166fb207baf4521ba4669354300c2 100644 (file)
@@ -173,7 +173,7 @@ void lib_ring_buffer_reset(struct lttng_kernel_ring_buffer *buf)
        }
        atomic_long_set(&buf->consumed, 0);
        atomic_set(&buf->record_disabled, 0);
-       v_set(config, &buf->last_tsc, 0);
+       v_set(config, &buf->last_timestamp, 0);
        lib_ring_buffer_backend_reset(&buf->backend);
        /* Don't reset number of active readers */
        v_set(config, &buf->records_lost_full, 0);
@@ -232,7 +232,7 @@ int lib_ring_buffer_create(struct lttng_kernel_ring_buffer *buf,
        struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
        void *priv = chanb->priv;
        size_t subbuf_header_size;
-       u64 tsc;
+       u64 timestamp;
        int ret;
 
        /* Test for cpu hotplug */
@@ -294,8 +294,8 @@ int lib_ring_buffer_create(struct lttng_kernel_ring_buffer *buf,
        subbuf_header_size = config->cb.subbuffer_header_size();
        v_set(config, &buf->offset, subbuf_header_size);
        subbuffer_id_clear_noref(config, &buf->backend.buf_wsb[0].id);
-       tsc = config->cb.ring_buffer_clock_read(buf->backend.chan);
-       config->cb.buffer_begin(buf, tsc, 0);
+       timestamp = config->cb.ring_buffer_clock_read(buf->backend.chan);
+       config->cb.buffer_begin(buf, timestamp, 0);
        v_add(config, subbuf_header_size, &buf->commit_hot[0].cc);
 
        if (config->cb.buffer_create) {
@@ -1599,7 +1599,7 @@ void lib_ring_buffer_switch_old_start(struct lttng_kernel_ring_buffer *buf,
        unsigned long commit_count;
        struct commit_counters_hot *cc_hot;
 
-       config->cb.buffer_begin(buf, ctx->priv.tsc, oldidx);
+       config->cb.buffer_begin(buf, ctx->priv.timestamp, oldidx);
 
        /*
         * Order all writes to buffer before the commit count update that will
@@ -1658,7 +1658,7 @@ void lib_ring_buffer_switch_old_end(struct lttng_kernel_ring_buffer *buf,
         * postponed until the commit counter is incremented for the
         * current space reservation.
         */
-       *ts_end = ctx->priv.tsc;
+       *ts_end = ctx->priv.timestamp;
 
        /*
         * Order all writes to buffer and store to ts_end before the commit
@@ -1701,7 +1701,7 @@ void lib_ring_buffer_switch_new_start(struct lttng_kernel_ring_buffer *buf,
        unsigned long commit_count;
        struct commit_counters_hot *cc_hot;
 
-       config->cb.buffer_begin(buf, ctx->priv.tsc, beginidx);
+       config->cb.buffer_begin(buf, ctx->priv.timestamp, beginidx);
 
        /*
         * Order all writes to buffer before the commit count update that will
@@ -1757,7 +1757,7 @@ void lib_ring_buffer_switch_new_end(struct lttng_kernel_ring_buffer *buf,
         * postponed until the commit counter is incremented for the
         * current space reservation.
         */
-       *ts_end = ctx->priv.tsc;
+       *ts_end = ctx->priv.timestamp;
 }
 
 /*
@@ -1780,7 +1780,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
        offsets->switch_old_start = 0;
        off = subbuf_offset(offsets->begin, chan);
 
-       ctx->priv.tsc = config->cb.ring_buffer_clock_read(chan);
+       ctx->priv.timestamp = config->cb.ring_buffer_clock_read(chan);
 
        /*
         * Ensure we flush the header of an empty subbuffer when doing the
@@ -1901,12 +1901,12 @@ void lib_ring_buffer_switch_slow(struct lttng_kernel_ring_buffer *buf, enum swit
                 != offsets.old);
 
        /*
-        * Atomically update last_tsc. This update races against concurrent
-        * atomic updates, but the race will always cause supplementary full TSC
-        * records, never the opposite (missing a full TSC record when it would
-        * be needed).
+        * Atomically update last_timestamp. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary
+        * full timestamp records, never the opposite (missing a full
+        * timestamp record when it would be needed).
         */
-       save_last_tsc(config, buf, ctx.priv.tsc);
+       save_last_timestamp(config, buf, ctx.priv.timestamp);
 
        /*
         * Push the reader if necessary
@@ -2029,12 +2029,12 @@ retry:
        offsets->switch_old_end = 0;
        offsets->pre_header_padding = 0;
 
-       ctx->priv.tsc = config->cb.ring_buffer_clock_read(chan);
-       if ((int64_t) ctx->priv.tsc == -EIO)
+       ctx->priv.timestamp = config->cb.ring_buffer_clock_read(chan);
+       if ((int64_t) ctx->priv.timestamp == -EIO)
                return -EIO;
 
-       if (last_tsc_overflow(config, buf, ctx->priv.tsc))
-               ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+       if (last_timestamp_overflow(config, buf, ctx->priv.timestamp))
+               ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
 
        if (unlikely(subbuf_offset(offsets->begin, ctx->priv.chan) == 0)) {
                offsets->switch_new_start = 1;          /* For offsets->begin */
@@ -2221,12 +2221,12 @@ int lib_ring_buffer_reserve_slow(struct lttng_kernel_ring_buffer_ctx *ctx,
                          != offsets.old));
 
        /*
-        * Atomically update last_tsc. This update races against concurrent
-        * atomic updates, but the race will always cause supplementary full TSC
-        * records, never the opposite (missing a full TSC record when it would
-        * be needed).
+        * Atomically update last_timestamp. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary
+        * full timestamp records, never the opposite (missing a full
+        * timestamp record when it would be needed).
         */
-       save_last_tsc(config, buf, ctx->priv.tsc);
+       save_last_timestamp(config, buf, ctx->priv.timestamp);
 
        /*
         * Push the reader if necessary
index 0dc8a8d9f5f73393a04afa39efe51db09e226e1d..a1753e986ae3fd68582cdc29e0c7b48a9329a5a6 100644 (file)
@@ -18,7 +18,7 @@
 #include <ringbuffer/frontend_types.h>
 
 #define LTTNG_COMPACT_EVENT_BITS       5
-#define LTTNG_COMPACT_TSC_BITS         27
+#define LTTNG_COMPACT_TIMESTAMP_BITS   27
 
 static struct lttng_transport lttng_relay_transport;
 
@@ -149,7 +149,7 @@ size_t record_header_size(const struct lttng_kernel_ring_buffer_config *config,
        case 1: /* compact */
                padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
                offset += padding;
-               if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+               if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
                        offset += sizeof(uint32_t);     /* id and timestamp */
                } else {
                        /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
@@ -165,7 +165,7 @@ size_t record_header_size(const struct lttng_kernel_ring_buffer_config *config,
                padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
                offset += padding;
                offset += sizeof(uint16_t);
-               if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+               if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
                        offset += sizeof(uint32_t);     /* timestamp */
                } else {
@@ -223,14 +223,14 @@ void lttng_write_event_header(const struct lttng_kernel_ring_buffer_config *conf
                                event_id);
                bt_bitfield_write(&id_time, uint32_t,
                                LTTNG_COMPACT_EVENT_BITS,
-                               LTTNG_COMPACT_TSC_BITS,
-                               ctx->priv.tsc);
+                               LTTNG_COMPACT_TIMESTAMP_BITS,
+                               ctx->priv.timestamp);
                lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                break;
        }
        case 2: /* large */
        {
-               uint32_t timestamp = (uint32_t) ctx->priv.tsc;
+               uint32_t timestamp = (uint32_t) ctx->priv.timestamp;
                uint16_t id = event_id;
 
                lib_ring_buffer_write(config, ctx, &id, sizeof(id));
@@ -260,7 +260,7 @@ void lttng_write_event_header_slow(const struct lttng_kernel_ring_buffer_config
 
        switch (lttng_chan->priv->header_type) {
        case 1: /* compact */
-               if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+               if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
                        uint32_t id_time = 0;
 
                        bt_bitfield_write(&id_time, uint32_t,
@@ -269,11 +269,11 @@ void lttng_write_event_header_slow(const struct lttng_kernel_ring_buffer_config
                                        event_id);
                        bt_bitfield_write(&id_time, uint32_t,
                                        LTTNG_COMPACT_EVENT_BITS,
-                                       LTTNG_COMPACT_TSC_BITS, ctx->priv.tsc);
+                                       LTTNG_COMPACT_TIMESTAMP_BITS, ctx->priv.timestamp);
                        lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                } else {
                        uint8_t id = 0;
-                       uint64_t timestamp = ctx->priv.tsc;
+                       uint64_t timestamp = ctx->priv.timestamp;
 
                        bt_bitfield_write(&id, uint8_t,
                                        0,
@@ -289,8 +289,8 @@ void lttng_write_event_header_slow(const struct lttng_kernel_ring_buffer_config
                break;
        case 2: /* large */
        {
-               if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
-                       uint32_t timestamp = (uint32_t) ctx->priv.tsc;
+               if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
+                       uint32_t timestamp = (uint32_t) ctx->priv.timestamp;
                        uint16_t id = event_id;
 
                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
@@ -298,7 +298,7 @@ void lttng_write_event_header_slow(const struct lttng_kernel_ring_buffer_config
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                } else {
                        uint16_t id = 65535;
-                       uint64_t timestamp = ctx->priv.tsc;
+                       uint64_t timestamp = ctx->priv.timestamp;
 
                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        /* Align extended struct on largest member */
@@ -346,7 +346,7 @@ static size_t client_packet_header_size(void)
        return offsetof(struct packet_header, ctx.header_end);
 }
 
-static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
+static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 timestamp,
                                unsigned int subbuf_idx)
 {
        struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
@@ -361,7 +361,7 @@ static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
        memcpy(header->uuid, session->priv->uuid.b, sizeof(session->priv->uuid));
        header->stream_id = lttng_chan->priv->id;
        header->stream_instance_id = buf->backend.cpu;
-       header->ctx.timestamp_begin = tsc;
+       header->ctx.timestamp_begin = timestamp;
        header->ctx.timestamp_end = 0;
        header->ctx.content_size = ~0ULL; /* for debugging */
        header->ctx.packet_size = ~0ULL;
@@ -376,7 +376,7 @@ static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
  * offset is assumed to never be 0 here : never deliver a completely empty
  * subbuffer. data_size is between 1 and subbuf_size.
  */
-static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 tsc,
+static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 timestamp,
                              unsigned int subbuf_idx, unsigned long data_size,
                              const struct lttng_kernel_ring_buffer_ctx *ctx)
 {
@@ -387,7 +387,7 @@ static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 tsc,
                                subbuf_idx * chan->backend.subbuf_size);
        unsigned long records_lost = 0;
 
-       header->ctx.timestamp_end = tsc;
+       header->ctx.timestamp_end = timestamp;
        header->ctx.content_size =
                (uint64_t) data_size * CHAR_BIT;                /* in bits */
        header->ctx.packet_size =
@@ -515,7 +515,7 @@ static const struct lttng_kernel_ring_buffer_config client_config = {
        .cb.buffer_create = client_buffer_create,
        .cb.buffer_finalize = client_buffer_finalize,
 
-       .tsc_bits = LTTNG_COMPACT_TSC_BITS,
+       .timestamp_bits = LTTNG_COMPACT_TIMESTAMP_BITS,
        .alloc = RING_BUFFER_ALLOC_PER_CPU,
        .sync = RING_BUFFER_SYNC_PER_CPU,
        .mode = RING_BUFFER_MODE_TEMPLATE,
index 8526e05e413fcb1e2b58a75682013ff242b8f1cb..fa7bbf57a04c62d0e952db8bc83c8277c600e2ed 100644 (file)
@@ -86,7 +86,7 @@ static size_t client_packet_header_size(void)
        return offsetof(struct event_notifier_packet_header, header_end);
 }
 
-static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
+static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 timestamp,
                                unsigned int subbuf_idx)
 {
 }
@@ -95,7 +95,7 @@ static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
  * offset is assumed to never be 0 here : never deliver a completely empty
  * subbuffer. data_size is between 1 and subbuf_size.
  */
-static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 tsc,
+static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 timestamp,
                              unsigned int subbuf_idx, unsigned long data_size,
                              const struct lttng_kernel_ring_buffer_ctx *ctx)
 {
@@ -200,7 +200,7 @@ static const struct lttng_kernel_ring_buffer_config client_config = {
        .cb.buffer_finalize = client_buffer_finalize,
        .cb.record_get = client_record_get,
 
-       .tsc_bits = 0,
+       .timestamp_bits = 0,
        .alloc = RING_BUFFER_ALLOC_GLOBAL,
        .sync = RING_BUFFER_SYNC_GLOBAL,
        .mode = RING_BUFFER_MODE_TEMPLATE,
index 86ec78f16abf7af75bc0ed9a3fae5ce66f64766b..d5e7c742e2ccd0e605dfeb5f801c024dcb1a842c 100644 (file)
@@ -81,7 +81,7 @@ static size_t client_packet_header_size(void)
        return offsetof(struct metadata_packet_header, header_end);
 }
 
-static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
+static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 timestamp,
                                unsigned int subbuf_idx)
 {
        struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
@@ -109,7 +109,7 @@ static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
  * offset is assumed to never be 0 here : never deliver a completely empty
  * subbuffer. data_size is between 1 and subbuf_size.
  */
-static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 tsc,
+static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 timestamp,
                              unsigned int subbuf_idx, unsigned long data_size,
                              const struct lttng_kernel_ring_buffer_ctx *ctx)
 {
@@ -214,7 +214,7 @@ static const struct lttng_kernel_ring_buffer_config client_config = {
        .cb.buffer_create = client_buffer_create,
        .cb.buffer_finalize = client_buffer_finalize,
 
-       .tsc_bits = 0,
+       .timestamp_bits = 0,
        .alloc = RING_BUFFER_ALLOC_GLOBAL,
        .sync = RING_BUFFER_SYNC_GLOBAL,
        .mode = RING_BUFFER_MODE_TEMPLATE,
index 74995fefdbb341a1f4c87d29ca88a1e72c2a7d3c..7ef21cf614c097e0032414d8aefc24c5ecfc7e24 100644 (file)
@@ -11,8 +11,8 @@
 #include <wrapper/trace-clock.h>
 
 #ifdef LTTNG_USE_NMI_SAFE_CLOCK
-DEFINE_PER_CPU(u64, lttng_last_tsc);
-EXPORT_PER_CPU_SYMBOL(lttng_last_tsc);
+DEFINE_PER_CPU(u64, lttng_last_timestamp);
+EXPORT_PER_CPU_SYMBOL(lttng_last_timestamp);
 #endif /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
 
 #ifdef LTTNG_CLOCK_NMI_SAFE_BROKEN
This page took 0.043074 seconds and 4 git commands to generate.