This allows a trace viewer to identify gaps between trace packets.
This change is in lockstep with the corresponding commit in lttng-tools.
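A minimal sketch (not part of this patch) of how a consumer built against
liblttng-ust-ctl could use the new call, assuming the installed
<lttng/ust-ctl.h> header and an already-acquired stream:

    #include <stdint.h>
    #include <lttng/ust-ctl.h>

    /* Read the sequence number of the packet currently held by the consumer. */
    static int read_packet_seq(struct ustctl_consumer_stream *stream,
		    uint64_t *seq)
    {
	    int ret;

	    ret = ustctl_get_sequence_number(stream, seq);
	    if (ret) {
		    /* -ENOSYS: tracer predates packet sequence numbers. */
		    return ret;
	    }
	    return 0;
    }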
Signed-off-by: Julien Desfossez <jdesfossez@efficios.com>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
uint64_t *stream_id);
int ustctl_get_current_timestamp(struct ustctl_consumer_stream *stream,
uint64_t *ts);
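+/* Returns the sequence number of the current sub-buffer's packet. */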
+int ustctl_get_sequence_number(struct ustctl_consumer_stream *stream,
+ uint64_t *seq);
/* returns whether UST has perf counters support. */
int ustctl_has_perf_counters(void);
return client_cb->current_timestamp(buf, handle, ts);
}
+int ustctl_get_sequence_number(struct ustctl_consumer_stream *stream,
+ uint64_t *seq)
+{
+ struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_shm_handle *handle;
+
+ if (!stream || !seq)
+ return -EINVAL;
+ buf = stream->buf;
+ handle = stream->chan->chan->handle;
+ client_cb = get_client_cb(buf, handle);
+ if (!client_cb || !client_cb->sequence_number)
+ return -ENOSYS;
+ return client_cb->sequence_number(buf, handle, seq);
+}
+
#if defined(__x86_64__) || defined(__i386__)
int ustctl_has_perf_counters(void)
int (*current_timestamp) (struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle,
uint64_t *ts);
+ int (*sequence_number) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle, uint64_t *seq);
};
#endif /* _LTTNG_RB_CLIENT_H */
uint64_t timestamp_end; /* Cycle count at subbuffer end */
uint64_t content_size; /* Size of data in subbuffer */
uint64_t packet_size; /* Subbuffer size (include padding) */
+ uint64_t packet_seq_num; /* Packet sequence number */
unsigned long events_discarded; /*
* Events lost in this subbuffer since
* the beginning of the trace.
subbuf_idx * chan->backend.subbuf_size,
handle);
struct lttng_channel *lttng_chan = channel_get_private(chan);
+ uint64_t cnt = shmp_index(handle, buf->backend.buf_cnt, subbuf_idx)->seq_cnt;
assert(header);
if (!header)
	return;
header->ctx.timestamp_end = 0;
header->ctx.content_size = ~0ULL; /* for debugging */
header->ctx.packet_size = ~0ULL;
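+ /* Packet sequence number derived from the per-sub-buffer pass count. */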
+ header->ctx.packet_seq_num = chan->backend.num_subbuf * cnt + subbuf_idx;
header->ctx.events_discarded = 0;
header->ctx.cpu_id = buf->backend.cpu;
}
return 0;
}
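+
+/* Read the packet sequence number from the sub-buffer's packet header. */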
+static int client_sequence_number(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle,
+ uint64_t *seq)
+{
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ *seq = header->ctx.packet_seq_num;
+ return 0;
+}
+
static const
struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
.parent = {
.packet_size = client_packet_size,
.stream_id = client_stream_id,
.current_timestamp = client_current_timestamp,
+ .sequence_number = client_sequence_number,
};
static const struct lttng_ust_lib_ring_buffer_config client_config = {
return shmp(handle, pages->shmp)->data_size;
}
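+
+/*
+ * Increment the packet counter of the sub-buffer at idx. The caller
+ * must have exclusive access to the sub-buffer.
+ */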
+static inline
+void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ unsigned long idx, struct lttng_ust_shm_handle *handle)
+{
+ shmp_index(handle, bufb->buf_cnt, idx)->seq_cnt++;
+}
+
/**
* lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
* writer.
unsigned long id; /* backend subbuffer identifier */
};
+struct lttng_ust_lib_ring_buffer_backend_counts {
+ /*
+ * Counter specific to the sub-buffer location within the ring buffer.
+ * The actual sequence number of the packet within the entire ring
+ * buffer can be derived from the formula nr_subbuffers * seq_cnt +
+ * subbuf_idx.
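+ * For instance, with 4 sub-buffers, the packet written at sub-buffer
+ * index 1 during the second pass over the ring buffer (seq_cnt == 1)
+ * has sequence number 4 * 1 + 1 = 5.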
+ */
+ uint64_t seq_cnt; /* packet sequence number */
+};
+
/*
* Forward declaration of frontend-specific channel and ring_buffer.
*/
DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_subbuffer, buf_wsb);
/* ring_buffer_backend_subbuffer for reader */
struct lttng_ust_lib_ring_buffer_backend_subbuffer buf_rsb;
+ /* Array of lib_ring_buffer_backend_counts for the packet counter */
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_counts, buf_cnt);
/*
* Pointer array of backend pages, for whole buffer.
* Indexed by ring_buffer_backend_subbuffer identifier (id) index.
handle),
handle);
+ /*
+ * Increment the packet counter while we have exclusive
+ * access.
+ */
+ subbuffer_inc_packet_count(config, &buf->backend, idx, handle);
+
/*
* Set noref flag and offset for this subbuffer id.
* Contains a memory barrier that ensures counter stores
else
bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
+ /* Allocate subbuffer packet counter table */
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
+ set_shmp(bufb->buf_cnt, zalloc_shm(shmobj,
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_counts)
+ * num_subbuf));
+ if (caa_unlikely(!shmp(handle, bufb->buf_cnt)))
+ goto free_wsb;
+
/* Assign pages to page index */
for (i = 0; i < num_subbuf_alloc; i++) {
struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
}
return 0;
+free_wsb:
+ /* bufb->buf_wsb will be freed by shm teardown */
free_array:
/* bufb->array[i] will be freed by shm teardown */
memory_map_error: