This allows the viewer to identify the gaps between trace packets.
Signed-off-by: Julien Desfossez <jdesfossez@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
return pages->data_size;
}
+/*
+ * Increment the packet sequence counter of the sub-buffer at location
+ * @idx within the ring buffer backend @bufb.
+ *
+ * The caller must have exclusive access to the sub-buffer (the call
+ * site invokes this right before setting the noref flag for the
+ * sub-buffer id).  @config is currently unused by this helper.
+ */
+static inline
+void subbuffer_inc_packet_count(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer_backend *bufb,
+ unsigned long idx)
+{
+ bufb->buf_cnt[idx].seq_cnt++;
+}
+
/**
* lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
* writer.
unsigned long id; /* backend subbuffer identifier */
};
+/*
+ * Per-sub-buffer counters.  Allocated as an array (one entry per
+ * sub-buffer, indexed by sub-buffer index) pointed to by the backend's
+ * buf_cnt field.
+ */
+struct lib_ring_buffer_backend_counts {
+ /*
+ * Counter specific to the sub-buffer location within the ring buffer.
+ * The actual sequence number of the packet within the entire ring
+ * buffer can be derived from the formula nr_subbuffers * seq_cnt +
+ * subbuf_idx.
+ */
+ uint64_t seq_cnt; /* packet sequence number */
+};
+
/*
* Forward declaration of frontend-specific channel and ring_buffer.
*/
struct lib_ring_buffer_backend_subbuffer *buf_wsb;
/* ring_buffer_backend_subbuffer for reader */
struct lib_ring_buffer_backend_subbuffer buf_rsb;
+ /* Array of lib_ring_buffer_backend_counts for the packet counter */
+ struct lib_ring_buffer_backend_counts *buf_cnt;
/*
* Pointer array of backend pages, for whole buffer.
* Indexed by ring_buffer_backend_subbuffer identifier (id) index.
buf,
idx));
+ /*
+ * Increment the packet counter while we have exclusive
+ * access.
+ */
+ subbuffer_inc_packet_count(config, &buf->backend, idx);
+
/*
* Set noref flag and offset for this subbuffer id.
* Contains a memory barrier that ensures counter stores
else
bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
+ /* Allocate subbuffer packet counter table */
+ bufb->buf_cnt = kzalloc_node(ALIGN(
+ sizeof(struct lib_ring_buffer_backend_counts)
+ * num_subbuf,
+ 1 << INTERNODE_CACHE_SHIFT),
+ GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+ if (unlikely(!bufb->buf_cnt))
+ goto free_wsb;
+
/* Assign pages to page index */
for (i = 0; i < num_subbuf_alloc; i++) {
for (j = 0; j < num_pages_per_subbuf; j++) {
kfree(pages);
return 0;
+free_wsb:
+ kfree(bufb->buf_wsb);
free_array:
for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
kfree(bufb->array[i]);
num_subbuf_alloc++;
kfree(bufb->buf_wsb);
+ kfree(bufb->buf_cnt);
for (i = 0; i < num_subbuf_alloc; i++) {
for (j = 0; j < bufb->num_pages_per_subbuf; j++)
__free_page(bufb->array[i]->p[j].page);
goto error;
return put_u64(ts, arg);
}
+ case LTTNG_RING_BUFFER_GET_SEQ_NUM:
+ {
+ uint64_t seq;
+
+ ret = ops->sequence_number(config, buf, &seq);
+ if (ret < 0)
+ goto error;
+ return put_u64(seq, arg);
+ }
default:
return lib_ring_buffer_file_operations.unlocked_ioctl(filp,
cmd, arg);
goto error;
return put_u64(ts, arg);
}
+ case LTTNG_RING_BUFFER_COMPAT_GET_SEQ_NUM:
+ {
+ uint64_t seq;
+
+ ret = ops->sequence_number(config, buf, &seq);
+ if (ret < 0)
+ goto error;
+ return put_u64(seq, arg);
+ }
default:
return lib_ring_buffer_file_operations.compat_ioctl(filp,
cmd, arg);
#define LTTNG_RING_BUFFER_GET_STREAM_ID _IOR(0xF6, 0x25, uint64_t)
/* returns the current timestamp */
#define LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP _IOR(0xF6, 0x26, uint64_t)
+/* returns the packet sequence number */
+#define LTTNG_RING_BUFFER_GET_SEQ_NUM _IOR(0xF6, 0x27, uint64_t)
#ifdef CONFIG_COMPAT
/* returns the timestamp begin of the current sub-buffer */
/* returns the current timestamp */
#define LTTNG_RING_BUFFER_COMPAT_GET_CURRENT_TIMESTAMP \
LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP
+/* returns the packet sequence number */
+#define LTTNG_RING_BUFFER_COMPAT_GET_SEQ_NUM \
+ LTTNG_RING_BUFFER_GET_SEQ_NUM
#endif /* CONFIG_COMPAT */
#endif /* _LTTNG_ABI_H */
" uint64_clock_monotonic_t timestamp_end;\n"
" uint64_t content_size;\n"
" uint64_t packet_size;\n"
+ " uint64_t packet_seq_num;\n"
" unsigned long events_discarded;\n"
" uint32_t cpu_id;\n"
"};\n\n"
int (*current_timestamp) (const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *bufb,
uint64_t *ts);
+ int (*sequence_number) (const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *bufb,
+ uint64_t *seq);
};
struct lttng_transport {
uint64_t timestamp_end; /* Cycle count at subbuffer end */
uint64_t content_size; /* Size of data in subbuffer */
uint64_t packet_size; /* Subbuffer size (include padding) */
+ uint64_t packet_seq_num; /* Packet sequence number */
unsigned long events_discarded; /*
* Events lost in this subbuffer since
* the beginning of the trace.
header->ctx.timestamp_end = 0;
header->ctx.content_size = ~0ULL; /* for debugging */
header->ctx.packet_size = ~0ULL;
+ header->ctx.packet_seq_num = chan->backend.num_subbuf * \
+ buf->backend.buf_cnt[subbuf_idx].seq_cnt + \
+ subbuf_idx;
header->ctx.events_discarded = 0;
header->ctx.cpu_id = buf->backend.cpu;
}
return 0;
}
+/*
+ * Read the packet sequence number stored in the packet header context
+ * (packet_seq_num) of the packet returned by client_packet_header(),
+ * and return it through @seq.
+ *
+ * Installed as the .sequence_number operation of this client; it backs
+ * the LTTNG_RING_BUFFER_GET_SEQ_NUM ioctl.  Always returns 0.
+ */
+static int client_sequence_number(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *buf,
+ uint64_t *seq)
+{
+ struct packet_header *header = client_packet_header(config, buf);
+
+ *seq = header->ctx.packet_seq_num;
+
+ return 0;
+}
+
static const struct lib_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
.packet_size = client_packet_size,
.stream_id = client_stream_id,
.current_timestamp = client_current_timestamp,
+ .sequence_number = client_sequence_number,
},
};
return -ENOSYS;
}
+/*
+ * Packet sequence numbers are not implemented for this client:
+ * unconditionally report -ENOSYS through the .sequence_number
+ * operation.  @seq is left untouched.
+ */
+static int client_sequence_number(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *bufb,
+ uint64_t *seq)
+{
+ return -ENOSYS;
+}
+
static const struct lib_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
.packet_size = client_packet_size,
.stream_id = client_stream_id,
.current_timestamp = client_current_timestamp,
+ .sequence_number = client_sequence_number,
},
};