ltt-probes.o ltt-core.o
obj-m += probes/
-
-# Use externally packaged lib ring buffer if missing in kernel
-ifneq ($(CONFIG_LIB_RING_BUFFER),)
-else
obj-m += lib/
-endif
endif
__field( unsigned int, nr_sector )
__field( int, errors )
__field( unsigned int, rwbs )
- __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ __dynamic_array_text( char, cmd, blk_cmd_buf_len(rq) )
),
TP_fast_assign(
__field( unsigned int, nr_sector )
__field( unsigned int, bytes )
__field( unsigned int, rwbs )
- __array( char, comm, TASK_COMM_LEN )
- __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ __array_text( char, comm, TASK_COMM_LEN )
+ __dynamic_array_text( char, cmd, blk_cmd_buf_len(rq) )
),
TP_fast_assign(
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, rwbs )
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, rwbs )
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, rwbs )
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
TP_ARGS(q),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
TP_STRUCT__entry(
__field( int, nr_rq )
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__field( sector_t, sector )
__field( sector_t, new_sector )
__field( unsigned int, rwbs )
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
TP_ARGS(str),
+ /*
+ * Not exactly a string: more a sequence of bytes (dynamic
+ * array) without the length. This is a dummy anyway: we only
+ * use this declaration to generate an event metadata entry.
+ */
TP_STRUCT__entry(
__string( str, str )
),
TP_ARGS(t),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
),
TP_ARGS(p, success),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
__field( int, success )
TP_ARGS(prev, next),
TP_STRUCT__entry(
- __array( char, prev_comm, TASK_COMM_LEN )
+ __array_text( char, prev_comm, TASK_COMM_LEN )
__field( pid_t, prev_pid )
__field( int, prev_prio )
__field( long, prev_state )
- __array( char, next_comm, TASK_COMM_LEN )
+ __array_text( char, next_comm, TASK_COMM_LEN )
__field( pid_t, next_pid )
__field( int, next_prio )
),
TP_ARGS(p, dest_cpu),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
__field( int, orig_cpu )
TP_ARGS(p),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
),
TP_ARGS(pid),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
),
TP_ARGS(parent, child),
TP_STRUCT__entry(
- __array( char, parent_comm, TASK_COMM_LEN )
+ __array_text( char, parent_comm, TASK_COMM_LEN )
__field( pid_t, parent_pid )
- __array( char, child_comm, TASK_COMM_LEN )
+ __array_text( char, child_comm, TASK_COMM_LEN )
__field( pid_t, child_pid )
),
TP_ARGS(tsk, delay),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, delay )
),
TP_ARGS(tsk, runtime, vruntime),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, runtime )
__field( u64, vruntime )
TP_ARGS(tsk, newprio),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
+ __array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, oldprio )
__field( int, newprio )
u64 (*ring_buffer_clock_read) (struct channel *chan);
size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
- size_t data_size,
size_t *pre_header_padding,
- unsigned int rflags,
struct lib_ring_buffer_ctx *ctx);
/* Slow path only, at subbuffer switch */
ctx->data_size = data_size;
ctx->largest_align = largest_align;
ctx->cpu = cpu;
+ ctx->rflags = 0;
}
/*
prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
if (last_tsc_overflow(config, buf, ctx->tsc))
- ctx->rflags = RING_BUFFER_RFLAG_FULL_TSC;
+ ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
if (unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
ctx->slot_size = record_header_size(config, chan, *o_begin,
- ctx->data_size, before_hdr_pad,
- ctx->rflags, ctx);
+ before_hdr_pad, ctx);
ctx->slot_size +=
lib_ring_buffer_align(*o_begin + ctx->slot_size,
ctx->largest_align) + ctx->data_size;
return -EIO;
if (last_tsc_overflow(config, buf, ctx->tsc))
- ctx->rflags = RING_BUFFER_RFLAG_FULL_TSC;
+ ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
} else {
offsets->size = config->cb.record_header_size(config, chan,
offsets->begin,
- ctx->data_size,
&offsets->pre_header_padding,
- ctx->rflags, ctx);
+ ctx);
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
offsets->size =
config->cb.record_header_size(config, chan,
offsets->begin,
- ctx->data_size,
&offsets->pre_header_padding,
- ctx->rflags, ctx);
+ ctx);
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
const char *string;
};
-#define __type_integer(_type, _byte_order, _base) \
+#define __type_integer(_type, _byte_order, _base, _encoding) \
{ \
.atype = atype_integer, \
.u.basic.integer = \
.signedness = is_signed_type(_type), \
.reverse_byte_order = _byte_order != __BYTE_ORDER, \
.base = _base, \
- .encoding = lttng_encode_none, \
+ .encoding = lttng_encode_##_encoding, \
}, \
} \
struct lib_ring_buffer *(*buffer_read_open)(struct channel *chan);
void (*buffer_read_close)(struct lib_ring_buffer *buf);
int (*event_reserve)(struct lib_ring_buffer_ctx *ctx,
- uint16_t event_id);
+ uint32_t event_id);
void (*event_commit)(struct lib_ring_buffer_ctx *ctx);
void (*event_write)(struct lib_ring_buffer_ctx *ctx, const void *src,
size_t len);
* @config: ring buffer instance configuration
* @chan: channel
* @offset: offset in the write buffer
- * @data_size: size of the payload
* @pre_header_padding: padding to add before the header (output)
- * @rflags: reservation flags
* @ctx: reservation context
*
* Returns the event header size (including padding).
static __inline__
unsigned char record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
- size_t data_size, size_t *pre_header_padding,
- unsigned int rflags,
+ size_t *pre_header_padding,
struct lib_ring_buffer_ctx *ctx)
{
struct ltt_channel *ltt_chan = channel_get_private(chan);
case 1: /* compact */
padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
offset += padding;
- if (!(rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
offset += sizeof(uint32_t); /* id and timestamp */
} else {
/* Minimum space taken by 5-bit id */
padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
offset += padding;
offset += sizeof(uint16_t);
- if (!(rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
offset += sizeof(uint32_t); /* timestamp */
} else {
}
break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
*pre_header_padding = padding;
extern
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
- uint16_t event_id);
+ uint32_t event_id);
/*
* ltt_write_event_header
static __inline__
void ltt_write_event_header(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
- uint16_t event_id)
+ uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
break;
}
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
return;
ltt_write_event_header_slow(config, ctx, event_id);
}
-/*
- * TODO: For now, we only support 65536 event ids per channel.
- */
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
- uint16_t event_id)
+ struct lib_ring_buffer_ctx *ctx,
+ uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
switch (ltt_chan->header_type) {
case 1: /* compact */
- if (!(ctx->rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
uint32_t id_time = 0;
bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
break;
case 2: /* large */
{
- if (!(ctx->rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
uint32_t timestamp = (uint32_t) ctx->tsc;
lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
} else {
- uint16_t event_id = 65535;
- uint32_t event_id_ext = (uint32_t) event_id;
+ uint16_t id = 65535;
uint64_t timestamp = ctx->tsc;
- lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
/* Align extended struct on largest member */
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
- lib_ring_buffer_write(config, ctx, &event_id_ext, sizeof(event_id_ext));
+ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
}
break;
}
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
}
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
- size_t data_size,
size_t *pre_header_padding,
- unsigned int rflags,
struct lib_ring_buffer_ctx *ctx)
{
- return record_header_size(config, chan, offset, data_size,
- pre_header_padding, rflags, ctx);
+ return record_header_size(config, chan, offset,
+ pre_header_padding, ctx);
}
/**
static
int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
- uint16_t event_id)
+ uint32_t event_id)
{
+ struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
int ret, cpu;
cpu = lib_ring_buffer_get_cpu(&client_config);
return -EPERM;
ctx->cpu = cpu;
+ switch (ltt_chan->header_type) {
+ case 1: /* compact */
+ if (event_id > 30)
+ ctx->rflags |= LTT_RFLAG_EXTENDED;
+ break;
+ case 2: /* large */
+ if (event_id > 65534)
+ ctx->rflags |= LTT_RFLAG_EXTENDED;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
ret = lib_ring_buffer_reserve(&client_config, ctx);
if (ret)
goto put;
static inline
unsigned char record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
- size_t data_size, size_t *pre_header_padding,
- unsigned int rflags,
+ size_t *pre_header_padding,
struct lib_ring_buffer_ctx *ctx)
{
return 0;
}
static
-int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx, uint16_t event_id)
+int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
{
return lib_ring_buffer_reserve(&client_config, ctx);
}
*/
#define LTT_RESERVE_CRITICAL 4096
+#define LTT_RFLAG_EXTENDED RING_BUFFER_RFLAG_END
+#define LTT_RFLAG_END (LTT_RFLAG_EXTENDED << 1)
+
/* Register and unregister function pointers */
enum ltt_module_function {
#define __field(_type, _item) \
{ \
.name = #_item, \
- .type = __type_integer(_type, __BYTE_ORDER, 10), \
+ .type = __type_integer(_type, __BYTE_ORDER, 10, none),\
},
#undef __field_ext
#define __field_network(_type, _item) \
{ \
.name = #_item, \
- .type = __type_integer(_type, __BIG_ENDIAN, 10), \
+ .type = __type_integer(_type, __BIG_ENDIAN, 10, none),\
},
-#undef __array
-#define __array(_type, _item, _length) \
+#undef __array_enc
+#define __array_enc(_type, _item, _length, _encoding) \
{ \
.name = #_item, \
.type = \
.u.array = \
{ \
.length = _length, \
- .elem_type = __type_integer(_type, __BYTE_ORDER, 10), \
+ .elem_type = __type_integer(_type, __BYTE_ORDER, 10, _encoding), \
}, \
}, \
},
-#undef __dynamic_array
-#define __dynamic_array(_type, _item, _length) \
+#undef __array
+#define __array(_type, _item, _length) \
+ __array_enc(_type, _item, _length, none)
+
+#undef __array_text
+#define __array_text(_type, _item, _length) \
+ __array_enc(_type, _item, _length, UTF8)
+
+#undef __dynamic_array_enc
+#define __dynamic_array_enc(_type, _item, _length, _encoding) \
{ \
.name = #_item, \
.type = \
.atype = atype_sequence, \
.u.sequence = \
{ \
- .length_type = __type_integer(u32, __BYTE_ORDER, 10), \
- .elem_type = __type_integer(_type, __BYTE_ORDER, 10), \
+ .length_type = __type_integer(u32, __BYTE_ORDER, 10, none), \
+ .elem_type = __type_integer(_type, __BYTE_ORDER, 10, _encoding), \
}, \
}, \
},
+#undef __dynamic_array
+#define __dynamic_array(_type, _item, _length) \
+ __dynamic_array_enc(_type, _item, _length, none)
+
+#undef __dynamic_array_text
+#define __dynamic_array_text(_type, _item, _length) \
+ __dynamic_array_enc(_type, _item, _length, UTF8)
+
#undef __string
#define __string(_item, _src) \
{ \
__event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
__event_len += sizeof(_type) * (_length);
+#undef __array_text
+#define __array_text(_type, _item, _length) \
+ __array(_type, _item, _length)
+
#undef __dynamic_array
#define __dynamic_array(_type, _item, _length) \
__event_len += lib_ring_buffer_align(__event_len, ltt_alignof(u32)); \
__event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
__event_len += sizeof(_type) * (_length);
+#undef __dynamic_array_text
+#define __dynamic_array_text(_type, _item, _length) \
+ __dynamic_array(_type, _item, _length)
+
#undef __string
#define __string(_item, _src) \
__event_len += __dynamic_len[__dynamic_len_idx++] = strlen(_src) + 1;
#define __array(_type, _item, _length) \
__event_align = max_t(size_t, __event_align, ltt_alignof(_type));
+#undef __array_text
+#define __array_text(_type, _item, _length) \
+ __array(_type, _item, _length)
+
#undef __dynamic_array
#define __dynamic_array(_type, _item, _length) \
__event_align = max_t(size_t, __event_align, ltt_alignof(u32)); \
__event_align = max_t(size_t, __event_align, ltt_alignof(_type));
+#undef __dynamic_array_text
+#define __dynamic_array_text(_type, _item, _length) \
+ __dynamic_array(_type, _item, _length)
+
#undef __string
#define __string(_item, _src)
#undef __array
#define __array(_type, _item, _length) _type _item;
+#undef __array_text
+#define __array_text(_type, _item, _length) \
+ __array(_type, _item, _length)
+
#undef __dynamic_array
#define __dynamic_array(_type, _item, _length) _type _item;
+#undef __dynamic_array_text
+#define __dynamic_array_text(_type, _item, _length) \
+ __dynamic_array(_type, _item, _length)
+
#undef __string
#define __string(_item, _src) char _item;
goto __assign_##_item; \
__end_field_##_item:
+#undef __array_text
+#define __array_text(_type, _item, _length) \
+ __array(_type, _item, _length)
+
#undef __dynamic_array
#define __dynamic_array(_type, _item, _length) \
goto __assign_##_item##_1; \
goto __assign_##_item##_2; \
__end_field_##_item##_2:
+#undef __dynamic_array_text
+#define __dynamic_array_text(_type, _item, _length) \
+ __dynamic_array(_type, _item, _length)
+
#undef __string
#define __string(_item, _src) \
goto __assign_##_item; \
#define TRACE_EVENT_TYPE___enum(_name, _container_type) \
{ \
.name = #_name, \
- .container_type = __type_integer(_container_type, __BYTE_ORDER, 10), \
+ .container_type = __type_integer(_container_type, __BYTE_ORDER, 10, none), \
.entries = __trace_event_enum_##_name, \
.len = ARRAY_SIZE(__trace_event_enum_##_name), \
},