uint16_t offset;
} __attribute__((packed));
+struct get_symbol {
+ /* Symbol offset. */
+ uint16_t offset;
+} __attribute__((packed));
+
+struct get_index_u16 {
+ uint16_t index;
+} __attribute__((packed));
+
+struct get_index_u64 {
+ uint64_t index;
+} __attribute__((packed));
+
struct literal_numeric {
int64_t v;
} __attribute__((packed));
FILTER_OP_MINUS = 6,
FILTER_OP_RSHIFT = 7,
FILTER_OP_LSHIFT = 8,
- FILTER_OP_BIN_AND = 9,
- FILTER_OP_BIN_OR = 10,
- FILTER_OP_BIN_XOR = 11,
+ FILTER_OP_BIT_AND = 9,
+ FILTER_OP_BIT_OR = 10,
+ FILTER_OP_BIT_XOR = 11,
/* binary comparators */
FILTER_OP_EQ = 12,
FILTER_OP_EQ_STAR_GLOB_STRING = 77,
FILTER_OP_NE_STAR_GLOB_STRING = 78,
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
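+ /*
+ * For example, a filter such as "$ctx.prio == 0" is expected to
+ * compile to GET_CONTEXT_ROOT, GET_SYMBOL "prio", LOAD_FIELD;
+ * the specialization phase then rewrites the GET_SYMBOL into a
+ * GET_INDEX_U16 pointing at the resolved context field.
+ */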
+ FILTER_OP_GET_CONTEXT_ROOT = 79,
+ FILTER_OP_GET_APP_CONTEXT_ROOT = 80,
+ FILTER_OP_GET_PAYLOAD_ROOT = 81,
+
+ FILTER_OP_GET_SYMBOL = 82,
+ FILTER_OP_GET_SYMBOL_FIELD = 83,
+ FILTER_OP_GET_INDEX_U16 = 84,
+ FILTER_OP_GET_INDEX_U64 = 85,
+
+ FILTER_OP_LOAD_FIELD = 86,
+ FILTER_OP_LOAD_FIELD_S8 = 87,
+ FILTER_OP_LOAD_FIELD_S16 = 88,
+ FILTER_OP_LOAD_FIELD_S32 = 89,
+ FILTER_OP_LOAD_FIELD_S64 = 90,
+ FILTER_OP_LOAD_FIELD_U8 = 91,
+ FILTER_OP_LOAD_FIELD_U16 = 92,
+ FILTER_OP_LOAD_FIELD_U32 = 93,
+ FILTER_OP_LOAD_FIELD_U64 = 94,
+ FILTER_OP_LOAD_FIELD_STRING = 95,
+ FILTER_OP_LOAD_FIELD_SEQUENCE = 96,
+ FILTER_OP_LOAD_FIELD_DOUBLE = 97,
+
NR_FILTER_OPS,
};
LTTNG_TRACEPOINT_EVENT(lttng_test_filter_event,
TP_PROTO(int anint, int netint, long *values,
char *text, size_t textlen,
- char *etext),
- TP_ARGS(anint, netint, values, text, textlen, etext),
+ char *etext, uint32_t *net_values),
+ TP_ARGS(anint, netint, values, text, textlen, etext, net_values),
TP_FIELDS(
ctf_integer(int, intfield, anint)
ctf_integer_hex(int, intfield2, anint)
ctf_integer_network_hex(int, netintfieldhex, netint)
ctf_array(long, arrfield1, values, 3)
ctf_array_text(char, arrfield2, text, 10)
+ ctf_array_network(uint32_t, arrfield3, net_values, 3)
ctf_sequence(char, seqfield1, text, size_t, textlen)
ctf_sequence_text(char, seqfield2, text, size_t, textlen)
+ ctf_sequence_network(uint32_t, seqfield3, net_values, size_t, 3)
+ ctf_sequence(long, seqfield4, values, size_t, 3)
ctf_string(stringfield, text)
ctf_string(stringfield2, etext)
ctf_sequence_bitfield(long, bitfield_seq, values, uint8_t, 3)
field_align = type->u.basic.integer.alignment;
break;
case atype_array:
+ case atype_array_bitfield:
{
struct lttng_basic_type *btype;
case atype_array:
case atype_sequence:
+ case atype_array_bitfield:
+ case atype_sequence_bitfield:
case atype_struct:
case atype_array_compound:
case atype_sequence_compound:
break;
}
case atype_sequence:
+ case atype_sequence_bitfield:
{
struct lttng_basic_type *btype;
case atype_string:
case atype_array:
case atype_sequence:
+ case atype_array_bitfield:
+ case atype_sequence_bitfield:
case atype_struct:
case atype_array_compound:
case atype_sequence_compound:
case atype_array:
case atype_sequence:
+ case atype_array_bitfield:
+ case atype_sequence_bitfield:
case atype_struct:
case atype_array_compound:
case atype_sequence_compound:
ret = _lttng_enum_statedump(session, field, nesting);
break;
case atype_array:
+ case atype_array_bitfield:
{
const struct lttng_basic_type *elem_type;
break;
}
case atype_sequence:
+ case atype_sequence_bitfield:
{
const struct lttng_basic_type *elem_type;
const struct lttng_basic_type *length_type;
atype_array_compound, /* Array of compound types. */
atype_sequence_compound, /* Sequence of compound types. */
atype_variant,
+ atype_array_bitfield,
+ atype_sequence_bitfield,
NR_ABSTRACT_TYPES,
};
const char *filter_stack_data);
int link_failed;
struct list_head node; /* list of bytecode runtime in event */
+ struct lttng_event *event;
};
/*
#include <linux/uaccess.h>
#include <wrapper/frame.h>
#include <wrapper/types.h>
+#include <linux/swab.h>
#include <lttng-filter.h>
#include <lttng-string-utils.h>
*/
#define START_OP \
- start_pc = &bytecode->data[0]; \
+ start_pc = &bytecode->code[0]; \
pc = next_pc = start_pc; \
if (unlikely(pc - start_pc >= bytecode->len)) \
goto end; \
#endif
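+/*
+ * Resolve the context field at "idx" in the static context array and
+ * load it on the stack as a typed object. Only integer, enum and
+ * string-like (text array/sequence) context fields are supported.
+ */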
+static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
+ struct load_ptr *ptr,
+ uint32_t idx)
+{
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_event_field *field;
+ union lttng_ctx_value v;
+
+ ctx_field = &lttng_static_ctx->fields[idx];
+ field = &ctx_field->event_field;
+ ptr->type = LOAD_OBJECT;
+ /* field is only used for types nested within variants. */
+ ptr->field = NULL;
+
+ switch (field->type.atype) {
+ case atype_integer:
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ if (field->type.u.basic.integer.signedness) {
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ case atype_enum:
+ {
+ const struct lttng_integer_type *itype =
+ &field->type.u.basic.enumeration.container_type;
+
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ if (itype->signedness) {
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ }
+ case atype_array:
+ if (field->type.u.array.elem_type.atype != atype_integer) {
+ printk(KERN_WARNING "Array nesting only supports integer types.\n");
+ return -EINVAL;
+ }
+ if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
+ printk(KERN_WARNING "Only string arrays are supported for contexts.\n");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ ptr->ptr = v.str;
+ break;
+ case atype_sequence:
+ if (field->type.u.sequence.elem_type.atype != atype_integer) {
+ printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
+ return -EINVAL;
+ }
+ if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
+ printk(KERN_WARNING "Only string sequences are supported for contexts.\n");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ ptr->ptr = v.str;
+ break;
+ case atype_array_bitfield:
+ printk(KERN_WARNING "Bitfield array type is not supported.\n");
+ return -EINVAL;
+ case atype_sequence_bitfield:
+ printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
+ return -EINVAL;
+ case atype_string:
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ ptr->ptr = v.str;
+ break;
+ case atype_struct:
+ printk(KERN_WARNING "Structure type cannot be loaded.\n");
+ return -EINVAL;
+ default:
+ printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
+ return -EINVAL;
+ }
+ return 0;
+}
+
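+/*
+ * Apply a get_index operation to the object on top of the stack:
+ * array/sequence element access for loaded objects, context field
+ * lookup for context roots, and offset arithmetic within the stack
+ * data for the payload root.
+ */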
+static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
+ struct bytecode_runtime *runtime,
+ uint64_t index, struct estack_entry *stack_top)
+{
+ int ret;
+ const struct filter_get_index_data *gid;
+
+ /*
+ * Types nested within variants need to perform dynamic lookup
+ * based on the field descriptions. Variants are not implemented
+ * for now.
+ */
+ if (stack_top->u.ptr.field)
+ return -EINVAL;
+ gid = (const struct filter_get_index_data *) &runtime->data[index];
+ switch (stack_top->u.ptr.type) {
+ case LOAD_OBJECT:
+ switch (stack_top->u.ptr.object_type) {
+ case OBJECT_TYPE_ARRAY:
+ {
+ const char *ptr;
+
+ WARN_ON_ONCE(gid->offset >= gid->array_len);
+ /* Skip count (unsigned long) */
+ ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+ ptr = ptr + gid->offset;
+ stack_top->u.ptr.ptr = ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ /* field is only used for types nested within variants. */
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ case OBJECT_TYPE_SEQUENCE:
+ {
+ const char *ptr;
+ size_t ptr_seq_len;
+
+ ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+ ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
+ if (gid->offset >= gid->elem.len * ptr_seq_len) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ptr = ptr + gid->offset;
+ stack_top->u.ptr.ptr = ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ /* field is only used for types nested within variants. */
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ case OBJECT_TYPE_STRUCT:
+ printk(KERN_WARNING "Nested structures are not supported yet.\n");
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_VARIANT:
+ default:
+ printk(KERN_WARNING "Unexpected get index type %d",
+ (int) stack_top->u.ptr.object_type);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
+ {
+ ret = context_get_index(lttng_probe_ctx,
+ &stack_top->u.ptr,
+ gid->ctx_index);
+ if (ret) {
+ goto end;
+ }
+ break;
+ }
+ case LOAD_ROOT_PAYLOAD:
+ stack_top->u.ptr.ptr += gid->offset;
+ if (gid->elem.type == OBJECT_TYPE_STRING)
+ stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.type = LOAD_OBJECT;
+ /* field is only used for types nested within variants. */
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
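+/*
+ * Replace the pointer on top of the stack by the value it refers to
+ * (integer, string or string sequence), byte-swapping integers whose
+ * byte order differs from the host.
+ */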
+static int dynamic_load_field(struct estack_entry *stack_top)
+{
+ int ret;
+
+ switch (stack_top->u.ptr.type) {
+ case LOAD_OBJECT:
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ default:
+ dbg_printk("Filter warning: cannot load root, missing field name.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (stack_top->u.ptr.object_type) {
+ case OBJECT_TYPE_S8:
+ dbg_printk("op load field s8\n");
+ stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
+ break;
+ case OBJECT_TYPE_S16:
+ {
+ int16_t tmp;
+
+ dbg_printk("op load field s16\n");
+ tmp = *(int16_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab16s(&tmp);
+ stack_top->u.v = tmp;
+ break;
+ }
+ case OBJECT_TYPE_S32:
+ {
+ int32_t tmp;
+
+ dbg_printk("op load field s32\n");
+ tmp = *(int32_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab32s(&tmp);
+ stack_top->u.v = tmp;
+ break;
+ }
+ case OBJECT_TYPE_S64:
+ {
+ int64_t tmp;
+
+ dbg_printk("op load field s64\n");
+ tmp = *(int64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab64s(&tmp);
+ stack_top->u.v = tmp;
+ break;
+ }
+ case OBJECT_TYPE_U8:
+ dbg_printk("op load field u8\n");
+ stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
+ break;
+ case OBJECT_TYPE_U16:
+ {
+ uint16_t tmp;
+
+ dbg_printk("op load field s16\n");
+ tmp = *(uint16_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab16s(&tmp);
+ stack_top->u.v = tmp;
+ break;
+ }
+ case OBJECT_TYPE_U32:
+ {
+ uint32_t tmp;
+
+ dbg_printk("op load field u32\n");
+ tmp = *(uint32_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab32s(&tmp);
+ stack_top->u.v = tmp;
+ break;
+ }
+ case OBJECT_TYPE_U64:
+ {
+ uint64_t tmp;
+
+ dbg_printk("op load field u64\n");
+ tmp = *(uint64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab64s(&tmp);
+ stack_top->u.v = tmp;
+ break;
+ }
+ case OBJECT_TYPE_STRING:
+ {
+ const char *str;
+
+ dbg_printk("op load field string\n");
+ str = (const char *) stack_top->u.ptr.ptr;
+ stack_top->u.s.str = str;
+ if (unlikely(!stack_top->u.s.str)) {
+ dbg_printk("Filter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ stack_top->u.s.seq_len = SIZE_MAX;
+ stack_top->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ break;
+ }
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ {
+ const char *ptr;
+
+ dbg_printk("op load field string sequence\n");
+ ptr = stack_top->u.ptr.ptr;
+ stack_top->u.s.seq_len = *(unsigned long *) ptr;
+ stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ if (unlikely(!stack_top->u.s.str)) {
+ dbg_printk("Filter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ stack_top->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ break;
+ }
+ case OBJECT_TYPE_DYNAMIC:
+ /*
+ * Dynamic types in context are looked up
+ * by context get index.
+ */
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_SEQUENCE:
+ case OBJECT_TYPE_ARRAY:
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
/*
* Return 0 (discard), or raise the 0x1 flag (log event).
* Currently, other flags are kept for future extensions and have no
[ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
[ FILTER_OP_RSHIFT ] = &&LABEL_FILTER_OP_RSHIFT,
[ FILTER_OP_LSHIFT ] = &&LABEL_FILTER_OP_LSHIFT,
- [ FILTER_OP_BIN_AND ] = &&LABEL_FILTER_OP_BIN_AND,
- [ FILTER_OP_BIN_OR ] = &&LABEL_FILTER_OP_BIN_OR,
- [ FILTER_OP_BIN_XOR ] = &&LABEL_FILTER_OP_BIN_XOR,
+ [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
+ [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
+ [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
/* binary comparators */
[ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
/* load userspace field ref */
[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
+
+ /* Instructions for recursive traversal through composed types. */
+ [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
+ [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
+ [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
+
+ [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
+ [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
+ [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
+ [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
+
+ [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
+ [ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
+ [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
+ [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
+ [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
+ [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
+ [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
+ [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
+ [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
+ [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
+ [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
+ [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
};
#endif /* #ifndef INTERPRETER_USE_SWITCH */
OP(FILTER_OP_MINUS):
OP(FILTER_OP_RSHIFT):
OP(FILTER_OP_LSHIFT):
- OP(FILTER_OP_BIN_AND):
- OP(FILTER_OP_BIN_OR):
- OP(FILTER_OP_BIN_XOR):
printk(KERN_WARNING "unsupported bytecode op %u\n",
(unsigned int) *(filter_opcode_t *) pc);
ret = -EINVAL;
BUG_ON(1);
PO;
}
+ OP(FILTER_OP_BIT_AND):
+ {
+ int64_t res;
+
+ res = (estack_bx_v & estack_ax_v);
+ estack_pop(stack, top, ax, bx);
+ estack_ax_v = res;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(FILTER_OP_BIT_OR):
+ {
+ int64_t res;
+
+ res = (estack_bx_v | estack_ax_v);
+ estack_pop(stack, top, ax, bx);
+ estack_ax_v = res;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(FILTER_OP_BIT_XOR):
+ {
+ int64_t res;
+
+ res = (estack_bx_v ^ estack_ax_v);
+ estack_pop(stack, top, ax, bx);
+ estack_ax_v = res;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
/* unary */
OP(FILTER_OP_UNARY_PLUS):
PO;
}
+ OP(FILTER_OP_GET_CONTEXT_ROOT):
+ {
+ dbg_printk("op get context root\n");
+ estack_push(stack, top, ax, bx);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
+ {
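+ /*
+ * App contexts are rejected at specialization, so the
+ * interpreter should never reach this instruction.
+ */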
+ BUG_ON(1);
+ PO;
+ }
+
+ OP(FILTER_OP_GET_PAYLOAD_ROOT):
+ {
+ dbg_printk("op get app payload root\n");
+ estack_push(stack, top, ax, bx);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
+ estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(FILTER_OP_GET_SYMBOL):
+ {
+ dbg_printk("op get symbol\n");
+ switch (estack_ax(stack, top)->u.ptr.type) {
+ case LOAD_OBJECT:
+ printk(KERN_WARNING "Nested fields not implemented yet.\n");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ /*
+ * symbol lookup is performed by
+ * specialization.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ PO;
+ }
+
+ OP(FILTER_OP_GET_SYMBOL_FIELD):
+ {
+ /*
+ * Used for first variant encountered in a
+ * traversal. Variants are not implemented yet.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ OP(FILTER_OP_GET_INDEX_U16):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printk("op get index u16\n");
+ ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ PO;
+ }
+
+ OP(FILTER_OP_GET_INDEX_U64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printk("op get index u64\n");
+ ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ PO;
+ }
+
+ OP(FILTER_OP_LOAD_FIELD):
+ {
+ dbg_printk("op load field\n");
+ ret = dynamic_load_field(estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(FILTER_OP_LOAD_FIELD_S8):
+ {
+ dbg_printk("op load field s8\n");
+
+ estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_S16):
+ {
+ dbg_printk("op load field s16\n");
+
+ estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_S32):
+ {
+ dbg_printk("op load field s32\n");
+
+ estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_S64):
+ {
+ dbg_printk("op load field s64\n");
+
+ estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_U8):
+ {
+ dbg_printk("op load field u8\n");
+
+ estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_U16):
+ {
+ dbg_printk("op load field u16\n");
+
+ estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_U32):
+ {
+ dbg_printk("op load field u32\n");
+
+ estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_U64):
+ {
+ dbg_printk("op load field u64\n");
+
+ estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_DOUBLE):
+ {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ OP(FILTER_OP_LOAD_FIELD_STRING):
+ {
+ const char *str;
+
+ dbg_printk("op load field string\n");
+ str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.str = str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Filter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
+ {
+ const char *ptr;
+
+ dbg_printk("op load field string sequence\n");
+ ptr = estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
+ estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Filter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
END_OP
end:
/* return 0 (discard) on error */
* SOFTWARE.
*/
+#include <linux/slab.h>
#include <lttng-filter.h>
+#include "lib/align.h"
-int lttng_filter_specialize_bytecode(struct bytecode_runtime *bytecode)
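+/*
+ * Reserve "len" bytes of space aligned on "align" in the runtime data
+ * area, growing the allocation geometrically as needed. Returns the
+ * offset of the reserved space, or a negative error value.
+ */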
+static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
+ size_t align, size_t len)
+{
+ ssize_t ret;
+ size_t padding = offset_align(runtime->data_len, align);
+ size_t new_len = runtime->data_len + padding + len;
+ size_t new_alloc_len = new_len;
+ size_t old_alloc_len = runtime->data_alloc_len;
+
+ if (new_len > FILTER_MAX_DATA_LEN)
+ return -EINVAL;
+
+ if (new_alloc_len > old_alloc_len) {
+ char *newptr;
+
+ new_alloc_len =
+ max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
+ newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
+ if (!newptr)
+ return -ENOMEM;
+ runtime->data = newptr;
+ /* Zero only the newly allocated memory. */
+ memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
+ runtime->data_alloc_len = new_alloc_len;
+ }
+ runtime->data_len += padding;
+ ret = runtime->data_len;
+ runtime->data_len += len;
+ return ret;
+}
+
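+/* Append "len" bytes from "p" to the runtime data area; returns its offset. */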
+static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
+ const void *p, size_t align, size_t len)
+{
+ ssize_t offset;
+
+ offset = bytecode_reserve_data(runtime, align, len);
+ if (offset < 0)
+ return offset;
+ memcpy(&runtime->data[offset], p, len);
+ return offset;
+}
+
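+/*
+ * Specialize the generic LOAD_FIELD instruction into its typed
+ * variant, based on the object type found on top of the virtual
+ * stack at specialization time.
+ */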
+static int specialize_load_field(struct vstack_entry *stack_top,
+ struct load_op *insn)
+{
+ int ret;
+
+ switch (stack_top->load.type) {
+ case LOAD_OBJECT:
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ default:
+ dbg_printk("Filter warning: cannot load root, missing field name.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (stack_top->load.object_type) {
+ case OBJECT_TYPE_S8:
+ dbg_printk("op load field s8\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_S8;
+ break;
+ case OBJECT_TYPE_S16:
+ dbg_printk("op load field s16\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_S16;
+ break;
+ case OBJECT_TYPE_S32:
+ dbg_printk("op load field s32\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_S32;
+ break;
+ case OBJECT_TYPE_S64:
+ dbg_printk("op load field s64\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_S64;
+ break;
+ case OBJECT_TYPE_U8:
+ dbg_printk("op load field u8\n");
+ stack_top->type = REG_S64;
+ insn->op = FILTER_OP_LOAD_FIELD_U8;
+ break;
+ case OBJECT_TYPE_U16:
+ dbg_printk("op load field u16\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_U16;
+ break;
+ case OBJECT_TYPE_U32:
+ dbg_printk("op load field u32\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_U32;
+ break;
+ case OBJECT_TYPE_U64:
+ dbg_printk("op load field u64\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_U64;
+ break;
+ case OBJECT_TYPE_DOUBLE:
+ printk(KERN_WARNING "Double type unsupported\n\n");
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_STRING:
+ dbg_printk("op load field string\n");
+ stack_top->type = REG_STRING;
+ insn->op = FILTER_OP_LOAD_FIELD_STRING;
+ break;
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ dbg_printk("op load field string sequence\n");
+ stack_top->type = REG_STRING;
+ insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
+ break;
+ case OBJECT_TYPE_DYNAMIC:
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_SEQUENCE:
+ case OBJECT_TYPE_ARRAY:
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
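+/* Map an integer element size (in bits) and signedness to an object type. */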
+static int specialize_get_index_object_type(enum object_type *otype,
+ int signedness, uint32_t elem_len)
+{
+ switch (elem_len) {
+ case 8:
+ if (signedness)
+ *otype = OBJECT_TYPE_S8;
+ else
+ *otype = OBJECT_TYPE_U8;
+ break;
+ case 16:
+ if (signedness)
+ *otype = OBJECT_TYPE_S16;
+ else
+ *otype = OBJECT_TYPE_U16;
+ break;
+ case 32:
+ if (signedness)
+ *otype = OBJECT_TYPE_S32;
+ else
+ *otype = OBJECT_TYPE_U32;
+ break;
+ case 64:
+ if (signedness)
+ *otype = OBJECT_TYPE_S64;
+ else
+ *otype = OBJECT_TYPE_U64;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
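+/*
+ * Specialize a get_index on an array or sequence: precompute the
+ * element byte offset and byte order into a filter_get_index_data
+ * blob stored in the runtime data area. Array accesses are also
+ * bounds-checked here; sequence lengths are only known at runtime.
+ */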
+static int specialize_get_index(struct bytecode_runtime *runtime,
+ struct load_op *insn, uint64_t index,
+ struct vstack_entry *stack_top,
+ int idx_len)
+{
+ int ret;
+ struct filter_get_index_data gid;
+ ssize_t data_offset;
+
+ memset(&gid, 0, sizeof(gid));
+ switch (stack_top->load.type) {
+ case LOAD_OBJECT:
+ switch (stack_top->load.object_type) {
+ case OBJECT_TYPE_ARRAY:
+ {
+ const struct lttng_event_field *field;
+ uint32_t elem_len, num_elems;
+ int signedness;
+
+ field = stack_top->load.field;
+ elem_len = field->type.u.array.elem_type.u.basic.integer.size;
+ signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
+ num_elems = field->type.u.array.length;
+ if (index >= num_elems) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = specialize_get_index_object_type(&stack_top->load.object_type,
+ signedness, elem_len);
+ if (ret)
+ goto end;
+ gid.offset = index * (elem_len / CHAR_BIT);
+ gid.array_len = num_elems * (elem_len / CHAR_BIT);
+ gid.elem.type = stack_top->load.object_type;
+ gid.elem.len = elem_len;
+ if (field->type.u.array.elem_type.u.basic.integer.reverse_byte_order)
+ gid.elem.rev_bo = true;
+ stack_top->load.rev_bo = gid.elem.rev_bo;
+ break;
+ }
+ case OBJECT_TYPE_SEQUENCE:
+ {
+ const struct lttng_event_field *field;
+ uint32_t elem_len;
+ int signedness;
+
+ field = stack_top->load.field;
+ elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
+ signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
+ ret = specialize_get_index_object_type(&stack_top->load.object_type,
+ signedness, elem_len);
+ if (ret)
+ goto end;
+ gid.offset = index * (elem_len / CHAR_BIT);
+ gid.elem.type = stack_top->load.object_type;
+ gid.elem.len = elem_len;
+ if (field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order)
+ gid.elem.rev_bo = true;
+ stack_top->load.rev_bo = gid.elem.rev_bo;
+ break;
+ }
+ case OBJECT_TYPE_STRUCT:
+ /* Only generated by the specialize phase. */
+ case OBJECT_TYPE_VARIANT: /* Fall-through */
+ default:
+ printk(KERN_WARNING "Unexpected get index type %d",
+ (int) stack_top->load.object_type);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (idx_len) {
+ case 2:
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ break;
+ case 8:
+ ((struct get_index_u64 *) insn->data)->index = data_offset;
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+
+ return 0;
+
+end:
+ return ret;
+}
+
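+/*
+ * Resolve the symbol name referred to by a get_symbol instruction
+ * into an index within the static context, or a negative value if
+ * the context field does not exist.
+ */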
+static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
+ struct load_op *insn)
+{
+ uint16_t offset;
+ const char *name;
+
+ offset = ((struct get_symbol *) insn->data)->offset;
+ name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
+ return lttng_get_context_index(lttng_static_ctx, name);
+}
+
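+/*
+ * Determine the object type a field will have once loaded, so that
+ * the following load can be specialized. Array and sequence context
+ * fields are only supported as strings.
+ */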
+static int specialize_load_object(const struct lttng_event_field *field,
+ struct vstack_load *load, bool is_context)
+{
+ load->type = LOAD_OBJECT;
+ /*
+ * All integer fields are laid out as s64 on the stack for the filter.
+ */
+ switch (field->type.atype) {
+ case atype_integer:
+ if (field->type.u.basic.integer.signedness)
+ load->object_type = OBJECT_TYPE_S64;
+ else
+ load->object_type = OBJECT_TYPE_U64;
+ load->rev_bo = false;
+ break;
+ case atype_enum:
+ {
+ const struct lttng_integer_type *itype =
+ &field->type.u.basic.enumeration.container_type;
+
+ if (itype->signedness)
+ load->object_type = OBJECT_TYPE_S64;
+ else
+ load->object_type = OBJECT_TYPE_U64;
+ load->rev_bo = false;
+ break;
+ }
+ case atype_array:
+ if (field->type.u.array.elem_type.atype != atype_integer) {
+ printk(KERN_WARNING "Array nesting only supports integer types.\n");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
+ load->object_type = OBJECT_TYPE_ARRAY;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+ case atype_sequence:
+ if (field->type.u.sequence.elem_type.atype != atype_integer) {
+ printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
+ load->object_type = OBJECT_TYPE_SEQUENCE;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+ case atype_array_bitfield:
+ printk(KERN_WARNING "Bitfield array type is not supported.\n");
+ return -EINVAL;
+ case atype_sequence_bitfield:
+ printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
+ return -EINVAL;
+ case atype_string:
+ load->object_type = OBJECT_TYPE_STRING;
+ break;
+ case atype_struct:
+ printk(KERN_WARNING "Structure type cannot be loaded.\n");
+ return -EINVAL;
+ default:
+ printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
+ return -EINVAL;
+ }
+ return 0;
+}
+
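+/*
+ * Specialize a get_symbol on the context root: resolve the field
+ * name to a context index and rewrite the instruction into a
+ * GET_INDEX_U16 referencing a filter_get_index_data entry.
+ */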
+static int specialize_context_lookup(struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ int idx, ret;
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_event_field *field;
+ struct filter_get_index_data gid;
+ ssize_t data_offset;
+
+ idx = specialize_context_lookup_name(runtime, insn);
+ if (idx < 0) {
+ return -ENOENT;
+ }
+ ctx_field = &lttng_static_ctx->fields[idx];
+ field = &ctx_field->event_field;
+ ret = specialize_load_object(field, load, true);
+ if (ret)
+ return ret;
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = FILTER_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.ctx_index = idx;
+ gid.elem.type = load->object_type;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ return -EINVAL;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ return 0;
+}
+
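+/*
+ * Specialize a get_symbol on the payload root: walk the event field
+ * descriptions to locate the named field and its offset within the
+ * filter stack data, then rewrite the instruction into a
+ * GET_INDEX_U16.
+ */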
+static int specialize_event_payload_lookup(struct lttng_event *event,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ const char *name;
+ uint16_t offset;
+ const struct lttng_event_desc *desc = event->desc;
+ unsigned int i, nr_fields;
+ bool found = false;
+ uint32_t field_offset = 0;
+ const struct lttng_event_field *field;
+ int ret;
+ struct filter_get_index_data gid;
+ ssize_t data_offset;
+
+ nr_fields = desc->nr_fields;
+ offset = ((struct get_symbol *) insn->data)->offset;
+ name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+ for (i = 0; i < nr_fields; i++) {
+ field = &desc->fields[i];
+ if (!strcmp(field->name, name)) {
+ found = true;
+ break;
+ }
+ /* compute field offset on stack */
+ switch (field->type.atype) {
+ case atype_integer:
+ case atype_enum:
+ field_offset += sizeof(int64_t);
+ break;
+ case atype_array:
+ case atype_sequence:
+ case atype_array_bitfield:
+ case atype_sequence_bitfield:
+ field_offset += sizeof(unsigned long);
+ field_offset += sizeof(void *);
+ break;
+ case atype_string:
+ field_offset += sizeof(void *);
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ if (!found) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = specialize_load_object(field, load, false);
+ if (ret)
+ goto end;
+
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = FILTER_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.offset = field_offset;
+ gid.elem.type = load->object_type;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ ret = 0;
+end:
+ return ret;
+}
+
+int lttng_filter_specialize_bytecode(struct lttng_event *event,
+ struct bytecode_runtime *bytecode)
{
void *pc, *next_pc, *start_pc;
int ret = -EINVAL;
vstack_init(stack);
- start_pc = &bytecode->data[0];
+ start_pc = &bytecode->code[0];
for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
pc = next_pc) {
switch (*(filter_opcode_t *) pc) {
case FILTER_OP_MINUS:
case FILTER_OP_RSHIFT:
case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
printk(KERN_WARNING "unsupported bytecode op %u\n",
(unsigned int) *(filter_opcode_t *) pc);
ret = -EINVAL;
case FILTER_OP_LT_S64_DOUBLE:
case FILTER_OP_GE_S64_DOUBLE:
case FILTER_OP_LE_S64_DOUBLE:
+ case FILTER_OP_BIT_AND:
+ case FILTER_OP_BIT_OR:
+ case FILTER_OP_BIT_XOR:
{
/* Pop 2, push 1 */
if (vstack_pop(stack)) {
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
+ /* Pop 1, push 1 */
+ ret = specialize_load_field(vstack_ax(stack), insn);
+ if (ret)
+ goto end;
+
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_S8:
+ case FILTER_OP_LOAD_FIELD_S16:
+ case FILTER_OP_LOAD_FIELD_S32:
+ case FILTER_OP_LOAD_FIELD_S64:
+ case FILTER_OP_LOAD_FIELD_U8:
+ case FILTER_OP_LOAD_FIELD_U16:
+ case FILTER_OP_LOAD_FIELD_U32:
+ case FILTER_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_STRING:
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printk("op get symbol\n");
+ switch (vstack_ax(stack)->load.type) {
+ case LOAD_OBJECT:
+ printk(KERN_WARNING "Nested fields not implemented yet.\n");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ /* Lookup context field. */
+ ret = specialize_context_lookup(bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ case LOAD_ROOT_APP_CONTEXT:
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_PAYLOAD:
+ /* Lookup event payload field. */
+ ret = specialize_event_payload_lookup(event,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ {
+ /* Always generated by specialize phase. */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case FILTER_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printk("op get index u16\n");
+ /* Pop 1, push 1 */
+ ret = specialize_get_index(bytecode, insn, index->index,
+ vstack_ax(stack), sizeof(*index));
+ if (ret)
+ goto end;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printk("op get index u64\n");
+ /* Pop 1, push 1 */
+ ret = specialize_get_index(bytecode, insn, index->index,
+ vstack_ax(stack), sizeof(*index));
+ if (ret)
+ goto end;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
}
}
end:
const char *str)
{
if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
- goto error_unknown;
+ goto error_empty;
switch (vstack_ax(stack)->type) {
default:
case REG_DOUBLE:
- goto error_unknown;
+ goto error_type;
case REG_STRING:
switch (vstack_bx(stack)->type) {
default:
case REG_DOUBLE:
- goto error_unknown;
-
+ goto error_type;
+ case REG_TYPE_UNKNOWN:
+ goto unknown;
case REG_STRING:
break;
case REG_STAR_GLOB_STRING:
switch (vstack_bx(stack)->type) {
default:
case REG_DOUBLE:
- goto error_unknown;
-
+ goto error_type;
+ case REG_TYPE_UNKNOWN:
+ goto unknown;
case REG_STRING:
if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
goto error_mismatch;
switch (vstack_bx(stack)->type) {
default:
case REG_DOUBLE:
- goto error_unknown;
-
+ goto error_type;
+ case REG_TYPE_UNKNOWN:
+ goto unknown;
case REG_STRING:
case REG_STAR_GLOB_STRING:
goto error_mismatch;
-
case REG_S64:
break;
}
break;
+ case REG_TYPE_UNKNOWN:
+ switch (vstack_bx(stack)->type) {
+ default:
+ case REG_DOUBLE:
+ goto error_type;
+ case REG_TYPE_UNKNOWN:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_S64:
+ goto unknown;
+ }
+ break;
}
return 0;
-error_unknown:
+unknown:
+ return 1;
+
+error_empty:
+ printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
return -EINVAL;
error_mismatch:
printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
return -EINVAL;
+
+error_type:
+ printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
+ return -EINVAL;
+}
+
+/*
+ * Binary bitwise operators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ */
+static
+int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
+ const char *str)
+{
+ if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+ goto error_empty;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ case REG_DOUBLE:
+ goto error_type;
+
+ case REG_TYPE_UNKNOWN:
+ switch (vstack_bx(stack)->type) {
+ default:
+ case REG_DOUBLE:
+ goto error_type;
+ case REG_TYPE_UNKNOWN:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_S64:
+ goto unknown;
+ }
+ break;
+ case REG_S64:
+ switch (vstack_bx(stack)->type) {
+ default:
+ case REG_DOUBLE:
+ goto error_type;
+ case REG_TYPE_UNKNOWN:
+ goto unknown;
+ case REG_S64:
+ break;
+ }
+ break;
+ }
+ return 0;
+
+unknown:
+ return 1;
+
+error_empty:
+ printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
+ return -EINVAL;
+
+error_type:
+ printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
+ return -EINVAL;
+}
+
+static
+int validate_get_symbol(struct bytecode_runtime *bytecode,
+ const struct get_symbol *sym)
+{
+ const char *str, *str_limit;
+ size_t len_limit;
+
+ if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
+ return -EINVAL;
+
+ str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
+ str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
+ len_limit = str_limit - str;
+ if (strnlen(str, len_limit) == len_limit)
+ return -EINVAL;
+ return 0;
}
/*
case FILTER_OP_MINUS:
case FILTER_OP_RSHIFT:
case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
case FILTER_OP_EQ_DOUBLE:
case FILTER_OP_NE_DOUBLE:
case FILTER_OP_GT_DOUBLE:
case FILTER_OP_LT_S64:
case FILTER_OP_GE_S64:
case FILTER_OP_LE_S64:
+ case FILTER_OP_BIT_AND:
+ case FILTER_OP_BIT_OR:
+ case FILTER_OP_BIT_XOR:
{
if (unlikely(pc + sizeof(struct binary_op)
> start_pc + bytecode->len)) {
ret = -EINVAL;
break;
}
+
/* get context ref */
case FILTER_OP_GET_CONTEXT_REF:
{
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ case FILTER_OP_LOAD_FIELD:
+ case FILTER_OP_LOAD_FIELD_S8:
+ case FILTER_OP_LOAD_FIELD_S16:
+ case FILTER_OP_LOAD_FIELD_S32:
+ case FILTER_OP_LOAD_FIELD_S64:
+ case FILTER_OP_LOAD_FIELD_U8:
+ case FILTER_OP_LOAD_FIELD_U16:
+ case FILTER_OP_LOAD_FIELD_U32:
+ case FILTER_OP_LOAD_FIELD_U64:
+ case FILTER_OP_LOAD_FIELD_STRING:
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ if (unlikely(pc + sizeof(struct load_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case FILTER_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ break;
+ }
+ ret = validate_get_symbol(bytecode, sym);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ printk(KERN_WARNING "Unexpected get symbol field\n");
+ ret = -EINVAL;
+ break;
+
+ case FILTER_OP_GET_INDEX_U16:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case FILTER_OP_GET_INDEX_U64:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
}
return ret;
/*
* Return value:
- * 0: success
+ * >=0: success
* <0: error
*/
static
case FILTER_OP_MINUS:
case FILTER_OP_RSHIFT:
case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
/* Floating point */
case FILTER_OP_EQ_DOUBLE:
case FILTER_OP_NE_DOUBLE:
case FILTER_OP_EQ:
{
ret = bin_op_compare_check(stack, opcode, "==");
- if (ret)
+ if (ret < 0)
goto end;
break;
}
case FILTER_OP_NE:
{
ret = bin_op_compare_check(stack, opcode, "!=");
- if (ret)
+ if (ret < 0)
goto end;
break;
}
case FILTER_OP_GT:
{
ret = bin_op_compare_check(stack, opcode, ">");
- if (ret)
+ if (ret < 0)
goto end;
break;
}
case FILTER_OP_LT:
{
ret = bin_op_compare_check(stack, opcode, "<");
- if (ret)
+ if (ret < 0)
goto end;
break;
}
case FILTER_OP_GE:
{
ret = bin_op_compare_check(stack, opcode, ">=");
- if (ret)
+ if (ret < 0)
goto end;
break;
}
case FILTER_OP_LE:
{
ret = bin_op_compare_check(stack, opcode, "<=");
- if (ret)
+ if (ret < 0)
goto end;
break;
}
break;
}
+ case FILTER_OP_BIT_AND:
+ ret = bin_op_bitwise_check(stack, opcode, "&");
+ if (ret < 0)
+ goto end;
+ break;
+ case FILTER_OP_BIT_OR:
+ ret = bin_op_bitwise_check(stack, opcode, "|");
+ if (ret < 0)
+ goto end;
+ break;
+ case FILTER_OP_BIT_XOR:
+ ret = bin_op_bitwise_check(stack, opcode, "^");
+ if (ret < 0)
+ goto end;
+ break;
+
/* unary */
case FILTER_OP_UNARY_PLUS:
case FILTER_OP_UNARY_MINUS:
ret = -EINVAL;
goto end;
case REG_S64:
+ case REG_TYPE_UNKNOWN:
break;
}
break;
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ {
+ dbg_printk("Validate get context root\n");
+ break;
+ }
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ {
+ dbg_printk("Validate get app context root\n");
+ break;
+ }
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ {
+ dbg_printk("Validate get payload root\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD:
+ {
+ /*
+ * We tolerate that field type is unknown at validation,
+ * because we are performing the load specialization in
+ * a phase after validation.
+ */
+ dbg_printk("Validate load field\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_S8:
+ {
+ dbg_printk("Validate load field s8\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_S16:
+ {
+ dbg_printk("Validate load field s16\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_S32:
+ {
+ dbg_printk("Validate load field s32\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_S64:
+ {
+ dbg_printk("Validate load field s64\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_U8:
+ {
+ dbg_printk("Validate load field u8\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_U16:
+ {
+ dbg_printk("Validate load field u16\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_U32:
+ {
+ dbg_printk("Validate load field u32\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_U64:
+ {
+ dbg_printk("Validate load field u64\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_STRING:
+ {
+ dbg_printk("Validate load field string\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ {
+ dbg_printk("Validate load field sequence\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ {
+ dbg_printk("Validate load field double\n");
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printk("Validate get symbol offset %u\n", sym->offset);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printk("Validate get symbol field offset %u\n", sym->offset);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
+
+ dbg_printk("Validate get index u16 index %u\n", get_index->index);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
+
+ dbg_printk("Validate get index u64 index %llu\n",
+ (unsigned long long) get_index->index);
+ break;
+ }
}
end:
return ret;
/* Validate the context resulting from the previous instruction */
ret = validate_instruction_context(bytecode, stack, start_pc, pc);
- if (ret)
+ if (ret < 0)
return ret;
/* Validate merge points */
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
ret = 0;
goto end;
}
case FILTER_OP_MINUS:
case FILTER_OP_RSHIFT:
case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
/* Floating point */
case FILTER_OP_EQ_DOUBLE:
case FILTER_OP_NE_DOUBLE:
case FILTER_OP_LT_S64:
case FILTER_OP_GE_S64:
case FILTER_OP_LE_S64:
+ case FILTER_OP_BIT_AND:
+ case FILTER_OP_BIT_OR:
+ case FILTER_OP_BIT_XOR:
{
/* Pop 2, push 1 */
if (vstack_pop(stack)) {
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
vstack_ax(stack)->type = REG_S64;
next_pc += sizeof(struct binary_op);
break;
/* unary */
case FILTER_OP_UNARY_PLUS:
case FILTER_OP_UNARY_MINUS:
- case FILTER_OP_UNARY_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
case FILTER_OP_UNARY_PLUS_S64:
case FILTER_OP_UNARY_MINUS_S64:
case FILTER_OP_UNARY_NOT_S64:
{
/* Pop 1, push 1 */
if (!vstack_ax(stack)) {
- printk(KERN_WARNING "Empty stack\n");
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ break;
+ default:
+ printk(KERN_WARNING "Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case FILTER_OP_UNARY_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
ret = -EINVAL;
goto end;
}
+
vstack_ax(stack)->type = REG_S64;
next_pc += sizeof(struct unary_op);
break;
ret = merge_ret;
goto end;
}
+
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* There is always a cast-to-s64 operation before an and/or op. */
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ break;
+ default:
+ printk(KERN_WARNING "Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
/* Continue to next instruction */
/* Pop 1 when jump not taken */
if (vstack_pop(stack)) {
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_DOUBLE:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "Incorrect register type %d for cast\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
vstack_ax(stack)->type = REG_S64;
next_pc += sizeof(struct cast_op);
break;
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_S8:
+ case FILTER_OP_LOAD_FIELD_S16:
+ case FILTER_OP_LOAD_FIELD_S32:
+ case FILTER_OP_LOAD_FIELD_S64:
+ case FILTER_OP_LOAD_FIELD_U8:
+ case FILTER_OP_LOAD_FIELD_U16:
+ case FILTER_OP_LOAD_FIELD_U32:
+ case FILTER_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_STRING:
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL:
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U16:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
}
end:
*_next_pc = next_pc;
printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
return -ENOMEM;
}
- start_pc = &bytecode->data[0];
+ start_pc = &bytecode->code[0];
for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
pc = next_pc) {
ret = bytecode_validate_overflow(bytecode, start_pc, pc);
/*
* For each instruction, validate the current context
* (traversal of entire execution flow), and validate
- * all merge points targeting this instruction.
+ * all merge points targeting this instruction.
*/
ret = validate_instruction_all_contexts(bytecode, mp_table,
&stack, start_pc, pc);
[ FILTER_OP_MINUS ] = "MINUS",
[ FILTER_OP_RSHIFT ] = "RSHIFT",
[ FILTER_OP_LSHIFT ] = "LSHIFT",
- [ FILTER_OP_BIN_AND ] = "BIN_AND",
- [ FILTER_OP_BIN_OR ] = "BIN_OR",
- [ FILTER_OP_BIN_XOR ] = "BIN_XOR",
+ [ FILTER_OP_BIT_AND ] = "BIT_AND",
+ [ FILTER_OP_BIT_OR ] = "BIT_OR",
+ [ FILTER_OP_BIT_XOR ] = "BIT_XOR",
/* binary comparators */
[ FILTER_OP_EQ ] = "EQ",
/* globbing pattern binary operator: apply to */
[ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
[ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ [ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
+ [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
+ [ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
+
+ [ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
+ [ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
+ [ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
+ [ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
+
+ [ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
+ [ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
+ [ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
+ [ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
+ [ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
+ [ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
+ [ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
+ [ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
+ [ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
+ [ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
+ [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
+ [ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
};
const char *lttng_filter_print_op(enum filter_op op)
struct bytecode_runtime *runtime,
uint32_t runtime_len,
uint32_t reloc_offset,
- const char *field_name)
+ const char *field_name,
+ enum filter_op filter_op)
{
const struct lttng_event_desc *desc;
const struct lttng_event_field *fields, *field = NULL;
unsigned int nr_fields, i;
- struct field_ref *field_ref;
struct load_op *op;
uint32_t field_offset = 0;
break;
case atype_array:
case atype_sequence:
+ case atype_array_bitfield:
+ case atype_sequence_bitfield:
field_offset += sizeof(unsigned long);
field_offset += sizeof(void *);
break;
return -EINVAL;
/* set type */
- op = (struct load_op *) &runtime->data[reloc_offset];
- field_ref = (struct field_ref *) op->data;
- switch (field->type.atype) {
- case atype_integer:
- case atype_enum:
- op->op = FILTER_OP_LOAD_FIELD_REF_S64;
- break;
- case atype_array:
- case atype_sequence:
- if (field->user)
- op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
- else
- op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
- break;
- case atype_string:
- if (field->user)
- op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
- else
- op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
+ op = (struct load_op *) &runtime->code[reloc_offset];
+
+ switch (filter_op) {
+ case FILTER_OP_LOAD_FIELD_REF:
+ {
+ struct field_ref *field_ref;
+
+ field_ref = (struct field_ref *) op->data;
+ switch (field->type.atype) {
+ case atype_integer:
+ case atype_enum:
+ op->op = FILTER_OP_LOAD_FIELD_REF_S64;
+ break;
+ case atype_array:
+ case atype_sequence:
+ if (field->user)
+ op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
+ else
+ op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
+ break;
+ case atype_string:
+ if (field->user)
+ op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
+ else
+ op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
+ break;
+ case atype_struct: /* Unsupported. */
+ case atype_array_compound: /* Unsupported. */
+ case atype_sequence_compound: /* Unsupported. */
+ case atype_variant: /* Unsupported. */
+ case atype_array_bitfield: /* Unsupported. */
+ case atype_sequence_bitfield: /* Unsupported. */
+ default:
+ return -EINVAL;
+ }
+ /* set offset */
+ field_ref->offset = (uint16_t) field_offset;
break;
- case atype_struct: /* Unsupported. */
- case atype_array_compound: /* Unsupported. */
- case atype_sequence_compound: /* Unsupported. */
- case atype_variant: /* Unsupported. */
+ }
default:
return -EINVAL;
}
- /* set offset */
- field_ref->offset = (uint16_t) field_offset;
return 0;
}
struct bytecode_runtime *runtime,
uint32_t runtime_len,
uint32_t reloc_offset,
- const char *context_name)
+ const char *context_name,
+ enum filter_op filter_op)
{
- struct field_ref *field_ref;
struct load_op *op;
struct lttng_ctx_field *ctx_field;
int idx;
/* Get context return type */
ctx_field = &lttng_static_ctx->fields[idx];
- op = (struct load_op *) &runtime->data[reloc_offset];
- field_ref = (struct field_ref *) op->data;
- switch (ctx_field->event_field.type.atype) {
- case atype_integer:
- case atype_enum:
- op->op = FILTER_OP_GET_CONTEXT_REF_S64;
- break;
- /* Sequence and array supported as string */
- case atype_string:
- case atype_array:
- case atype_sequence:
- BUG_ON(ctx_field->event_field.user);
- op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
+ op = (struct load_op *) &runtime->code[reloc_offset];
+
+ switch (filter_op) {
+ case FILTER_OP_GET_CONTEXT_REF:
+ {
+ struct field_ref *field_ref;
+
+ field_ref = (struct field_ref *) op->data;
+ switch (ctx_field->event_field.type.atype) {
+ case atype_integer:
+ case atype_enum:
+ op->op = FILTER_OP_GET_CONTEXT_REF_S64;
+ break;
+ /* Sequence and array supported as string */
+ case atype_string:
+ case atype_array:
+ case atype_sequence:
+ BUG_ON(ctx_field->event_field.user);
+ op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
+ break;
+ case atype_struct: /* Unsupported. */
+ case atype_array_compound: /* Unsupported. */
+ case atype_sequence_compound: /* Unsupported. */
+ case atype_variant: /* Unsupported. */
+ case atype_array_bitfield: /* Unsupported. */
+ case atype_sequence_bitfield: /* Unsupported. */
+ default:
+ return -EINVAL;
+ }
+ /* set offset to context index within channel contexts */
+ field_ref->offset = (uint16_t) idx;
break;
- case atype_struct: /* Unsupported. */
- case atype_array_compound: /* Unsupported. */
- case atype_sequence_compound: /* Unsupported. */
- case atype_variant: /* Unsupported. */
+ }
default:
return -EINVAL;
}
- /* set offset to context index within channel contexts */
- field_ref->offset = (uint16_t) idx;
return 0;
}
if (runtime_len - reloc_offset < sizeof(uint16_t))
return -EINVAL;
- op = (struct load_op *) &runtime->data[reloc_offset];
+ op = (struct load_op *) &runtime->code[reloc_offset];
switch (op->op) {
case FILTER_OP_LOAD_FIELD_REF:
return apply_field_reloc(event, runtime, runtime_len,
- reloc_offset, name);
+ reloc_offset, name, op->op);
case FILTER_OP_GET_CONTEXT_REF:
return apply_context_reloc(event, runtime, runtime_len,
- reloc_offset, name);
+ reloc_offset, name, op->op);
+ case FILTER_OP_GET_SYMBOL:
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ /*
+ * Will be handled by the load specialization phase or
+ * dynamically by the interpreter.
+ */
+ return 0;
default:
printk(KERN_WARNING "Unknown reloc op type %u\n", op->op);
return -EINVAL;
goto alloc_error;
}
runtime->p.bc = filter_bytecode;
+ runtime->p.event = event;
runtime->len = filter_bytecode->bc.reloc_offset;
/* copy original bytecode */
- memcpy(runtime->data, filter_bytecode->bc.data, runtime->len);
+ memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
/*
* apply relocs. Those are a uint16_t (offset in bytecode)
* followed by a string (field name).
goto link_error;
}
/* Specialize bytecode */
- ret = lttng_filter_specialize_bytecode(runtime);
+ ret = lttng_filter_specialize_bytecode(event, runtime);
if (ret) {
goto link_error;
}
list_for_each_entry_safe(runtime, tmp,
&event->bytecode_runtime_head, p.node) {
+ kfree(runtime->data);
kfree(runtime);
}
}
#define FILTER_STACK_LEN 10 /* includes 2 dummy */
#define FILTER_STACK_EMPTY 1
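+/* Upper bound, in bytes, for the runtime data area (offsets fit in 16 bits). */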
+#define FILTER_MAX_DATA_LEN 65536
+
#ifdef DEBUG
#define dbg_printk(fmt, args...) \
printk(KERN_DEBUG "[debug bytecode in %s:%s@%u] " fmt, \
/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
struct bytecode_runtime {
struct lttng_bytecode_runtime p;
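+ /* Data area for specialized instructions, allocated separately from the code. */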
+ size_t data_len;
+ size_t data_alloc_len;
+ char *data;
uint16_t len;
- char data[0];
+ char code[0];
};
enum entry_type {
REG_STRING,
REG_STAR_GLOB_STRING,
REG_TYPE_UNKNOWN,
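+ /* Pointer to an object being traversed (composed type). */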
+ REG_PTR,
+};
+
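+/* Origin of a load: one of the three traversal roots, or an object reached from a root. */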
+enum load_type {
+ LOAD_ROOT_CONTEXT,
+ LOAD_ROOT_APP_CONTEXT,
+ LOAD_ROOT_PAYLOAD,
+ LOAD_OBJECT,
+};
+
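+/* Concrete type of the object a traversal currently points to. */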
+enum object_type {
+ OBJECT_TYPE_S8,
+ OBJECT_TYPE_S16,
+ OBJECT_TYPE_S32,
+ OBJECT_TYPE_S64,
+ OBJECT_TYPE_U8,
+ OBJECT_TYPE_U16,
+ OBJECT_TYPE_U32,
+ OBJECT_TYPE_U64,
+
+ OBJECT_TYPE_DOUBLE,
+ OBJECT_TYPE_STRING,
+ OBJECT_TYPE_STRING_SEQUENCE,
+
+ OBJECT_TYPE_SEQUENCE,
+ OBJECT_TYPE_ARRAY,
+ OBJECT_TYPE_STRUCT,
+ OBJECT_TYPE_VARIANT,
+
+ OBJECT_TYPE_DYNAMIC,
+};
+
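+/* Data for a specialized get_index operation, kept in the runtime data area. */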
+struct filter_get_index_data {
+ uint64_t offset; /* in bytes */
+ size_t ctx_index;
+ size_t array_len;
+ struct {
+ size_t len;
+ enum object_type type;
+ bool rev_bo; /* reverse byte order */
+ } elem;
};
/* Validation stack */
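+/* Load state tracked for REG_PTR entries on the validation stack. */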
+struct vstack_load {
+ enum load_type type;
+ enum object_type object_type;
+ const struct lttng_event_field *field;
+ bool rev_bo; /* reverse byte order */
+};
+
struct vstack_entry {
enum entry_type type;
+ struct vstack_load load;
};
struct vstack {
ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
};
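+
+/* Interpreter-side state of the pointer held in an estack entry. */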
+struct load_ptr {
+ enum load_type type;
+ enum object_type object_type;
+ const void *ptr;
+ bool rev_bo;
+ /* Temporary place-holders for contexts. */
+ union {
+ int64_t s64;
+ uint64_t u64;
+ double d;
+ } u;
+ /*
+ * "field" is only needed when nested under a variant, in which
+ * case we cannot specialize the nested operations.
+ */
+ const struct lttng_event_field *field;
+};
+
struct estack_entry {
union {
int64_t v;
enum estack_string_literal_type literal_type;
int user; /* is string from userspace ? */
} s;
+ struct load_ptr ptr;
} u;
};
const char *lttng_filter_print_op(enum filter_op op);
int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode);
-int lttng_filter_specialize_bytecode(struct bytecode_runtime *bytecode);
+int lttng_filter_specialize_bytecode(struct lttng_event *event,
+ struct bytecode_runtime *bytecode);
uint64_t lttng_filter_false(void *filter_data,
struct lttng_probe_ctx *lttng_probe_ctx,
_ctf_integer_ext(_type, _item, _user_src, __BIG_ENDIAN, 16, 0, 1)
#undef ctf_array_nowrite
-#define ctf_array_nowrite(_type, _item, _user_src, _length) \
- _ctf_array_encoded(_type, _item, _user_src, _length, none, 0, 1)
+#define ctf_array_nowrite(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, none, __BYTE_ORDER, 10, 0, 1)
+
+#undef ctf_array_network_nowrite
+#define ctf_array_network_nowrite(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, none, __BIG_ENDIAN, 10, 0, 1)
#undef ctf_array_text_nowrite
-#define ctf_array_text_nowrite(_type, _item, _user_src, _length) \
- _ctf_array_encoded(_type, _item, _user_src, _length, UTF8, 0, 1)
+#define ctf_array_text_nowrite(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, UTF8, __BYTE_ORDER, 10, 0, 1)
#undef ctf_array_bitfield_nowrite
#define ctf_array_bitfield_nowrite(_type, _item, _src, _length) \
_ctf_sequence_encoded(_type, _item, _user_src, \
_length_type, _user_src_length, none, __BYTE_ORDER, 10, 0, 1)
+#undef ctf_sequence_network_nowrite
+#define ctf_sequence_network_nowrite(_type, _item, _user_src, _length_type, _user_src_length) \
+ _ctf_sequence_encoded(_type, _item, _user_src, \
+ _length_type, _user_src_length, none, __BIG_ENDIAN, 10, 0, 1)
+
#undef ctf_sequence_text_nowrite
#define ctf_sequence_text_nowrite(_type, _item, _user_src, _length_type, _user_src_length) \
_ctf_sequence_encoded(_type, _item, _user_src, \
_ctf_integer_ext(_type, _item, _user_src, __BIG_ENDIAN, 16, 1, 1)
#undef ctf_user_array_nowrite
-#define ctf_user_array_nowrite(_type, _item, _user_src, _length) \
- _ctf_array_encoded(_type, _item, _user_src, _length, none, 1, 1)
+#define ctf_user_array_nowrite(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, none, __BYTE_ORDER, 10, 1, 1)
+
+#undef ctf_user_array_network_nowrite
+#define ctf_user_array_network_nowrite(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, none, __BIG_ENDIAN, 10, 1, 1)
#undef ctf_user_array_text_nowrite
-#define ctf_user_array_text_nowrite(_type, _item, _user_src, _length) \
- _ctf_array_encoded(_type, _item, _user_src, _length, UTF8, 1, 1)
+#define ctf_user_array_text_nowrite(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, UTF8, __BYTE_ORDER, 10, 1, 1)
#undef ctf_user_array_bitfield_nowrite
#define ctf_user_array_bitfield_nowrite(_type, _item, _src, _length) \
_ctf_sequence_encoded(_type, _item, _user_src, \
_length_type, _user_src_length, none, __BYTE_ORDER, 10, 1, 1)
+#undef ctf_user_sequence_network_nowrite
+#define ctf_user_sequence_network_nowrite(_type, _item, _user_src, _length_type, _user_src_length) \
+ _ctf_sequence_encoded(_type, _item, _user_src, \
+ _length_type, _user_src_length, none, __BIG_ENDIAN, 10, 1, 1)
+
#undef ctf_user_sequence_text_nowrite
#define ctf_user_sequence_text_nowrite(_type, _item, _user_src, _length_type, _user_src_length) \
_ctf_sequence_encoded(_type, _item, _user_src, \
#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, \
- _user, _nowrite)
+ _byte_order, _base, _user, _nowrite)
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite)
#undef ctf_array
#define ctf_array(_type, _item, _src, _length)
+#undef ctf_array_hex
+#define ctf_array_hex(_type, _item, _src, _length)
+
+#undef ctf_array_network
+#define ctf_array_network(_type, _item, _src, _length)
+
+#undef ctf_array_network_hex
+#define ctf_array_network_hex(_type, _item, _src, _length)
+
#undef ctf_array_text
#define ctf_array_text(_type, _item, _src, _length)
#undef ctf_sequence_network
#define ctf_sequence_network(_type, _item, _src, _length_type, _src_length)
+#undef ctf_sequence_network_hex
+#define ctf_sequence_network_hex(_type, _item, _src, _length_type, _src_length)
+
#undef ctf_sequence_text
#define ctf_sequence_text(_type, _item, _src, _length_type, _src_length)
#undef ctf_array_nowrite
#define ctf_array_nowrite(_type, _item, _src, _length)
+#undef ctf_array_network_nowrite
+#define ctf_array_network_nowrite(_type, _item, _src, _length)
+
#undef ctf_array_text_nowrite
#define ctf_array_text_nowrite(_type, _item, _src, _length)
#undef ctf_sequence_nowrite
#define ctf_sequence_nowrite(_type, _item, _src, _length_type, _src_length)
+#undef ctf_sequence_network_nowrite
+#define ctf_sequence_network_nowrite(_type, _item, _src, _length_type, _src_length)
+
#undef ctf_sequence_text_nowrite
#define ctf_sequence_text_nowrite(_type, _item, _src, _length_type, _src_length)
#undef ctf_user_array
#define ctf_user_array(_type, _item, _user_src, _length)
+#undef ctf_user_array_hex
+#define ctf_user_array_hex(_type, _item, _user_src, _length)
+
+#undef ctf_user_array_network
+#define ctf_user_array_network(_type, _item, _user_src, _length)
+
+#undef ctf_user_array_network_hex
+#define ctf_user_array_network_hex(_type, _item, _user_src, _length)
+
#undef ctf_user_array_text
#define ctf_user_array_text(_type, _item, _user_src, _length)
#undef ctf_user_sequence
#define ctf_user_sequence(_type, _item, _user_src, _length_type, _user_src_length)
+#undef ctf_user_sequence_hex
+#define ctf_user_sequence_hex(_type, _item, _user_src, _length_type, _user_src_length)
+
+#undef ctf_user_sequence_network
+#define ctf_user_sequence_network(_type, _item, _user_src, _length_type, _user_src_length)
+
+#undef ctf_user_sequence_network_hex
+#define ctf_user_sequence_network_hex(_type, _item, _user_src, _length_type, _user_src_length)
+
#undef ctf_user_sequence_text
#define ctf_user_sequence_text(_type, _item, _user_src, _length_type, _user_src_length)
#undef ctf_user_array_nowrite
#define ctf_user_array_nowrite(_type, _item, _user_src, _length)
+#undef ctf_user_array_network_nowrite
+#define ctf_user_array_network_nowrite(_type, _item, _user_src, _length)
+
#undef ctf_user_array_text_nowrite
#define ctf_user_array_text_nowrite(_type, _item, _user_src, _length)
#undef ctf_user_sequence_nowrite
#define ctf_user_sequence_nowrite(_type, _item, _user_src, _length_type, _user_src_length)
+#undef ctf_user_sequence_network_nowrite
+#define ctf_user_sequence_network_nowrite(_type, _item, _user_src, _length_type, _user_src_length)
+
#undef ctf_user_sequence_text_nowrite
#define ctf_user_sequence_text_nowrite(_type, _item, _user_src, _length_type, _user_src_length)
#undef ctf_array
#define ctf_array(_type, _item, _src, _length) \
- _ctf_array_encoded(_type, _item, _src, _length, none, 0, 0)
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, none, __BYTE_ORDER, 10, 0, 0)
+
+#undef ctf_array_hex
+#define ctf_array_hex(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, none, __BYTE_ORDER, 16, 0, 0)
+
+#undef ctf_array_network
+#define ctf_array_network(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, none, __BIG_ENDIAN, 10, 0, 0)
+
+#undef ctf_array_network_hex
+#define ctf_array_network_hex(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, none, __BIG_ENDIAN, 16, 0, 0)
#undef ctf_array_text
#define ctf_array_text(_type, _item, _src, _length) \
- _ctf_array_encoded(_type, _item, _src, _length, UTF8, 0, 0)
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, UTF8, __BYTE_ORDER, 10, 0, 0)
#undef ctf_array_bitfield
#define ctf_array_bitfield(_type, _item, _src, _length) \
_ctf_sequence_encoded(_type, _item, _src, \
_length_type, _src_length, none, __BIG_ENDIAN, 10, 0, 0)
+#undef ctf_sequence_network_hex
+#define ctf_sequence_network_hex(_type, _item, _src, _length_type, _src_length) \
+ _ctf_sequence_encoded(_type, _item, _src, \
+ _length_type, _src_length, none, __BIG_ENDIAN, 16, 0, 0)
+
#undef ctf_sequence_text
#define ctf_sequence_text(_type, _item, _src, _length_type, _src_length) \
_ctf_sequence_encoded(_type, _item, _src, \
_ctf_integer_ext(_type, _item, _src, __BIG_ENDIAN, 16, 1, 0)
#undef ctf_user_array
-#define ctf_user_array(_type, _item, _src, _length) \
- _ctf_array_encoded(_type, _item, _src, _length, none, 1, 0)
+#define ctf_user_array(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, none, __BYTE_ORDER, 10, 1, 0)
+
+#undef ctf_user_array_hex
+#define ctf_user_array_hex(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, none, __BYTE_ORDER, 16, 1, 0)
+
+#undef ctf_user_array_network
+#define ctf_user_array_network(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, none, __BIG_ENDIAN, 10, 1, 0)
+
+#undef ctf_user_array_network_hex
+#define ctf_user_array_network_hex(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, none, __BIG_ENDIAN, 16, 1, 0)
#undef ctf_user_array_text
-#define ctf_user_array_text(_type, _item, _src, _length) \
- _ctf_array_encoded(_type, _item, _src, _length, UTF8, 1, 0)
+#define ctf_user_array_text(_type, _item, _src, _length) \
+ _ctf_array_encoded(_type, _item, _src, \
+ _length, UTF8, __BYTE_ORDER, 10, 1, 0)
#undef ctf_user_array_bitfield
#define ctf_user_array_bitfield(_type, _item, _src, _length) \
_ctf_array_bitfield(_type, _item, _src, _length, 1, 0)
#undef ctf_user_sequence
_ctf_sequence_encoded(_type, _item, _src, \
_length_type, _src_length, none, __BYTE_ORDER, 16, 1, 0)
+#undef ctf_user_sequence_network
+#define ctf_user_sequence_network(_type, _item, _src, _length_type, _src_length) \
+ _ctf_sequence_encoded(_type, _item, _src, \
+ _length_type, _src_length, none, __BIG_ENDIAN, 10, 1, 0)
+
+#undef ctf_user_sequence_network_hex
+#define ctf_user_sequence_network_hex(_type, _item, _src, _length_type, _src_length) \
+ _ctf_sequence_encoded(_type, _item, _src, \
+ _length_type, _src_length, none, __BIG_ENDIAN, 16, 1, 0)
+
#undef ctf_user_sequence_text
#define ctf_user_sequence_text(_type, _item, _src, _length_type, _src_length) \
_ctf_sequence_encoded(_type, _item, _src, \
#define ctf_array_type(_type, _src, _length) \
ctf_array(_type, unused, _src, _length)
+#undef ctf_array_hex_type
+#define ctf_array_hex_type(_type, _src, _length) \
+ ctf_array_hex(_type, unused, _src, _length)
+
+#undef ctf_array_network_type
+#define ctf_array_network_type(_type, _src, _length) \
+ ctf_array_network(_type, unused, _src, _length)
+
+#undef ctf_array_network_hex_type
+#define ctf_array_network_hex_type(_type, _src, _length) \
+ ctf_array_network_hex(_type, unused, _src, _length)
+
#undef ctf_array_text_type
#define ctf_array_text_type(_type, _src, _length) \
ctf_array_text(_type, unused, _src, _length)
#define ctf_sequence_network_type(_type, _src, _length_type, _src_length) \
ctf_sequence_network(_type, unused, _src, _length_type, _src_length)
+#undef ctf_sequence_network_hex_type
+#define ctf_sequence_network_hex_type(_type, _src, _length_type, _src_length) \
+ ctf_sequence_network_hex(_type, unused, _src, _length_type, _src_length)
+
#undef ctf_sequence_text_type
#define ctf_sequence_text_type(_type, _src, _length_type, _src_length) \
ctf_sequence_text(_type, unused, _src, _length_type, _src_length)
#define ctf_user_array_type(_type, _src, _length) \
ctf_user_array(_type, unused, _src, _length)
+#undef ctf_user_array_hex_type
+#define ctf_user_array_hex_type(_type, _src, _length) \
+ ctf_user_array_hex(_type, unused, _src, _length)
+
+#undef ctf_user_array_network_type
+#define ctf_user_array_network_type(_type, _src, _length) \
+ ctf_user_array_network(_type, unused, _src, _length)
+
+#undef ctf_user_array_network_hex_type
+#define ctf_user_array_network_hex_type(_type, _src, _length) \
+ ctf_user_array_network_hex(_type, unused, _src, _length)
+
#undef ctf_user_array_text_type
#define ctf_user_array_text_type(_type, _src, _length) \
ctf_user_array_text(_type, unused, _src, _length)
#define ctf_user_sequence_network_type(_type, _src, _length_type, _src_length) \
ctf_user_sequence_network(_type, unused, _src, _length_type, _src_length)
+#undef ctf_user_sequence_network_hex_type
+#define ctf_user_sequence_network_hex_type(_type, _src, _length_type, _src_length) \
+ ctf_user_sequence_network_hex(_type, unused, _src, _length_type, _src_length)
+
#undef ctf_user_sequence_text_type
#define ctf_user_sequence_text_type(_type, _src, _length_type, _src_length) \
ctf_user_sequence_text(_type, unused, _src, _length_type, _src_length)
},
#undef _ctf_array_encoded
-#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
+#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
{ \
.name = #_item, \
.type = \
{ \
.array = \
{ \
- .elem_type = __type_integer(_type, 0, 0, 0, __BYTE_ORDER, 10, _encoding), \
+ .elem_type = __type_integer(_type, 0, 0, 0, _byte_order, _base, _encoding), \
.length = _length, \
} \
} \
.name = #_item, \
.type = \
{ \
- .atype = atype_array, \
+ .atype = atype_array_bitfield, \
.u = \
{ \
.array = \
.name = #_item, \
.type = \
{ \
- .atype = atype_sequence, \
+ .atype = atype_sequence_bitfield, \
.u = \
{ \
.sequence = \
__event_len += sizeof(_type);
#undef _ctf_array_encoded
-#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
+#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
__event_len += sizeof(_type) * (_length);
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
- _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)
+ _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
_ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
#undef _ctf_array_encoded
-#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
+#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
{ \
unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \
const void *__ctf_tmp_ptr = (_src); \
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
- _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)
+ _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
__event_align = max_t(size_t, __event_align, lttng_alignof(_type));
#undef _ctf_array_encoded
-#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
+#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
__event_align = max_t(size_t, __event_align, lttng_alignof(_type));
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
- _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)
+ _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
_ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
#undef _ctf_array_encoded
-#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
+#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
if (_user) { \
__chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/byteorder/generic.h>
+#include <asm/byteorder.h>
#include <lttng-events.h>
#include <lttng-tracer.h>
{
int i, netint;
long values[] = { 1, 2, 3 };
+ uint32_t net_values[] = { 1, 2, 3 };
char text[10] = "test";
char escape[10] = "\\*";
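+ /* Convert test values to network byte order for the _network fields. */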
+ for (i = 0; i < 3; i++) {
+ net_values[i] = htonl(net_values[i]);
+ }
for (i = 0; i < nr_iter; i++) {
netint = htonl(i);
- trace_lttng_test_filter_event(i, netint, values, text, strlen(text), escape);
+ trace_lttng_test_filter_event(i, netint, values, text, strlen(text), escape, net_values);
}
}