int link_failed;
struct cds_list_head node; /* list of bytecode runtime in event */
struct lttng_session *session;
+ struct lttng_event *event;
};
/*
uint16_t offset;
} __attribute__((packed));
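+/*
+ * Immediate operands for the object-traversal instructions below.
+ * Illustrative sketch of an instruction stream (assuming a filter
+ * expression on a context field such as "$ctx.procname"):
+ *   GET_CONTEXT_ROOT
+ *   GET_SYMBOL     <struct get_symbol pointing at "procname">
+ *   LOAD_FIELD
+ * The specialize phase rewrites GET_SYMBOL into GET_INDEX_U16 and
+ * LOAD_FIELD into a typed LOAD_FIELD_* opcode when types are known.
+ */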
+struct get_symbol {
+ /* Symbol offset. */
+ uint16_t offset;
+} LTTNG_PACKED;
+
+struct get_index_u16 {
+ uint16_t index;
+} LTTNG_PACKED;
+
+struct get_index_u64 {
+ uint64_t index;
+} LTTNG_PACKED;
+
struct literal_numeric {
int64_t v;
} __attribute__((packed));
FILTER_OP_MINUS = 6,
FILTER_OP_RSHIFT = 7,
FILTER_OP_LSHIFT = 8,
- FILTER_OP_BIN_AND = 9,
- FILTER_OP_BIN_OR = 10,
- FILTER_OP_BIN_XOR = 11,
+ FILTER_OP_BIT_AND = 9,
+ FILTER_OP_BIT_OR = 10,
+ FILTER_OP_BIT_XOR = 11,
/* binary comparators */
FILTER_OP_EQ = 12,
FILTER_OP_EQ_STAR_GLOB_STRING = 77,
FILTER_OP_NE_STAR_GLOB_STRING = 78,
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ FILTER_OP_GET_CONTEXT_ROOT = 79,
+ FILTER_OP_GET_APP_CONTEXT_ROOT = 80,
+ FILTER_OP_GET_PAYLOAD_ROOT = 81,
+
+ FILTER_OP_GET_SYMBOL = 82,
+ FILTER_OP_GET_SYMBOL_FIELD = 83,
+ FILTER_OP_GET_INDEX_U16 = 84,
+ FILTER_OP_GET_INDEX_U64 = 85,
+
+ FILTER_OP_LOAD_FIELD = 86,
+ FILTER_OP_LOAD_FIELD_S8 = 87,
+ FILTER_OP_LOAD_FIELD_S16 = 88,
+ FILTER_OP_LOAD_FIELD_S32 = 89,
+ FILTER_OP_LOAD_FIELD_S64 = 90,
+ FILTER_OP_LOAD_FIELD_U8 = 91,
+ FILTER_OP_LOAD_FIELD_U16 = 92,
+ FILTER_OP_LOAD_FIELD_U32 = 93,
+ FILTER_OP_LOAD_FIELD_U64 = 94,
+ FILTER_OP_LOAD_FIELD_STRING = 95,
+ FILTER_OP_LOAD_FIELD_SEQUENCE = 96,
+ FILTER_OP_LOAD_FIELD_DOUBLE = 97,
+
NR_FILTER_OPS,
};
#define _LGPL_SOURCE
#include <urcu-pointer.h>
#include <stdint.h>
+#include <byteswap.h>
#include "lttng-filter.h"
#include "string-utils.h"
*/
#define START_OP \
- start_pc = &bytecode->data[0]; \
+ start_pc = &bytecode->code[0]; \
pc = next_pc = start_pc; \
if (unlikely(pc - start_pc >= bytecode->len)) \
goto end; \
#endif
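+/*
+ * Load the value of context field "idx" into "ptr", converted to one
+ * of the interpreter object types (s64, u64, double or string).
+ */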
+static int context_get_index(struct lttng_ctx *ctx,
+ struct load_ptr *ptr,
+ uint32_t idx)
+{
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_event_field *field;
+ struct lttng_ctx_value v;
+
+ ctx_field = &ctx->fields[idx];
+ field = &ctx_field->event_field;
+ ptr->type = LOAD_OBJECT;
+ /* field is only used for types nested within variants. */
+ ptr->field = NULL;
+
+ switch (field->type.atype) {
+ case atype_integer:
+ ctx_field->get_value(ctx_field, &v);
+ if (field->type.u.basic.integer.signedness) {
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.u.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.u.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ case atype_enum:
+ {
+ const struct lttng_integer_type *itype =
+ &field->type.u.basic.enumeration.container_type;
+
+ ctx_field->get_value(ctx_field, &v);
+ if (itype->signedness) {
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.u.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.u.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ }
+ case atype_array:
+ if (field->type.u.array.elem_type.atype != atype_integer) {
+ ERR("Array nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
+ ERR("Only string arrays are supported for contexts.");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case atype_sequence:
+ if (field->type.u.sequence.elem_type.atype != atype_integer) {
+ ERR("Sequence nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
+ ERR("Only string sequences are supported for contexts.");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case atype_string:
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, &v);
+ ptr->ptr = v.u.str;
+ break;
+	case atype_float:
+		ctx_field->get_value(ctx_field, &v);
+		ptr->object_type = OBJECT_TYPE_DOUBLE;
+		ptr->u.d = v.u.d;
+ ptr->ptr = &ptr->u.d;
+ break;
+ case atype_dynamic:
+ ctx_field->get_value(ctx_field, &v);
+ switch (v.sel) {
+ case LTTNG_UST_DYNAMIC_TYPE_NONE:
+ return -EINVAL;
+ case LTTNG_UST_DYNAMIC_TYPE_S64:
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.u.s64;
+ ptr->ptr = &ptr->u.s64;
+ dbg_printf("context get index dynamic s64 %" PRIi64 "\n", ptr->u.s64);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
+ ptr->object_type = OBJECT_TYPE_DOUBLE;
+ ptr->u.d = v.u.d;
+ ptr->ptr = &ptr->u.d;
+ dbg_printf("context get index dynamic double %g\n", ptr->u.d);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_STRING:
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ptr->ptr = v.u.str;
+ dbg_printf("context get index dynamic string %s\n", (const char *) ptr->ptr);
+ break;
+ default:
+ dbg_printf("Filter warning: unknown dynamic type (%d).\n", (int) v.sel);
+ return -EINVAL;
+ }
+ break;
+ case atype_struct:
+ ERR("Structure type cannot be loaded.");
+ return -EINVAL;
+ default:
+ ERR("Unknown type: %d", (int) field->type.atype);
+ return -EINVAL;
+ }
+ return 0;
+}
+
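+/*
+ * Apply a get_index operation to the pointer on top of the stack:
+ * index into an array or sequence element, look up a context field,
+ * or offset into the event payload.
+ */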
+static int dynamic_get_index(struct lttng_session *session,
+ struct bytecode_runtime *runtime,
+ uint64_t index, struct estack_entry *stack_top)
+{
+ int ret;
+ const struct filter_get_index_data *gid;
+
+ /*
+ * Types nested within variants need to perform dynamic lookup
+ * based on the field descriptions. LTTng-UST does not implement
+ * variants for now.
+ */
+ if (stack_top->u.ptr.field)
+ return -EINVAL;
+ gid = (const struct filter_get_index_data *) &runtime->data[index];
+ switch (stack_top->u.ptr.type) {
+ case LOAD_OBJECT:
+ switch (stack_top->u.ptr.object_type) {
+ case OBJECT_TYPE_ARRAY:
+ {
+ const char *ptr;
+
+ assert(gid->offset < gid->array_len);
+ /* Skip count (unsigned long) */
+ ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+ ptr = ptr + gid->offset;
+ stack_top->u.ptr.ptr = ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ /* field is only used for types nested within variants. */
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ case OBJECT_TYPE_SEQUENCE:
+ {
+ const char *ptr;
+ size_t ptr_seq_len;
+
+ ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+ ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
+ if (gid->offset >= gid->elem.len * ptr_seq_len) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ptr = ptr + gid->offset;
+ stack_top->u.ptr.ptr = ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ /* field is only used for types nested within variants. */
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ case OBJECT_TYPE_STRUCT:
+ ERR("Nested structures are not supported yet.");
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_VARIANT:
+ default:
+ ERR("Unexpected get index type %d",
+ (int) stack_top->u.ptr.object_type);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
+ {
+ struct lttng_ctx *ctx;
+
+ ctx = rcu_dereference(session->ctx);
+ ret = context_get_index(ctx,
+ &stack_top->u.ptr,
+ gid->ctx_index);
+ if (ret) {
+ goto end;
+ }
+ break;
+ }
+ case LOAD_ROOT_PAYLOAD:
+ stack_top->u.ptr.ptr += gid->offset;
+ if (gid->elem.type == OBJECT_TYPE_STRING)
+ stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.type = LOAD_OBJECT;
+ /* field is only used for types nested within variants. */
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
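+/*
+ * Dereference the object on top of the stack, replacing it by the
+ * loaded value. Integers are byte-swapped when rev_bo is set.
+ */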
+static int dynamic_load_field(struct estack_entry *stack_top)
+{
+ int ret;
+
+ switch (stack_top->u.ptr.type) {
+ case LOAD_OBJECT:
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ default:
+ dbg_printf("Filter warning: cannot load root, missing field name.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (stack_top->u.ptr.object_type) {
+ case OBJECT_TYPE_S8:
+ dbg_printf("op load field s8\n");
+ stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
+ stack_top->type = REG_S64;
+ break;
+ case OBJECT_TYPE_S16:
+ {
+ int16_t tmp;
+
+ dbg_printf("op load field s16\n");
+ tmp = *(int16_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_16(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_S32:
+ {
+ int32_t tmp;
+
+ dbg_printf("op load field s32\n");
+ tmp = *(int32_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_32(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_S64:
+ {
+ int64_t tmp;
+
+ dbg_printf("op load field s64\n");
+ tmp = *(int64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_U8:
+ dbg_printf("op load field u8\n");
+ stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
+ stack_top->type = REG_S64;
+ break;
+ case OBJECT_TYPE_U16:
+ {
+ uint16_t tmp;
+
+		dbg_printf("op load field u16\n");
+ tmp = *(uint16_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_16(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_U32:
+ {
+ uint32_t tmp;
+
+ dbg_printf("op load field u32\n");
+ tmp = *(uint32_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_32(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_U64:
+ {
+ uint64_t tmp;
+
+ dbg_printf("op load field u64\n");
+ tmp = *(uint64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_DOUBLE:
+ memcpy(&stack_top->u.d,
+ stack_top->u.ptr.ptr,
+ sizeof(struct literal_double));
+ stack_top->type = REG_DOUBLE;
+ break;
+ case OBJECT_TYPE_STRING:
+ {
+ const char *str;
+
+ dbg_printf("op load field string\n");
+ str = (const char *) stack_top->u.ptr.ptr;
+ stack_top->u.s.str = str;
+ if (unlikely(!stack_top->u.s.str)) {
+ dbg_printf("Filter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ stack_top->u.s.seq_len = SIZE_MAX;
+ stack_top->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ stack_top->type = REG_STRING;
+ break;
+ }
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ {
+ const char *ptr;
+
+ dbg_printf("op load field string sequence\n");
+ ptr = stack_top->u.ptr.ptr;
+ stack_top->u.s.seq_len = *(unsigned long *) ptr;
+ stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ stack_top->type = REG_STRING;
+ if (unlikely(!stack_top->u.s.str)) {
+ dbg_printf("Filter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ stack_top->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ break;
+ }
+ case OBJECT_TYPE_DYNAMIC:
+ /*
+ * Dynamic types in context are looked up
+ * by context get index.
+ */
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_SEQUENCE:
+ case OBJECT_TYPE_ARRAY:
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
+ ret = -EINVAL;
+ goto end;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
/*
* Return 0 (discard), or raise the 0x1 flag (log event).
* Currently, other flags are kept for future extensions and have no
[ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
[ FILTER_OP_RSHIFT ] = &&LABEL_FILTER_OP_RSHIFT,
[ FILTER_OP_LSHIFT ] = &&LABEL_FILTER_OP_LSHIFT,
- [ FILTER_OP_BIN_AND ] = &&LABEL_FILTER_OP_BIN_AND,
- [ FILTER_OP_BIN_OR ] = &&LABEL_FILTER_OP_BIN_OR,
- [ FILTER_OP_BIN_XOR ] = &&LABEL_FILTER_OP_BIN_XOR,
+ [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
+ [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
+ [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
/* binary comparators */
[ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
[ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
[ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,
+
+ /* Instructions for recursive traversal through composed types. */
+ [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
+ [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
+ [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
+
+ [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
+ [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
+ [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
+ [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
+
+ [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
+ [ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
+ [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
+ [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
+ [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
+ [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
+ [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
+ [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
+ [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
+ [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
+ [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
+ [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
};
#endif /* #ifndef INTERPRETER_USE_SWITCH */
#ifdef INTERPRETER_USE_SWITCH
default:
#endif /* INTERPRETER_USE_SWITCH */
- ERR("unknown bytecode op %u\n",
+ ERR("unknown bytecode op %u",
(unsigned int) *(filter_opcode_t *) pc);
ret = -EINVAL;
goto end;
OP(FILTER_OP_RETURN):
/* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
- retval = !!estack_ax_v;
+ /* Handle dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64:
+ retval = !!estack_ax_v;
+ break;
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
ret = 0;
goto end;
OP(FILTER_OP_MINUS):
OP(FILTER_OP_RSHIFT):
OP(FILTER_OP_LSHIFT):
- OP(FILTER_OP_BIN_AND):
- OP(FILTER_OP_BIN_OR):
- OP(FILTER_OP_BIN_XOR):
- ERR("unsupported bytecode op %u\n",
+ ERR("unsupported bytecode op %u",
(unsigned int) *(filter_opcode_t *) pc);
ret = -EINVAL;
goto end;
next_pc += sizeof(struct binary_op);
PO;
}
+ OP(FILTER_OP_BIT_AND):
+ {
+ int64_t res;
+
+ /* Dynamic typing. */
+ if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = (estack_bx_v & estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(FILTER_OP_BIT_OR):
+ {
+ int64_t res;
+
+ /* Dynamic typing. */
+ if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = (estack_bx_v | estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(FILTER_OP_BIT_XOR):
+ {
+ int64_t res;
+
+ /* Dynamic typing. */
+ if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = (estack_bx_v ^ estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
/* unary */
OP(FILTER_OP_UNARY_PLUS):
PO;
}
+ OP(FILTER_OP_GET_CONTEXT_ROOT):
+ {
+ dbg_printf("op get context root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax_t = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
+ {
+ dbg_printf("op get app context root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_APP_CONTEXT;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax_t = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(FILTER_OP_GET_PAYLOAD_ROOT):
+ {
+		dbg_printf("op get payload root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
+ estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax_t = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(FILTER_OP_GET_SYMBOL):
+ {
+ dbg_printf("op get symbol\n");
+ switch (estack_ax(stack, top)->u.ptr.type) {
+ case LOAD_OBJECT:
+ ERR("Nested fields not implemented yet.");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+			/*
+			 * Symbol lookup on a root type is performed by
+			 * the specialize phase; the interpreter should
+			 * never encounter it here.
+			 */
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ PO;
+ }
+
+ OP(FILTER_OP_GET_SYMBOL_FIELD):
+ {
+ /*
+ * Used for first variant encountered in a
+ * traversal. Variants are not implemented yet.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ OP(FILTER_OP_GET_INDEX_U16):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("op get index u16\n");
+ ret = dynamic_get_index(session, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ PO;
+ }
+
+ OP(FILTER_OP_GET_INDEX_U64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("op get index u64\n");
+ ret = dynamic_get_index(session, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ PO;
+ }
+
+ OP(FILTER_OP_LOAD_FIELD):
+ {
+ dbg_printf("op load field\n");
+ ret = dynamic_load_field(estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(FILTER_OP_LOAD_FIELD_S8):
+ {
+ dbg_printf("op load field s8\n");
+
+ estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_S16):
+ {
+ dbg_printf("op load field s16\n");
+
+ estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_S32):
+ {
+ dbg_printf("op load field s32\n");
+
+ estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_S64):
+ {
+ dbg_printf("op load field s64\n");
+
+ estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_U8):
+ {
+ dbg_printf("op load field u8\n");
+
+ estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_U16):
+ {
+ dbg_printf("op load field u16\n");
+
+ estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_U32):
+ {
+ dbg_printf("op load field u32\n");
+
+ estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_U64):
+ {
+ dbg_printf("op load field u64\n");
+
+ estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(FILTER_OP_LOAD_FIELD_DOUBLE):
+ {
+ dbg_printf("op load field double\n");
+
+ memcpy(&estack_ax(stack, top)->u.d,
+ estack_ax(stack, top)->u.ptr.ptr,
+ sizeof(struct literal_double));
+ estack_ax(stack, top)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(FILTER_OP_LOAD_FIELD_STRING):
+ {
+ const char *str;
+
+ dbg_printf("op load field string\n");
+ str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.str = str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Filter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
+ {
+ const char *ptr;
+
+ dbg_printf("op load field string sequence\n");
+ ptr = estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
+ estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ estack_ax(stack, top)->type = REG_STRING;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Filter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
END_OP
end:
/* return 0 (discard) on error */
#define _LGPL_SOURCE
#include "lttng-filter.h"
+#include <lttng/align.h>
-int lttng_filter_specialize_bytecode(struct bytecode_runtime *bytecode)
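+/* Find last (most significant) bit set, 1-based; lttng_fls(0) == 0. */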
+static int lttng_fls(int val)
+{
+ int r = 32;
+ unsigned int x = (unsigned int) val;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xFFFF0000U)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xFF000000U)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xF0000000U)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xC0000000U)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000U)) {
+ r -= 1;
+ }
+ return r;
+}
+
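+/*
+ * Smallest order such that (1U << order) >= count, e.g.
+ * get_count_order(5) == 3.
+ */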
+static int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = lttng_fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
+
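+/*
+ * Reserve "len" bytes, aligned on "align", in the runtime data area
+ * and return the offset of the reserved space. The allocation grows
+ * to the larger of the next power of two and twice its previous size;
+ * requests that would grow the used length past FILTER_MAX_DATA_LEN
+ * are refused.
+ */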
+static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
+ size_t align, size_t len)
+{
+ ssize_t ret;
+ size_t padding = offset_align(runtime->data_len, align);
+ size_t new_len = runtime->data_len + padding + len;
+ size_t new_alloc_len = new_len;
+ size_t old_alloc_len = runtime->data_alloc_len;
+
+ if (new_len > FILTER_MAX_DATA_LEN)
+ return -EINVAL;
+
+ if (new_alloc_len > old_alloc_len) {
+ char *newptr;
+
+ new_alloc_len =
+ max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
+ newptr = realloc(runtime->data, new_alloc_len);
+ if (!newptr)
+ return -ENOMEM;
+ runtime->data = newptr;
+		/*
+		 * Zero the newly allocated memory, from the end of the
+		 * previous allocation onward.
+		 */
+ memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
+ runtime->data_alloc_len = new_alloc_len;
+ }
+ runtime->data_len += padding;
+ ret = runtime->data_len;
+ runtime->data_len += len;
+ return ret;
+}
+
+static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
+ const void *p, size_t align, size_t len)
+{
+ ssize_t offset;
+
+ offset = bytecode_reserve_data(runtime, align, len);
+ if (offset < 0)
+		return offset;
+ memcpy(&runtime->data[offset], p, len);
+ return offset;
+}
+
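+/*
+ * Replace a generic FILTER_OP_LOAD_FIELD with a typed load opcode when
+ * the object type is statically known. Multi-byte integer loads that
+ * need a byte swap keep the generic opcode so the interpreter performs
+ * the swap at runtime.
+ */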
+static int specialize_load_field(struct vstack_entry *stack_top,
+ struct load_op *insn)
+{
+ int ret;
+
+ switch (stack_top->load.type) {
+ case LOAD_OBJECT:
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ default:
+ dbg_printf("Filter warning: cannot load root, missing field name.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (stack_top->load.object_type) {
+ case OBJECT_TYPE_S8:
+ dbg_printf("op load field s8\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_S8;
+ break;
+ case OBJECT_TYPE_S16:
+ dbg_printf("op load field s16\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_S16;
+ break;
+ case OBJECT_TYPE_S32:
+ dbg_printf("op load field s32\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_S32;
+ break;
+ case OBJECT_TYPE_S64:
+ dbg_printf("op load field s64\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_S64;
+ break;
+ case OBJECT_TYPE_U8:
+ dbg_printf("op load field u8\n");
+ stack_top->type = REG_S64;
+ insn->op = FILTER_OP_LOAD_FIELD_U8;
+ break;
+ case OBJECT_TYPE_U16:
+ dbg_printf("op load field u16\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_U16;
+ break;
+ case OBJECT_TYPE_U32:
+ dbg_printf("op load field u32\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_U32;
+ break;
+ case OBJECT_TYPE_U64:
+ dbg_printf("op load field u64\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = FILTER_OP_LOAD_FIELD_U64;
+ break;
+ case OBJECT_TYPE_DOUBLE:
+ stack_top->type = REG_DOUBLE;
+ insn->op = FILTER_OP_LOAD_FIELD_DOUBLE;
+ break;
+ case OBJECT_TYPE_STRING:
+ dbg_printf("op load field string\n");
+ stack_top->type = REG_STRING;
+ insn->op = FILTER_OP_LOAD_FIELD_STRING;
+ break;
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ dbg_printf("op load field string sequence\n");
+ stack_top->type = REG_STRING;
+ insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
+ break;
+ case OBJECT_TYPE_DYNAMIC:
+ dbg_printf("op load field dynamic\n");
+ stack_top->type = REG_UNKNOWN;
+ /* Don't specialize load op. */
+ break;
+ case OBJECT_TYPE_SEQUENCE:
+ case OBJECT_TYPE_ARRAY:
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
+ ret = -EINVAL;
+ goto end;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
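+/*
+ * Map integer signedness and bit width to a typed object, e.g.
+ * (signedness == 0, elem_len == 32) yields OBJECT_TYPE_U32.
+ */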
+static int specialize_get_index_object_type(enum object_type *otype,
+ int signedness, uint32_t elem_len)
+{
+ switch (elem_len) {
+ case 8:
+ if (signedness)
+ *otype = OBJECT_TYPE_S8;
+ else
+ *otype = OBJECT_TYPE_U8;
+ break;
+ case 16:
+ if (signedness)
+ *otype = OBJECT_TYPE_S16;
+ else
+ *otype = OBJECT_TYPE_U16;
+ break;
+ case 32:
+ if (signedness)
+ *otype = OBJECT_TYPE_S32;
+ else
+ *otype = OBJECT_TYPE_U32;
+ break;
+ case 64:
+ if (signedness)
+ *otype = OBJECT_TYPE_S64;
+ else
+ *otype = OBJECT_TYPE_U64;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int specialize_get_index(struct bytecode_runtime *runtime,
+ struct load_op *insn, uint64_t index,
+ struct vstack_entry *stack_top,
+ int idx_len)
+{
+ int ret;
+ struct filter_get_index_data gid;
+ ssize_t data_offset;
+
+ memset(&gid, 0, sizeof(gid));
+ switch (stack_top->load.type) {
+ case LOAD_OBJECT:
+ switch (stack_top->load.object_type) {
+ case OBJECT_TYPE_ARRAY:
+ {
+ const struct lttng_event_field *field;
+ uint32_t elem_len, num_elems;
+ int signedness;
+
+ field = stack_top->load.field;
+ elem_len = field->type.u.array.elem_type.u.basic.integer.size;
+ signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
+ num_elems = field->type.u.array.length;
+ if (index >= num_elems) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = specialize_get_index_object_type(&stack_top->load.object_type,
+ signedness, elem_len);
+ if (ret)
+ goto end;
+ gid.offset = index * (elem_len / CHAR_BIT);
+ gid.array_len = num_elems * (elem_len / CHAR_BIT);
+ gid.elem.type = stack_top->load.object_type;
+ gid.elem.len = elem_len;
+ if (field->type.u.array.elem_type.u.basic.integer.reverse_byte_order)
+ gid.elem.rev_bo = true;
+ stack_top->load.rev_bo = gid.elem.rev_bo;
+ break;
+ }
+ case OBJECT_TYPE_SEQUENCE:
+ {
+ const struct lttng_event_field *field;
+ uint32_t elem_len;
+ int signedness;
+
+ field = stack_top->load.field;
+ elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
+ signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
+ ret = specialize_get_index_object_type(&stack_top->load.object_type,
+ signedness, elem_len);
+ if (ret)
+ goto end;
+ gid.offset = index * (elem_len / CHAR_BIT);
+ gid.elem.type = stack_top->load.object_type;
+ gid.elem.len = elem_len;
+ if (field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order)
+ gid.elem.rev_bo = true;
+ stack_top->load.rev_bo = gid.elem.rev_bo;
+ break;
+ }
+ case OBJECT_TYPE_STRUCT:
+ /* Only generated by the specialize phase. */
+ case OBJECT_TYPE_VARIANT: /* Fall-through */
+ default:
+ ERR("Unexpected get index type %d",
+ (int) stack_top->load.object_type);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ ERR("Index lookup for root field not implemented yet.");
+ ret = -EINVAL;
+ goto end;
+ }
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (idx_len) {
+ case 2:
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ break;
+ case 8:
+ ((struct get_index_u64 *) insn->data)->index = data_offset;
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+
+ return 0;
+
+end:
+ return ret;
+}
+
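+/*
+ * Resolve the get_symbol string into an index within the session's
+ * context fields. Returns a negative value if the context is not
+ * found.
+ */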
+static int specialize_context_lookup_name(struct lttng_ctx *ctx,
+ struct bytecode_runtime *bytecode,
+ struct load_op *insn)
+{
+ uint16_t offset;
+ const char *name;
+
+ offset = ((struct get_symbol *) insn->data)->offset;
+ name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
+ return lttng_get_context_index(ctx, name);
+}
+
+static int specialize_load_object(const struct lttng_event_field *field,
+ struct vstack_load *load, bool is_context)
+{
+ load->type = LOAD_OBJECT;
+ /*
+	 * LTTng-UST lays out all integer fields as s64 on the stack for the filter.
+ */
+ switch (field->type.atype) {
+ case atype_integer:
+ if (field->type.u.basic.integer.signedness)
+ load->object_type = OBJECT_TYPE_S64;
+ else
+ load->object_type = OBJECT_TYPE_U64;
+ load->rev_bo = false;
+ break;
+ case atype_enum:
+ {
+ const struct lttng_integer_type *itype =
+ &field->type.u.basic.enumeration.container_type;
+
+ if (itype->signedness)
+ load->object_type = OBJECT_TYPE_S64;
+ else
+ load->object_type = OBJECT_TYPE_U64;
+ load->rev_bo = false;
+ break;
+ }
+ case atype_array:
+ if (field->type.u.array.elem_type.atype != atype_integer) {
+ ERR("Array nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
+ load->object_type = OBJECT_TYPE_ARRAY;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+ case atype_sequence:
+ if (field->type.u.sequence.elem_type.atype != atype_integer) {
+ ERR("Sequence nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
+ load->object_type = OBJECT_TYPE_SEQUENCE;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+ case atype_string:
+ load->object_type = OBJECT_TYPE_STRING;
+ break;
+ case atype_float:
+ load->object_type = OBJECT_TYPE_DOUBLE;
+ break;
+ case atype_dynamic:
+ load->object_type = OBJECT_TYPE_DYNAMIC;
+ return -EINVAL;
+ case atype_struct:
+ ERR("Structure type cannot be loaded.");
+ return -EINVAL;
+ default:
+ ERR("Unknown type: %d", (int) field->type.atype);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int specialize_context_lookup(struct lttng_session *session,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ int idx, ret;
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_event_field *field;
+ struct filter_get_index_data gid;
+ ssize_t data_offset;
+
+ idx = specialize_context_lookup_name(session->ctx, runtime, insn);
+ if (idx < 0) {
+ return -ENOENT;
+ }
+ ctx_field = &session->ctx->fields[idx];
+ field = &ctx_field->event_field;
+ ret = specialize_load_object(field, load, true);
+ if (ret)
+ return ret;
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = FILTER_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.ctx_index = idx;
+ gid.elem.type = load->object_type;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ return -EINVAL;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ return 0;
+}
+
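+/*
+ * Application context symbols are prefixed with "$app."; the context
+ * field is added to the session context list on first use before its
+ * index is resolved.
+ */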
+static int specialize_app_context_lookup(struct lttng_session *session,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ uint16_t offset;
+ const char *orig_name;
+ char *name = NULL;
+ int idx, ret;
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_event_field *field;
+ struct filter_get_index_data gid;
+ ssize_t data_offset;
+
+ offset = ((struct get_symbol *) insn->data)->offset;
+ orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+ name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
+ if (!name) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ strcpy(name, "$app.");
+ strcat(name, orig_name);
+ idx = lttng_get_context_index(session->ctx, name);
+ if (idx < 0) {
+ assert(lttng_context_is_app(name));
+ ret = lttng_ust_add_app_context_to_ctx_rcu(name,
+ &session->ctx);
+		if (ret)
+			goto end;
+		idx = lttng_get_context_index(session->ctx,
+				name);
+		if (idx < 0) {
+			ret = -ENOENT;
+			goto end;
+		}
+ }
+ ctx_field = &session->ctx->fields[idx];
+ field = &ctx_field->event_field;
+ ret = specialize_load_object(field, load, true);
+ if (ret)
+ goto end;
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = FILTER_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.ctx_index = idx;
+ gid.elem.type = load->object_type;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ ret = 0;
+end:
+ free(name);
+ return ret;
+}
+
+static int specialize_event_payload_lookup(struct lttng_event *event,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ const char *name;
+ uint16_t offset;
+ const struct lttng_event_desc *desc = event->desc;
+ unsigned int i, nr_fields;
+ bool found = false;
+ uint32_t field_offset = 0;
+ const struct lttng_event_field *field;
+ int ret;
+ struct filter_get_index_data gid;
+ ssize_t data_offset;
+
+ nr_fields = desc->nr_fields;
+ offset = ((struct get_symbol *) insn->data)->offset;
+ name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
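+	/*
+	 * Compute the field offset within filter_stack_data: integers
+	 * and enums are laid out as int64_t, arrays and sequences as an
+	 * unsigned long length followed by a pointer, strings as a
+	 * pointer, and floats as a double (mirroring the switch below).
+	 */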
+ for (i = 0; i < nr_fields; i++) {
+ field = &desc->fields[i];
+ if (!strcmp(field->name, name)) {
+ found = true;
+ break;
+ }
+ /* compute field offset on stack */
+ switch (field->type.atype) {
+ case atype_integer:
+ case atype_enum:
+ field_offset += sizeof(int64_t);
+ break;
+ case atype_array:
+ case atype_sequence:
+ field_offset += sizeof(unsigned long);
+ field_offset += sizeof(void *);
+ break;
+ case atype_string:
+ field_offset += sizeof(void *);
+ break;
+ case atype_float:
+ field_offset += sizeof(double);
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ if (!found) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = specialize_load_object(field, load, false);
+ if (ret)
+ goto end;
+
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = FILTER_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.offset = field_offset;
+ gid.elem.type = load->object_type;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ ret = 0;
+end:
+ return ret;
+}
+
+int lttng_filter_specialize_bytecode(struct lttng_event *event,
+ struct bytecode_runtime *bytecode)
{
void *pc, *next_pc, *start_pc;
int ret = -EINVAL;
struct vstack _stack;
struct vstack *stack = &_stack;
+ struct lttng_session *session = bytecode->p.session;
vstack_init(stack);
- start_pc = &bytecode->data[0];
+ start_pc = &bytecode->code[0];
for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
pc = next_pc) {
switch (*(filter_opcode_t *) pc) {
case FILTER_OP_MINUS:
case FILTER_OP_RSHIFT:
case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
ERR("unsupported bytecode op %u\n",
(unsigned int) *(filter_opcode_t *) pc);
ret = -EINVAL;
case FILTER_OP_LT_S64_DOUBLE:
case FILTER_OP_GE_S64_DOUBLE:
case FILTER_OP_LE_S64_DOUBLE:
+ case FILTER_OP_BIT_AND:
+ case FILTER_OP_BIT_OR:
+ case FILTER_OP_BIT_XOR:
{
/* Pop 2, push 1 */
if (vstack_pop(stack)) {
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ assert(vstack_ax(stack)->type == REG_PTR);
+ /* Pop 1, push 1 */
+ ret = specialize_load_field(vstack_ax(stack), insn);
+ if (ret)
+ goto end;
+
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_S8:
+ case FILTER_OP_LOAD_FIELD_S16:
+ case FILTER_OP_LOAD_FIELD_S32:
+ case FILTER_OP_LOAD_FIELD_S64:
+ case FILTER_OP_LOAD_FIELD_U8:
+ case FILTER_OP_LOAD_FIELD_U16:
+ case FILTER_OP_LOAD_FIELD_U32:
+ case FILTER_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_STRING:
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printf("op get symbol\n");
+ switch (vstack_ax(stack)->load.type) {
+ case LOAD_OBJECT:
+ ERR("Nested fields not implemented yet.");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ /* Lookup context field. */
+ ret = specialize_context_lookup(session,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ case LOAD_ROOT_APP_CONTEXT:
+ /* Lookup app context field. */
+ ret = specialize_app_context_lookup(session,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ case LOAD_ROOT_PAYLOAD:
+ /* Lookup event payload field. */
+ ret = specialize_event_payload_lookup(event,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ {
+ /* Always generated by specialize phase. */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case FILTER_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("op get index u16\n");
+ /* Pop 1, push 1 */
+ ret = specialize_get_index(bytecode, insn, index->index,
+ vstack_ax(stack), sizeof(*index));
+ if (ret)
+ goto end;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("op get index u64\n");
+ /* Pop 1, push 1 */
+ ret = specialize_get_index(bytecode, insn, index->index,
+ vstack_ax(stack), sizeof(*index));
+ if (ret)
+ goto end;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
}
}
end:
	return ret;
}
+/*
+ * Binary bitwise operators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ */
+static
+int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
+ const char *str)
+{
+ if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+ goto error_empty;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_S64:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_S64:
+ break;
+ }
+ break;
+ }
+ return 0;
+
+unknown:
+ return 1;
+
+error_empty:
+ ERR("empty stack for '%s' binary operator\n", str);
+ return -EINVAL;
+
+error_type:
+ ERR("unknown type for '%s' binary operator\n", str);
+ return -EINVAL;
+}
+
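+/*
+ * Check that a get_symbol offset lies within the bytecode's string
+ * section and that the symbol is NUL-terminated before the end of the
+ * bytecode.
+ */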
+static
+int validate_get_symbol(struct bytecode_runtime *bytecode,
+ const struct get_symbol *sym)
+{
+ const char *str, *str_limit;
+ size_t len_limit;
+
+ if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
+ return -EINVAL;
+
+ str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
+ str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
+ len_limit = str_limit - str;
+ if (strnlen(str, len_limit) == len_limit)
+ return -EINVAL;
+ return 0;
+}
+
/*
* Validate bytecode range overflow within the validation pass.
* Called for each instruction encountered.
case FILTER_OP_MINUS:
case FILTER_OP_RSHIFT:
case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
{
ERR("unsupported bytecode op %u\n",
(unsigned int) *(filter_opcode_t *) pc);
case FILTER_OP_LT_S64_DOUBLE:
case FILTER_OP_GE_S64_DOUBLE:
case FILTER_OP_LE_S64_DOUBLE:
+ case FILTER_OP_BIT_AND:
+ case FILTER_OP_BIT_OR:
+ case FILTER_OP_BIT_XOR:
{
if (unlikely(pc + sizeof(struct binary_op)
> start_pc + bytecode->len)) {
ret = -EINVAL;
break;
}
+
/* get context ref */
case FILTER_OP_GET_CONTEXT_REF:
case FILTER_OP_LOAD_FIELD_REF_STRING:
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ case FILTER_OP_LOAD_FIELD:
+ case FILTER_OP_LOAD_FIELD_S8:
+ case FILTER_OP_LOAD_FIELD_S16:
+ case FILTER_OP_LOAD_FIELD_S32:
+ case FILTER_OP_LOAD_FIELD_S64:
+ case FILTER_OP_LOAD_FIELD_U8:
+ case FILTER_OP_LOAD_FIELD_U16:
+ case FILTER_OP_LOAD_FIELD_U32:
+ case FILTER_OP_LOAD_FIELD_U64:
+ case FILTER_OP_LOAD_FIELD_STRING:
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ if (unlikely(pc + sizeof(struct load_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case FILTER_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
+ > start_pc + bytecode->len)) {
+			ret = -ERANGE;
+			break;
+		}
+		ret = validate_get_symbol(bytecode, sym);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ ERR("Unexpected get symbol field");
+ ret = -EINVAL;
+ break;
+
+ case FILTER_OP_GET_INDEX_U16:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case FILTER_OP_GET_INDEX_U64:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
}
return ret;
case FILTER_OP_MINUS:
case FILTER_OP_RSHIFT:
case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
{
ERR("unsupported bytecode op %u\n",
(unsigned int) opcode);
break;
}
+ case FILTER_OP_BIT_AND:
+ ret = bin_op_bitwise_check(stack, opcode, "&");
+ if (ret < 0)
+ goto end;
+ break;
+ case FILTER_OP_BIT_OR:
+ ret = bin_op_bitwise_check(stack, opcode, "|");
+ if (ret < 0)
+ goto end;
+ break;
+ case FILTER_OP_BIT_XOR:
+ ret = bin_op_bitwise_check(stack, opcode, "^");
+ if (ret < 0)
+ goto end;
+ break;
+
/* unary */
case FILTER_OP_UNARY_PLUS:
case FILTER_OP_UNARY_MINUS:
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ {
+ dbg_printf("Validate get context root\n");
+ break;
+ }
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ {
+ dbg_printf("Validate get app context root\n");
+ break;
+ }
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ {
+ dbg_printf("Validate get payload root\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD:
+ {
+ /*
+ * We tolerate that field type is unknown at validation,
+ * because we are performing the load specialization in
+ * a phase after validation.
+ */
+ dbg_printf("Validate load field\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_S8:
+ {
+ dbg_printf("Validate load field s8\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_S16:
+ {
+ dbg_printf("Validate load field s16\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_S32:
+ {
+ dbg_printf("Validate load field s32\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_S64:
+ {
+ dbg_printf("Validate load field s64\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_U8:
+ {
+ dbg_printf("Validate load field u8\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_U16:
+ {
+ dbg_printf("Validate load field u16\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_U32:
+ {
+ dbg_printf("Validate load field u32\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_U64:
+ {
+ dbg_printf("Validate load field u64\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_STRING:
+ {
+ dbg_printf("Validate load field string\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ {
+ dbg_printf("Validate load field sequence\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ {
+ dbg_printf("Validate load field double\n");
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printf("Validate get symbol offset %u\n", sym->offset);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printf("Validate get symbol field offset %u\n", sym->offset);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("Validate get index u16 index %u\n", get_index->index);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("Validate get index u64 index %" PRIu64 "\n", get_index->index);
+ break;
+ }
}
end:
return ret;
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
ret = 0;
goto end;
}
case FILTER_OP_MINUS:
case FILTER_OP_RSHIFT:
case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
{
ERR("unsupported bytecode op %u\n",
(unsigned int) *(filter_opcode_t *) pc);
case FILTER_OP_LT_S64_DOUBLE:
case FILTER_OP_GE_S64_DOUBLE:
case FILTER_OP_LE_S64_DOUBLE:
+ case FILTER_OP_BIT_AND:
+ case FILTER_OP_BIT_OR:
+ case FILTER_OP_BIT_XOR:
{
/* Pop 2, push 1 */
if (vstack_pop(stack)) {
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
vstack_ax(stack)->type = REG_S64;
next_pc += sizeof(struct binary_op);
break;
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_DOUBLE:
+ case REG_S64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
vstack_ax(stack)->type = REG_UNKNOWN;
next_pc += sizeof(struct unary_op);
break;
case FILTER_OP_UNARY_PLUS_S64:
case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT:
case FILTER_OP_UNARY_NOT_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case FILTER_OP_UNARY_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_DOUBLE:
+ case REG_S64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
case FILTER_OP_UNARY_NOT_DOUBLE:
{
/* Pop 1, push 1 */
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
vstack_ax(stack)->type = REG_S64;
next_pc += sizeof(struct unary_op);
break;
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
vstack_ax(stack)->type = REG_DOUBLE;
next_pc += sizeof(struct unary_op);
break;
ret = merge_ret;
goto end;
}
+
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+	/* There is always a cast-to-s64 operation before an and/or op. */
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
/* Continue to next instruction */
/* Pop 1 when jump not taken */
if (vstack_pop(stack)) {
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_DOUBLE:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Incorrect register type %d for cast\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
vstack_ax(stack)->type = REG_S64;
next_pc += sizeof(struct cast_op);
break;
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_S8:
+ case FILTER_OP_LOAD_FIELD_S16:
+ case FILTER_OP_LOAD_FIELD_S32:
+ case FILTER_OP_LOAD_FIELD_S64:
+ case FILTER_OP_LOAD_FIELD_U8:
+ case FILTER_OP_LOAD_FIELD_U16:
+ case FILTER_OP_LOAD_FIELD_U32:
+ case FILTER_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_STRING:
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL:
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U16:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
}
end:
*_next_pc = next_pc;
ERR("Error allocating hash table for bytecode validation\n");
return -ENOMEM;
}
- start_pc = &bytecode->data[0];
+ start_pc = &bytecode->code[0];
for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
pc = next_pc) {
ret = bytecode_validate_overflow(bytecode, start_pc, pc);
[ FILTER_OP_MINUS ] = "MINUS",
[ FILTER_OP_RSHIFT ] = "RSHIFT",
[ FILTER_OP_LSHIFT ] = "LSHIFT",
- [ FILTER_OP_BIN_AND ] = "BIN_AND",
- [ FILTER_OP_BIN_OR ] = "BIN_OR",
- [ FILTER_OP_BIN_XOR ] = "BIN_XOR",
+ [ FILTER_OP_BIT_AND ] = "BIT_AND",
+ [ FILTER_OP_BIT_OR ] = "BIT_OR",
+ [ FILTER_OP_BIT_XOR ] = "BIT_XOR",
/* binary comparators */
[ FILTER_OP_EQ ] = "EQ",
/* globbing pattern binary operator: apply to */
[ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
[ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ [ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
+ [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
+ [ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
+
+ [ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
+ [ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
+ [ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
+ [ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
+
+ [ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
+ [ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
+ [ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
+ [ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
+ [ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
+ [ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
+ [ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
+ [ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
+ [ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
+ [ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
+ [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
+ [ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
};
const char *print_op(enum filter_op op)
struct bytecode_runtime *runtime,
uint32_t runtime_len,
uint32_t reloc_offset,
- const char *field_name)
+ const char *field_name,
+ enum filter_op filter_op)
{
const struct lttng_event_desc *desc;
const struct lttng_event_field *fields, *field = NULL;
unsigned int nr_fields, i;
- struct field_ref *field_ref;
struct load_op *op;
uint32_t field_offset = 0;
return -EINVAL;
/* set type */
- op = (struct load_op *) &runtime->data[reloc_offset];
- field_ref = (struct field_ref *) op->data;
- switch (field->type.atype) {
- case atype_integer:
- case atype_enum:
- op->op = FILTER_OP_LOAD_FIELD_REF_S64;
- break;
- case atype_array:
- case atype_sequence:
- op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
- break;
- case atype_string:
- op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
- break;
- case atype_float:
- op->op = FILTER_OP_LOAD_FIELD_REF_DOUBLE;
+ op = (struct load_op *) &runtime->code[reloc_offset];
+
+ switch (filter_op) {
+ case FILTER_OP_LOAD_FIELD_REF:
+ {
+ struct field_ref *field_ref;
+
+ field_ref = (struct field_ref *) op->data;
+ switch (field->type.atype) {
+ case atype_integer:
+ case atype_enum:
+ op->op = FILTER_OP_LOAD_FIELD_REF_S64;
+ break;
+ case atype_array:
+ case atype_sequence:
+ op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
+ break;
+ case atype_string:
+ op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
+ break;
+ case atype_float:
+ op->op = FILTER_OP_LOAD_FIELD_REF_DOUBLE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* set offset */
+ field_ref->offset = (uint16_t) field_offset;
break;
+ }
default:
return -EINVAL;
}
- /* set offset */
- field_ref->offset = (uint16_t) field_offset;
return 0;
}
struct bytecode_runtime *runtime,
uint32_t runtime_len,
uint32_t reloc_offset,
- const char *context_name)
+ const char *context_name,
+ enum filter_op filter_op)
{
- struct field_ref *field_ref;
struct load_op *op;
struct lttng_ctx_field *ctx_field;
int idx;
/* Get context return type */
ctx_field = &session->ctx->fields[idx];
- op = (struct load_op *) &runtime->data[reloc_offset];
- field_ref = (struct field_ref *) op->data;
- switch (ctx_field->event_field.type.atype) {
- case atype_integer:
- case atype_enum:
- op->op = FILTER_OP_GET_CONTEXT_REF_S64;
- break;
- /* Sequence and array supported as string */
- case atype_string:
- case atype_array:
- case atype_sequence:
- op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
- break;
- case atype_float:
- op->op = FILTER_OP_GET_CONTEXT_REF_DOUBLE;
- break;
- case atype_dynamic:
- op->op = FILTER_OP_GET_CONTEXT_REF;
+ op = (struct load_op *) &runtime->code[reloc_offset];
+
+ switch (filter_op) {
+ case FILTER_OP_GET_CONTEXT_REF:
+ {
+ struct field_ref *field_ref;
+
+ field_ref = (struct field_ref *) op->data;
+ switch (ctx_field->event_field.type.atype) {
+ case atype_integer:
+ case atype_enum:
+ op->op = FILTER_OP_GET_CONTEXT_REF_S64;
+ break;
+ /* Sequence and array supported as string */
+ case atype_string:
+ case atype_array:
+ case atype_sequence:
+ op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
+ break;
+ case atype_float:
+ op->op = FILTER_OP_GET_CONTEXT_REF_DOUBLE;
+ break;
+ case atype_dynamic:
+ op->op = FILTER_OP_GET_CONTEXT_REF;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* set offset to context index within channel contexts */
+ field_ref->offset = (uint16_t) idx;
break;
+ }
default:
return -EINVAL;
}
- /* set offset to context index within channel contexts */
- field_ref->offset = (uint16_t) idx;
return 0;
}
if (runtime_len - reloc_offset < sizeof(uint16_t))
return -EINVAL;
- op = (struct load_op *) &runtime->data[reloc_offset];
+ op = (struct load_op *) &runtime->code[reloc_offset];
switch (op->op) {
case FILTER_OP_LOAD_FIELD_REF:
return apply_field_reloc(event, runtime, runtime_len,
- reloc_offset, name);
+ reloc_offset, name, op->op);
case FILTER_OP_GET_CONTEXT_REF:
return apply_context_reloc(event, runtime, runtime_len,
- reloc_offset, name);
+ reloc_offset, name, op->op);
+ case FILTER_OP_GET_SYMBOL:
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ /*
+	 * Will be handled by the load specialize phase or
+	 * dynamically by the interpreter.
+ */
+ return 0;
default:
ERR("Unknown reloc op type %u\n", op->op);
return -EINVAL;
}
runtime->p.bc = filter_bytecode;
runtime->p.session = event->chan->session;
+ runtime->p.event = event;
runtime->len = filter_bytecode->bc.reloc_offset;
/* copy original bytecode */
- memcpy(runtime->data, filter_bytecode->bc.data, runtime->len);
+ memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
/*
* apply relocs. Those are a uint16_t (offset in bytecode)
* followed by a string (field name).
goto link_error;
}
/* Specialize bytecode */
- ret = lttng_filter_specialize_bytecode(runtime);
+ ret = lttng_filter_specialize_bytecode(event, runtime);
if (ret) {
goto link_error;
}
cds_list_for_each_entry_safe(runtime, tmp,
&event->bytecode_runtime_head, p.node) {
+ free(runtime->data);
free(runtime);
}
}
#define FILTER_STACK_LEN 10 /* includes 2 dummy */
#define FILTER_STACK_EMPTY 1
+#define FILTER_MAX_DATA_LEN 65536
+
#ifndef min_t
#define min_t(type, a, b) \
((type) (a) < (type) (b) ? (type) (a) : (type) (b))
/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
struct bytecode_runtime {
struct lttng_bytecode_runtime p;
+ size_t data_len;
+ size_t data_alloc_len;
+ char *data;
uint16_t len;
- char data[0];
+ char code[0];
};
enum entry_type {
REG_STRING,
REG_STAR_GLOB_STRING,
REG_UNKNOWN,
+ REG_PTR,
+};
+
+enum load_type {
+ LOAD_ROOT_CONTEXT,
+ LOAD_ROOT_APP_CONTEXT,
+ LOAD_ROOT_PAYLOAD,
+ LOAD_OBJECT,
+};
+
+enum object_type {
+ OBJECT_TYPE_S8,
+ OBJECT_TYPE_S16,
+ OBJECT_TYPE_S32,
+ OBJECT_TYPE_S64,
+ OBJECT_TYPE_U8,
+ OBJECT_TYPE_U16,
+ OBJECT_TYPE_U32,
+ OBJECT_TYPE_U64,
+
+ OBJECT_TYPE_DOUBLE,
+ OBJECT_TYPE_STRING,
+ OBJECT_TYPE_STRING_SEQUENCE,
+
+ OBJECT_TYPE_SEQUENCE,
+ OBJECT_TYPE_ARRAY,
+ OBJECT_TYPE_STRUCT,
+ OBJECT_TYPE_VARIANT,
+
+ OBJECT_TYPE_DYNAMIC,
+};
+
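+/*
+ * Immediate data referenced by GET_INDEX_U16/U64 instructions after
+ * specialization. "offset" and "array_len" are byte counts; "elem.len"
+ * is the element size in bits, taken from the field's integer type.
+ */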
+struct filter_get_index_data {
+ uint64_t offset; /* in bytes */
+ size_t ctx_index;
+ size_t array_len;
+ struct {
+ size_t len;
+ enum object_type type;
+ bool rev_bo; /* reverse byte order */
+ } elem;
};
/* Validation stack */
+struct vstack_load {
+ enum load_type type;
+ enum object_type object_type;
+ const struct lttng_event_field *field;
+ bool rev_bo; /* reverse byte order */
+};
+
struct vstack_entry {
enum entry_type type;
+ struct vstack_load load;
};
struct vstack {
ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
};
+struct load_ptr {
+ enum load_type type;
+ enum object_type object_type;
+ const void *ptr;
+ bool rev_bo;
+ /* Temporary place-holders for contexts. */
+ union {
+ int64_t s64;
+ uint64_t u64;
+ double d;
+ } u;
+ /*
+ * "field" is only needed when nested under a variant, in which
+ * case we cannot specialize the nested operations.
+ */
+ const struct lttng_event_field *field;
+};
+
struct estack_entry {
enum entry_type type; /* For dynamic typing. */
union {
size_t seq_len;
enum estack_string_literal_type literal_type;
} s;
+ struct load_ptr ptr;
} u;
};
const char *print_op(enum filter_op op);
int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode);
-int lttng_filter_specialize_bytecode(struct bytecode_runtime *bytecode);
+int lttng_filter_specialize_bytecode(struct lttng_event *event,
+ struct bytecode_runtime *bytecode);
uint64_t lttng_filter_false(void *filter_data,
const char *filter_stack_data);