--- /dev/null
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng/bytecode.h
+ *
+ * LTTng bytecode
+ *
+ * Copyright 2012-2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _BYTECODE_H
+#define _BYTECODE_H
+
+/*
+ * offsets are absolute from start of bytecode.
+ */
+
+struct field_ref {
+ /* Initially, symbol offset. After link, field offset. */
+ uint16_t offset;
+} __attribute__((packed));
+
+struct get_symbol {
+ /* Symbol offset. */
+ uint16_t offset;
+} __attribute__((packed));
+
+struct get_index_u16 {
+ uint16_t index;
+} __attribute__((packed));
+
+struct get_index_u64 {
+ uint64_t index;
+} __attribute__((packed));
+
+struct literal_numeric {
+ int64_t v;
+} __attribute__((packed));
+
+struct literal_double {
+ double v;
+} __attribute__((packed));
+
+struct literal_string {
+ char string[0];
+} __attribute__((packed));
+
+enum bytecode_op {
+ BYTECODE_OP_UNKNOWN = 0,
+
+ BYTECODE_OP_RETURN = 1,
+
+ /* binary */
+ BYTECODE_OP_MUL = 2,
+ BYTECODE_OP_DIV = 3,
+ BYTECODE_OP_MOD = 4,
+ BYTECODE_OP_PLUS = 5,
+ BYTECODE_OP_MINUS = 6,
+ BYTECODE_OP_BIT_RSHIFT = 7,
+ BYTECODE_OP_BIT_LSHIFT = 8,
+ BYTECODE_OP_BIT_AND = 9,
+ BYTECODE_OP_BIT_OR = 10,
+ BYTECODE_OP_BIT_XOR = 11,
+
+ /* binary comparators */
+ BYTECODE_OP_EQ = 12,
+ BYTECODE_OP_NE = 13,
+ BYTECODE_OP_GT = 14,
+ BYTECODE_OP_LT = 15,
+ BYTECODE_OP_GE = 16,
+ BYTECODE_OP_LE = 17,
+
+ /* string binary comparator: apply to */
+ BYTECODE_OP_EQ_STRING = 18,
+ BYTECODE_OP_NE_STRING = 19,
+ BYTECODE_OP_GT_STRING = 20,
+ BYTECODE_OP_LT_STRING = 21,
+ BYTECODE_OP_GE_STRING = 22,
+ BYTECODE_OP_LE_STRING = 23,
+
+ /* s64 binary comparator */
+ BYTECODE_OP_EQ_S64 = 24,
+ BYTECODE_OP_NE_S64 = 25,
+ BYTECODE_OP_GT_S64 = 26,
+ BYTECODE_OP_LT_S64 = 27,
+ BYTECODE_OP_GE_S64 = 28,
+ BYTECODE_OP_LE_S64 = 29,
+
+ /* double binary comparator */
+ BYTECODE_OP_EQ_DOUBLE = 30,
+ BYTECODE_OP_NE_DOUBLE = 31,
+ BYTECODE_OP_GT_DOUBLE = 32,
+ BYTECODE_OP_LT_DOUBLE = 33,
+ BYTECODE_OP_GE_DOUBLE = 34,
+ BYTECODE_OP_LE_DOUBLE = 35,
+
+ /* Mixed S64-double binary comparators */
+ BYTECODE_OP_EQ_DOUBLE_S64 = 36,
+ BYTECODE_OP_NE_DOUBLE_S64 = 37,
+ BYTECODE_OP_GT_DOUBLE_S64 = 38,
+ BYTECODE_OP_LT_DOUBLE_S64 = 39,
+ BYTECODE_OP_GE_DOUBLE_S64 = 40,
+ BYTECODE_OP_LE_DOUBLE_S64 = 41,
+
+ BYTECODE_OP_EQ_S64_DOUBLE = 42,
+ BYTECODE_OP_NE_S64_DOUBLE = 43,
+ BYTECODE_OP_GT_S64_DOUBLE = 44,
+ BYTECODE_OP_LT_S64_DOUBLE = 45,
+ BYTECODE_OP_GE_S64_DOUBLE = 46,
+ BYTECODE_OP_LE_S64_DOUBLE = 47,
+
+ /* unary */
+ BYTECODE_OP_UNARY_PLUS = 48,
+ BYTECODE_OP_UNARY_MINUS = 49,
+ BYTECODE_OP_UNARY_NOT = 50,
+ BYTECODE_OP_UNARY_PLUS_S64 = 51,
+ BYTECODE_OP_UNARY_MINUS_S64 = 52,
+ BYTECODE_OP_UNARY_NOT_S64 = 53,
+ BYTECODE_OP_UNARY_PLUS_DOUBLE = 54,
+ BYTECODE_OP_UNARY_MINUS_DOUBLE = 55,
+ BYTECODE_OP_UNARY_NOT_DOUBLE = 56,
+
+ /* logical */
+ BYTECODE_OP_AND = 57,
+ BYTECODE_OP_OR = 58,
+
+ /* load field ref */
+ BYTECODE_OP_LOAD_FIELD_REF = 59,
+ BYTECODE_OP_LOAD_FIELD_REF_STRING = 60,
+ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE = 61,
+ BYTECODE_OP_LOAD_FIELD_REF_S64 = 62,
+ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE = 63,
+
+ /* load immediate from operand */
+ BYTECODE_OP_LOAD_STRING = 64,
+ BYTECODE_OP_LOAD_S64 = 65,
+ BYTECODE_OP_LOAD_DOUBLE = 66,
+
+ /* cast */
+ BYTECODE_OP_CAST_TO_S64 = 67,
+ BYTECODE_OP_CAST_DOUBLE_TO_S64 = 68,
+ BYTECODE_OP_CAST_NOP = 69,
+
+ /* get context ref */
+ BYTECODE_OP_GET_CONTEXT_REF = 70,
+ BYTECODE_OP_GET_CONTEXT_REF_STRING = 71,
+ BYTECODE_OP_GET_CONTEXT_REF_S64 = 72,
+ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE = 73,
+
+ /* load userspace field ref */
+ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING = 74,
+ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75,
+
+ /*
+ * load immediate star globbing pattern (literal string)
+ * from immediate
+ */
+ BYTECODE_OP_LOAD_STAR_GLOB_STRING = 76,
+
+ /* globbing pattern binary operator: apply to */
+ BYTECODE_OP_EQ_STAR_GLOB_STRING = 77,
+ BYTECODE_OP_NE_STAR_GLOB_STRING = 78,
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ BYTECODE_OP_GET_CONTEXT_ROOT = 79,
+ BYTECODE_OP_GET_APP_CONTEXT_ROOT = 80,
+ BYTECODE_OP_GET_PAYLOAD_ROOT = 81,
+
+ BYTECODE_OP_GET_SYMBOL = 82,
+ BYTECODE_OP_GET_SYMBOL_FIELD = 83,
+ BYTECODE_OP_GET_INDEX_U16 = 84,
+ BYTECODE_OP_GET_INDEX_U64 = 85,
+
+ BYTECODE_OP_LOAD_FIELD = 86,
+ BYTECODE_OP_LOAD_FIELD_S8 = 87,
+ BYTECODE_OP_LOAD_FIELD_S16 = 88,
+ BYTECODE_OP_LOAD_FIELD_S32 = 89,
+ BYTECODE_OP_LOAD_FIELD_S64 = 90,
+ BYTECODE_OP_LOAD_FIELD_U8 = 91,
+ BYTECODE_OP_LOAD_FIELD_U16 = 92,
+ BYTECODE_OP_LOAD_FIELD_U32 = 93,
+ BYTECODE_OP_LOAD_FIELD_U64 = 94,
+ BYTECODE_OP_LOAD_FIELD_STRING = 95,
+ BYTECODE_OP_LOAD_FIELD_SEQUENCE = 96,
+ BYTECODE_OP_LOAD_FIELD_DOUBLE = 97,
+
+ BYTECODE_OP_UNARY_BIT_NOT = 98,
+
+ BYTECODE_OP_RETURN_S64 = 99,
+
+ NR_BYTECODE_OPS,
+};
+
+typedef uint8_t bytecode_opcode_t;
+
+struct load_op {
+ bytecode_opcode_t op;
+ char data[0];
+ /* data to load. Size known by enum bytecode_op and null-term char. */
+} __attribute__((packed));
+
+struct binary_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+struct unary_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+/* skip_offset is absolute from start of bytecode */
+struct logical_op {
+ bytecode_opcode_t op;
+ uint16_t skip_offset; /* bytecode insn, if skip second test */
+} __attribute__((packed));
+
+struct cast_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+struct return_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+#endif /* _BYTECODE_H */
};
/*
- * Filter return value masks.
+ * Bytecode interpreter return value masks.
*/
-enum lttng_filter_ret {
- LTTNG_FILTER_DISCARD = 0,
- LTTNG_FILTER_RECORD_FLAG = (1ULL << 0),
+enum lttng_bytecode_interpreter_ret {
+ LTTNG_INTERPRETER_DISCARD = 0,
+ LTTNG_INTERPRETER_RECORD_FLAG = (1ULL << 0),
/* Other bits are kept for future use. */
};
+++ /dev/null
-/* SPDX-License-Identifier: MIT
- *
- * lttng/filter-bytecode.h
- *
- * LTTng filter bytecode
- *
- * Copyright 2012-2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _FILTER_BYTECODE_H
-#define _FILTER_BYTECODE_H
-
-/*
- * offsets are absolute from start of bytecode.
- */
-
-struct field_ref {
- /* Initially, symbol offset. After link, field offset. */
- uint16_t offset;
-} __attribute__((packed));
-
-struct get_symbol {
- /* Symbol offset. */
- uint16_t offset;
-} __attribute__((packed));
-
-struct get_index_u16 {
- uint16_t index;
-} __attribute__((packed));
-
-struct get_index_u64 {
- uint64_t index;
-} __attribute__((packed));
-
-struct literal_numeric {
- int64_t v;
-} __attribute__((packed));
-
-struct literal_double {
- double v;
-} __attribute__((packed));
-
-struct literal_string {
- char string[0];
-} __attribute__((packed));
-
-enum filter_op {
- FILTER_OP_UNKNOWN = 0,
-
- FILTER_OP_RETURN = 1,
-
- /* binary */
- FILTER_OP_MUL = 2,
- FILTER_OP_DIV = 3,
- FILTER_OP_MOD = 4,
- FILTER_OP_PLUS = 5,
- FILTER_OP_MINUS = 6,
- FILTER_OP_BIT_RSHIFT = 7,
- FILTER_OP_BIT_LSHIFT = 8,
- FILTER_OP_BIT_AND = 9,
- FILTER_OP_BIT_OR = 10,
- FILTER_OP_BIT_XOR = 11,
-
- /* binary comparators */
- FILTER_OP_EQ = 12,
- FILTER_OP_NE = 13,
- FILTER_OP_GT = 14,
- FILTER_OP_LT = 15,
- FILTER_OP_GE = 16,
- FILTER_OP_LE = 17,
-
- /* string binary comparator: apply to */
- FILTER_OP_EQ_STRING = 18,
- FILTER_OP_NE_STRING = 19,
- FILTER_OP_GT_STRING = 20,
- FILTER_OP_LT_STRING = 21,
- FILTER_OP_GE_STRING = 22,
- FILTER_OP_LE_STRING = 23,
-
- /* s64 binary comparator */
- FILTER_OP_EQ_S64 = 24,
- FILTER_OP_NE_S64 = 25,
- FILTER_OP_GT_S64 = 26,
- FILTER_OP_LT_S64 = 27,
- FILTER_OP_GE_S64 = 28,
- FILTER_OP_LE_S64 = 29,
-
- /* double binary comparator */
- FILTER_OP_EQ_DOUBLE = 30,
- FILTER_OP_NE_DOUBLE = 31,
- FILTER_OP_GT_DOUBLE = 32,
- FILTER_OP_LT_DOUBLE = 33,
- FILTER_OP_GE_DOUBLE = 34,
- FILTER_OP_LE_DOUBLE = 35,
-
- /* Mixed S64-double binary comparators */
- FILTER_OP_EQ_DOUBLE_S64 = 36,
- FILTER_OP_NE_DOUBLE_S64 = 37,
- FILTER_OP_GT_DOUBLE_S64 = 38,
- FILTER_OP_LT_DOUBLE_S64 = 39,
- FILTER_OP_GE_DOUBLE_S64 = 40,
- FILTER_OP_LE_DOUBLE_S64 = 41,
-
- FILTER_OP_EQ_S64_DOUBLE = 42,
- FILTER_OP_NE_S64_DOUBLE = 43,
- FILTER_OP_GT_S64_DOUBLE = 44,
- FILTER_OP_LT_S64_DOUBLE = 45,
- FILTER_OP_GE_S64_DOUBLE = 46,
- FILTER_OP_LE_S64_DOUBLE = 47,
-
- /* unary */
- FILTER_OP_UNARY_PLUS = 48,
- FILTER_OP_UNARY_MINUS = 49,
- FILTER_OP_UNARY_NOT = 50,
- FILTER_OP_UNARY_PLUS_S64 = 51,
- FILTER_OP_UNARY_MINUS_S64 = 52,
- FILTER_OP_UNARY_NOT_S64 = 53,
- FILTER_OP_UNARY_PLUS_DOUBLE = 54,
- FILTER_OP_UNARY_MINUS_DOUBLE = 55,
- FILTER_OP_UNARY_NOT_DOUBLE = 56,
-
- /* logical */
- FILTER_OP_AND = 57,
- FILTER_OP_OR = 58,
-
- /* load field ref */
- FILTER_OP_LOAD_FIELD_REF = 59,
- FILTER_OP_LOAD_FIELD_REF_STRING = 60,
- FILTER_OP_LOAD_FIELD_REF_SEQUENCE = 61,
- FILTER_OP_LOAD_FIELD_REF_S64 = 62,
- FILTER_OP_LOAD_FIELD_REF_DOUBLE = 63,
-
- /* load immediate from operand */
- FILTER_OP_LOAD_STRING = 64,
- FILTER_OP_LOAD_S64 = 65,
- FILTER_OP_LOAD_DOUBLE = 66,
-
- /* cast */
- FILTER_OP_CAST_TO_S64 = 67,
- FILTER_OP_CAST_DOUBLE_TO_S64 = 68,
- FILTER_OP_CAST_NOP = 69,
-
- /* get context ref */
- FILTER_OP_GET_CONTEXT_REF = 70,
- FILTER_OP_GET_CONTEXT_REF_STRING = 71,
- FILTER_OP_GET_CONTEXT_REF_S64 = 72,
- FILTER_OP_GET_CONTEXT_REF_DOUBLE = 73,
-
- /* load userspace field ref */
- FILTER_OP_LOAD_FIELD_REF_USER_STRING = 74,
- FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75,
-
- /*
- * load immediate star globbing pattern (literal string)
- * from immediate
- */
- FILTER_OP_LOAD_STAR_GLOB_STRING = 76,
-
- /* globbing pattern binary operator: apply to */
- FILTER_OP_EQ_STAR_GLOB_STRING = 77,
- FILTER_OP_NE_STAR_GLOB_STRING = 78,
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- FILTER_OP_GET_CONTEXT_ROOT = 79,
- FILTER_OP_GET_APP_CONTEXT_ROOT = 80,
- FILTER_OP_GET_PAYLOAD_ROOT = 81,
-
- FILTER_OP_GET_SYMBOL = 82,
- FILTER_OP_GET_SYMBOL_FIELD = 83,
- FILTER_OP_GET_INDEX_U16 = 84,
- FILTER_OP_GET_INDEX_U64 = 85,
-
- FILTER_OP_LOAD_FIELD = 86,
- FILTER_OP_LOAD_FIELD_S8 = 87,
- FILTER_OP_LOAD_FIELD_S16 = 88,
- FILTER_OP_LOAD_FIELD_S32 = 89,
- FILTER_OP_LOAD_FIELD_S64 = 90,
- FILTER_OP_LOAD_FIELD_U8 = 91,
- FILTER_OP_LOAD_FIELD_U16 = 92,
- FILTER_OP_LOAD_FIELD_U32 = 93,
- FILTER_OP_LOAD_FIELD_U64 = 94,
- FILTER_OP_LOAD_FIELD_STRING = 95,
- FILTER_OP_LOAD_FIELD_SEQUENCE = 96,
- FILTER_OP_LOAD_FIELD_DOUBLE = 97,
-
- FILTER_OP_UNARY_BIT_NOT = 98,
-
- FILTER_OP_RETURN_S64 = 99,
-
- NR_FILTER_OPS,
-};
-
-typedef uint8_t filter_opcode_t;
-
-struct load_op {
- filter_opcode_t op;
- char data[0];
- /* data to load. Size known by enum filter_opcode and null-term char. */
-} __attribute__((packed));
-
-struct binary_op {
- filter_opcode_t op;
-} __attribute__((packed));
-
-struct unary_op {
- filter_opcode_t op;
-} __attribute__((packed));
-
-/* skip_offset is absolute from start of bytecode */
-struct logical_op {
- filter_opcode_t op;
- uint16_t skip_offset; /* bytecode insn, if skip second test */
-} __attribute__((packed));
-
-struct cast_op {
- filter_opcode_t op;
-} __attribute__((packed));
-
-struct return_op {
- filter_opcode_t op;
-} __attribute__((packed));
-
-#endif /* _FILTER_BYTECODE_H */
+++ /dev/null
-/* SPDX-License-Identifier: MIT
- *
- * lttng/filter.h
- *
- * LTTng modules filter header.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_FILTER_H
-#define _LTTNG_FILTER_H
-
-#include <linux/kernel.h>
-
-#include <lttng/events.h>
-#include <lttng/filter-bytecode.h>
-
-/* Filter stack length, in number of entries */
-#define FILTER_STACK_LEN 10 /* includes 2 dummy */
-#define FILTER_STACK_EMPTY 1
-
-#define FILTER_MAX_DATA_LEN 65536
-
-#ifdef DEBUG
-#define dbg_printk(fmt, args...) \
- printk(KERN_DEBUG "LTTng: [debug bytecode in %s:%s@%u] " fmt, \
- __FILE__, __func__, __LINE__, ## args)
-#else
-#define dbg_printk(fmt, args...) \
-do { \
- /* do nothing but check printf format */ \
- if (0) \
- printk(KERN_DEBUG "LTTng: [debug bytecode in %s:%s@%u] " fmt, \
- __FILE__, __func__, __LINE__, ## args); \
-} while (0)
-#endif
-
-/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
-struct bytecode_runtime {
- struct lttng_bytecode_runtime p;
- size_t data_len;
- size_t data_alloc_len;
- char *data;
- uint16_t len;
- char code[0];
-};
-
-enum entry_type {
- REG_S64,
- REG_U64,
- REG_DOUBLE,
- REG_STRING,
- REG_STAR_GLOB_STRING,
- REG_TYPE_UNKNOWN,
- REG_PTR,
-};
-
-enum load_type {
- LOAD_ROOT_CONTEXT,
- LOAD_ROOT_APP_CONTEXT,
- LOAD_ROOT_PAYLOAD,
- LOAD_OBJECT,
-};
-
-enum object_type {
- OBJECT_TYPE_S8,
- OBJECT_TYPE_S16,
- OBJECT_TYPE_S32,
- OBJECT_TYPE_S64,
- OBJECT_TYPE_U8,
- OBJECT_TYPE_U16,
- OBJECT_TYPE_U32,
- OBJECT_TYPE_U64,
-
- OBJECT_TYPE_DOUBLE,
- OBJECT_TYPE_STRING,
- OBJECT_TYPE_STRING_SEQUENCE,
-
- OBJECT_TYPE_SEQUENCE,
- OBJECT_TYPE_ARRAY,
- OBJECT_TYPE_STRUCT,
- OBJECT_TYPE_VARIANT,
-
- OBJECT_TYPE_DYNAMIC,
-};
-
-struct filter_get_index_data {
- uint64_t offset; /* in bytes */
- size_t ctx_index;
- size_t array_len;
- /*
- * Field is only populated for LOAD_ROOT_CONTEXT, LOAD_ROOT_APP_CONTEXT
- * and LOAD_ROOT_PAYLOAD. Left NULL for LOAD_OBJECT, considering that the
- * interpreter needs to find it from the event fields and types to
- * support variants.
- */
- const struct lttng_event_field *field;
- struct {
- size_t len;
- enum object_type type;
- bool rev_bo; /* reverse byte order */
- } elem;
-};
-
-/* Validation stack */
-struct vstack_load {
- enum load_type type;
- enum object_type object_type;
- const struct lttng_event_field *field;
- bool rev_bo; /* reverse byte order */
-};
-
-struct vstack_entry {
- enum entry_type type;
- struct vstack_load load;
-};
-
-struct vstack {
- int top; /* top of stack */
- struct vstack_entry e[FILTER_STACK_LEN];
-};
-
-static inline
-void vstack_init(struct vstack *stack)
-{
- stack->top = -1;
-}
-
-static inline
-struct vstack_entry *vstack_ax(struct vstack *stack)
-{
- if (unlikely(stack->top < 0))
- return NULL;
- return &stack->e[stack->top];
-}
-
-static inline
-struct vstack_entry *vstack_bx(struct vstack *stack)
-{
- if (unlikely(stack->top < 1))
- return NULL;
- return &stack->e[stack->top - 1];
-}
-
-static inline
-int vstack_push(struct vstack *stack)
-{
- if (stack->top >= FILTER_STACK_LEN - 1) {
- printk(KERN_WARNING "LTTng: filter: Stack full\n");
- return -EINVAL;
- }
- ++stack->top;
- return 0;
-}
-
-static inline
-int vstack_pop(struct vstack *stack)
-{
- if (unlikely(stack->top < 0)) {
- printk(KERN_WARNING "LTTng: filter: Stack empty\n");
- return -EINVAL;
- }
- stack->top--;
- return 0;
-}
-
-/* Execution stack */
-enum estack_string_literal_type {
- ESTACK_STRING_LITERAL_TYPE_NONE,
- ESTACK_STRING_LITERAL_TYPE_PLAIN,
- ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
-};
-
-struct load_ptr {
- enum load_type type;
- enum object_type object_type;
- const void *ptr;
- size_t nr_elem;
- bool rev_bo;
- /* Temporary place-holders for contexts. */
- union {
- int64_t s64;
- uint64_t u64;
- double d;
- } u;
- const struct lttng_event_field *field;
-};
-
-struct estack_entry {
- enum entry_type type;
- union {
- int64_t v;
-
- struct {
- const char *str;
- const char __user *user_str;
- size_t seq_len;
- enum estack_string_literal_type literal_type;
- int user; /* is string from userspace ? */
- } s;
- struct load_ptr ptr;
- } u;
-};
-
-struct estack {
- int top; /* top of stack */
- struct estack_entry e[FILTER_STACK_LEN];
-};
-
-#define estack_ax_v ax
-#define estack_bx_v bx
-
-#define estack_ax_t ax_t
-#define estack_bx_t bx_t
-
-#define estack_ax(stack, top) \
- ({ \
- BUG_ON((top) <= FILTER_STACK_EMPTY); \
- &(stack)->e[top]; \
- })
-
-#define estack_bx(stack, top) \
- ({ \
- BUG_ON((top) <= FILTER_STACK_EMPTY + 1); \
- &(stack)->e[(top) - 1]; \
- })
-
-#define estack_push(stack, top, ax, bx, ax_t, bx_t) \
- do { \
- BUG_ON((top) >= FILTER_STACK_LEN - 1); \
- (stack)->e[(top) - 1].u.v = (bx); \
- (stack)->e[(top) - 1].type = (bx_t); \
- (bx) = (ax); \
- (bx_t) = (ax_t); \
- ++(top); \
- } while (0)
-
-#define estack_pop(stack, top, ax, bx, ax_t, bx_t) \
- do { \
- BUG_ON((top) <= FILTER_STACK_EMPTY); \
- (ax) = (bx); \
- (ax_t) = (bx_t); \
- (bx) = (stack)->e[(top) - 2].u.v; \
- (bx_t) = (stack)->e[(top) - 2].type; \
- (top)--; \
- } while (0)
-
-enum lttng_interpreter_type {
- LTTNG_INTERPRETER_TYPE_S64,
- LTTNG_INTERPRETER_TYPE_U64,
- LTTNG_INTERPRETER_TYPE_SIGNED_ENUM,
- LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM,
- LTTNG_INTERPRETER_TYPE_DOUBLE,
- LTTNG_INTERPRETER_TYPE_STRING,
- LTTNG_INTERPRETER_TYPE_SEQUENCE,
-};
-
-/*
- * Represents the output parameter of the lttng interpreter.
- * Currently capturable field classes are integer, double, string and sequence
- * of integer.
- */
-struct lttng_interpreter_output {
- enum lttng_interpreter_type type;
- union {
- int64_t s;
- uint64_t u;
-
- struct {
- const char *str;
- size_t len;
- } str;
- struct {
- const void *ptr;
- size_t nr_elem;
-
- /* Inner type. */
- const struct lttng_type *nested_type;
- } sequence;
- } u;
-};
-
-const char *lttng_filter_print_op(enum filter_op op);
-
-int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode);
-int lttng_filter_specialize_bytecode(const struct lttng_event_desc *event_desc,
- struct bytecode_runtime *bytecode);
-
-uint64_t lttng_filter_interpret_bytecode_false(void *filter_data,
- struct lttng_probe_ctx *lttng_probe_ctx,
- const char *filter_stack_data);
-uint64_t lttng_filter_interpret_bytecode(void *filter_data,
- struct lttng_probe_ctx *lttng_probe_ctx,
- const char *filter_stack_data);
-
-#endif /* _LTTNG_FILTER_H */
--- /dev/null
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng/lttng-bytecode.h
+ *
+ * LTTng modules bytecode header.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_BYTECODE_H
+#define _LTTNG_BYTECODE_H
+
+#include <linux/kernel.h>
+
+#include <lttng/events.h>
+#include <lttng/bytecode.h>
+
+/* Interpreter stack length, in number of entries */
+#define INTERPRETER_STACK_LEN 10 /* includes 2 dummy */
+#define INTERPRETER_STACK_EMPTY 1
+#define INTERPRETER_MAX_DATA_LEN 65536
+
+#ifdef DEBUG
+#define dbg_printk(fmt, args...) \
+ printk(KERN_DEBUG "LTTng: [debug bytecode in %s:%s@%u] " fmt, \
+ __FILE__, __func__, __LINE__, ## args)
+#else
+#define dbg_printk(fmt, args...) \
+do { \
+ /* do nothing but check printf format */ \
+ if (0) \
+ printk(KERN_DEBUG "LTTng: [debug bytecode in %s:%s@%u] " fmt, \
+ __FILE__, __func__, __LINE__, ## args); \
+} while (0)
+#endif
+
+/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
+struct bytecode_runtime {
+ struct lttng_bytecode_runtime p;
+ size_t data_len;
+ size_t data_alloc_len;
+ char *data;
+ uint16_t len;
+ char code[0];
+};
+
+enum entry_type {
+ REG_S64,
+ REG_U64,
+ REG_DOUBLE,
+ REG_STRING,
+ REG_STAR_GLOB_STRING,
+ REG_TYPE_UNKNOWN,
+ REG_PTR,
+};
+
+enum load_type {
+ LOAD_ROOT_CONTEXT,
+ LOAD_ROOT_APP_CONTEXT,
+ LOAD_ROOT_PAYLOAD,
+ LOAD_OBJECT,
+};
+
+enum object_type {
+ OBJECT_TYPE_S8,
+ OBJECT_TYPE_S16,
+ OBJECT_TYPE_S32,
+ OBJECT_TYPE_S64,
+ OBJECT_TYPE_U8,
+ OBJECT_TYPE_U16,
+ OBJECT_TYPE_U32,
+ OBJECT_TYPE_U64,
+
+ OBJECT_TYPE_DOUBLE,
+ OBJECT_TYPE_STRING,
+ OBJECT_TYPE_STRING_SEQUENCE,
+
+ OBJECT_TYPE_SEQUENCE,
+ OBJECT_TYPE_ARRAY,
+ OBJECT_TYPE_STRUCT,
+ OBJECT_TYPE_VARIANT,
+
+ OBJECT_TYPE_DYNAMIC,
+};
+
+struct bytecode_get_index_data {
+ uint64_t offset; /* in bytes */
+ size_t ctx_index;
+ size_t array_len;
+ /*
+ * Field is only populated for LOAD_ROOT_CONTEXT, LOAD_ROOT_APP_CONTEXT
+ * and LOAD_ROOT_PAYLOAD. Left NULL for LOAD_OBJECT, considering that the
+ * interpreter needs to find it from the event fields and types to
+ * support variants.
+ */
+ const struct lttng_event_field *field;
+ struct {
+ size_t len;
+ enum object_type type;
+ bool rev_bo; /* reverse byte order */
+ } elem;
+};
+
+/* Validation stack */
+struct vstack_load {
+ enum load_type type;
+ enum object_type object_type;
+ const struct lttng_event_field *field;
+ bool rev_bo; /* reverse byte order */
+};
+
+struct vstack_entry {
+ enum entry_type type;
+ struct vstack_load load;
+};
+
+struct vstack {
+ int top; /* top of stack */
+ struct vstack_entry e[INTERPRETER_STACK_LEN];
+};
+
+static inline
+void vstack_init(struct vstack *stack)
+{
+ stack->top = -1;
+}
+
+static inline
+struct vstack_entry *vstack_ax(struct vstack *stack)
+{
+ if (unlikely(stack->top < 0))
+ return NULL;
+ return &stack->e[stack->top];
+}
+
+static inline
+struct vstack_entry *vstack_bx(struct vstack *stack)
+{
+ if (unlikely(stack->top < 1))
+ return NULL;
+ return &stack->e[stack->top - 1];
+}
+
+static inline
+int vstack_push(struct vstack *stack)
+{
+ if (stack->top >= INTERPRETER_STACK_LEN - 1) {
+ printk(KERN_WARNING "LTTng: filter: Stack full\n");
+ return -EINVAL;
+ }
+ ++stack->top;
+ return 0;
+}
+
+static inline
+int vstack_pop(struct vstack *stack)
+{
+ if (unlikely(stack->top < 0)) {
+ printk(KERN_WARNING "LTTng: filter: Stack empty\n");
+ return -EINVAL;
+ }
+ stack->top--;
+ return 0;
+}
+
+/* Execution stack */
+enum estack_string_literal_type {
+ ESTACK_STRING_LITERAL_TYPE_NONE,
+ ESTACK_STRING_LITERAL_TYPE_PLAIN,
+ ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
+};
+
+struct load_ptr {
+ enum load_type type;
+ enum object_type object_type;
+ const void *ptr;
+ size_t nr_elem;
+ bool rev_bo;
+ /* Temporary place-holders for contexts. */
+ union {
+ int64_t s64;
+ uint64_t u64;
+ double d;
+ } u;
+ const struct lttng_event_field *field;
+};
+
+struct estack_entry {
+ enum entry_type type;
+ union {
+ int64_t v;
+
+ struct {
+ const char *str;
+ const char __user *user_str;
+ size_t seq_len;
+ enum estack_string_literal_type literal_type;
+ int user; /* is string from userspace ? */
+ } s;
+ struct load_ptr ptr;
+ } u;
+};
+
+struct estack {
+ int top; /* top of stack */
+ struct estack_entry e[INTERPRETER_STACK_LEN];
+};
+
+#define estack_ax_v ax
+#define estack_bx_v bx
+
+#define estack_ax_t ax_t
+#define estack_bx_t bx_t
+
+#define estack_ax(stack, top) \
+ ({ \
+ BUG_ON((top) <= INTERPRETER_STACK_EMPTY); \
+ &(stack)->e[top]; \
+ })
+
+#define estack_bx(stack, top) \
+ ({ \
+ BUG_ON((top) <= INTERPRETER_STACK_EMPTY + 1); \
+ &(stack)->e[(top) - 1]; \
+ })
+
+#define estack_push(stack, top, ax, bx, ax_t, bx_t) \
+ do { \
+ BUG_ON((top) >= INTERPRETER_STACK_LEN - 1); \
+ (stack)->e[(top) - 1].u.v = (bx); \
+ (stack)->e[(top) - 1].type = (bx_t); \
+ (bx) = (ax); \
+ (bx_t) = (ax_t); \
+ ++(top); \
+ } while (0)
+
+#define estack_pop(stack, top, ax, bx, ax_t, bx_t) \
+ do { \
+ BUG_ON((top) <= INTERPRETER_STACK_EMPTY); \
+ (ax) = (bx); \
+ (ax_t) = (bx_t); \
+ (bx) = (stack)->e[(top) - 2].u.v; \
+ (bx_t) = (stack)->e[(top) - 2].type; \
+ (top)--; \
+ } while (0)
+
+enum lttng_interpreter_type {
+ LTTNG_INTERPRETER_TYPE_S64,
+ LTTNG_INTERPRETER_TYPE_U64,
+ LTTNG_INTERPRETER_TYPE_SIGNED_ENUM,
+ LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM,
+ LTTNG_INTERPRETER_TYPE_DOUBLE,
+ LTTNG_INTERPRETER_TYPE_STRING,
+ LTTNG_INTERPRETER_TYPE_SEQUENCE,
+};
+
+/*
+ * Represents the output parameter of the lttng interpreter.
+ * Currently capturable field classes are integer, double, string and sequence
+ * of integer.
+ */
+struct lttng_interpreter_output {
+ enum lttng_interpreter_type type;
+ union {
+ int64_t s;
+ uint64_t u;
+
+ struct {
+ const char *str;
+ size_t len;
+ } str;
+ struct {
+ const void *ptr;
+ size_t nr_elem;
+
+ /* Inner type. */
+ const struct lttng_type *nested_type;
+ } sequence;
+ } u;
+};
+
+const char *lttng_bytecode_print_op(enum bytecode_op op);
+
+int lttng_bytecode_validate(struct bytecode_runtime *bytecode);
+int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc,
+ struct bytecode_runtime *bytecode);
+
+uint64_t lttng_bytecode_filter_interpret_false(void *filter_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *filter_stack_data);
+uint64_t lttng_bytecode_filter_interpret(void *filter_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *filter_stack_data);
+
+#endif /* _LTTNG_BYTECODE_H */
tp_locvar, _args); \
lttng_list_for_each_entry_rcu(bc_runtime, &__event->filter_bytecode_runtime_head, node) { \
if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
- __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
+ __stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
__filter_record = 1; \
break; \
} \
tp_locvar); \
lttng_list_for_each_entry_rcu(bc_runtime, &__event->filter_bytecode_runtime_head, node) { \
if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
- __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
+ __stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
__filter_record = 1; \
break; \
} \
tp_locvar, _args); \
lttng_list_for_each_entry_rcu(bc_runtime, &__event_notifier->filter_bytecode_runtime_head, node) { \
if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
- __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
+ __stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
__filter_record = 1; \
} \
if (likely(!__filter_record)) \
tp_locvar); \
lttng_list_for_each_entry_rcu(bc_runtime, &__event_notifier->filter_bytecode_runtime_head, node) { \
if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
- __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
+ __stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
__filter_record = 1; \
} \
if (likely(!__filter_record)) \
lttng-context-hostname.o \
probes/lttng.o \
lttng-tracker-id.o \
- lttng-filter.o lttng-filter-interpreter.o \
- lttng-filter-specialize.o \
- lttng-filter-validator.o \
+ lttng-bytecode.o lttng-bytecode-interpreter.o \
+ lttng-bytecode-specialize.o \
+ lttng-bytecode-validator.o \
probes/lttng-probe-user.o \
lttng-tp-mempool.o \
lttng-event-notifier-notification.o
--- /dev/null
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-bytecode-interpreter.c
+ *
+ * LTTng modules bytecode interpreter.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <wrapper/uaccess.h>
+#include <wrapper/objtool.h>
+#include <wrapper/types.h>
+#include <linux/swab.h>
+
+#include <lttng/lttng-bytecode.h>
+#include <lttng/string-utils.h>
+
+/*
+ * get_char should be called with page fault handler disabled if it is expected
+ * to handle user-space read.
+ */
+static
+char get_char(struct estack_entry *reg, size_t offset)
+{
+ if (unlikely(offset >= reg->u.s.seq_len))
+ return '\0';
+ if (reg->u.s.user) {
+ char c;
+
+ /* Handle invalid access as end of string. */
+ if (unlikely(!lttng_access_ok(VERIFY_READ,
+ reg->u.s.user_str + offset,
+ sizeof(c))))
+ return '\0';
+ /* Handle fault (nonzero return value) as end of string. */
+ if (unlikely(__copy_from_user_inatomic(&c,
+ reg->u.s.user_str + offset,
+ sizeof(c))))
+ return '\0';
+ return c;
+ } else {
+ return reg->u.s.str[offset];
+ }
+}
+
+/*
+ * -1: wildcard found.
+ * -2: unknown escape char.
+ * 0: normal char.
+ */
+static
+int parse_char(struct estack_entry *reg, char *c, size_t *offset)
+{
+ switch (*c) {
+ case '\\':
+ (*offset)++;
+ *c = get_char(reg, *offset);
+ switch (*c) {
+ case '\\':
+ case '*':
+ return 0;
+ default:
+ return -2;
+ }
+ case '*':
+ return -1;
+ default:
+ return 0;
+ }
+}
+
+static
+char get_char_at_cb(size_t at, void *data)
+{
+ return get_char(data, at);
+}
+
+static
+int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
+{
+ bool has_user = false;
+ int result;
+ struct estack_entry *pattern_reg;
+ struct estack_entry *candidate_reg;
+
+ /* Disable the page fault handler when reading from userspace. */
+ if (estack_bx(stack, top)->u.s.user
+ || estack_ax(stack, top)->u.s.user) {
+ has_user = true;
+ pagefault_disable();
+ }
+
+ /* Find out which side is the pattern vs. the candidate. */
+ if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
+ pattern_reg = estack_ax(stack, top);
+ candidate_reg = estack_bx(stack, top);
+ } else {
+ pattern_reg = estack_bx(stack, top);
+ candidate_reg = estack_ax(stack, top);
+ }
+
+ /* Perform the match operation. */
+ result = !strutils_star_glob_match_char_cb(get_char_at_cb,
+ pattern_reg, get_char_at_cb, candidate_reg);
+ if (has_user)
+ pagefault_enable();
+
+ return result;
+}
+
+static
+int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
+{
+ size_t offset_bx = 0, offset_ax = 0;
+ int diff, has_user = 0;
+
+ if (estack_bx(stack, top)->u.s.user
+ || estack_ax(stack, top)->u.s.user) {
+ has_user = 1;
+ pagefault_disable();
+ }
+
+ for (;;) {
+ int ret;
+ int escaped_r0 = 0;
+ char char_bx, char_ax;
+
+ char_bx = get_char(estack_bx(stack, top), offset_bx);
+ char_ax = get_char(estack_ax(stack, top), offset_ax);
+
+ if (unlikely(char_bx == '\0')) {
+ if (char_ax == '\0') {
+ diff = 0;
+ break;
+ } else {
+ if (estack_ax(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(estack_ax(stack, top),
+ &char_ax, &offset_ax);
+ if (ret == -1) {
+ diff = 0;
+ break;
+ }
+ }
+ diff = -1;
+ break;
+ }
+ }
+ if (unlikely(char_ax == '\0')) {
+ if (estack_bx(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(estack_bx(stack, top),
+ &char_bx, &offset_bx);
+ if (ret == -1) {
+ diff = 0;
+ break;
+ }
+ }
+ diff = 1;
+ break;
+ }
+ if (estack_bx(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(estack_bx(stack, top),
+ &char_bx, &offset_bx);
+ if (ret == -1) {
+ diff = 0;
+ break;
+ } else if (ret == -2) {
+ escaped_r0 = 1;
+ }
+ /* else compare both char */
+ }
+ if (estack_ax(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(estack_ax(stack, top),
+ &char_ax, &offset_ax);
+ if (ret == -1) {
+ diff = 0;
+ break;
+ } else if (ret == -2) {
+ if (!escaped_r0) {
+ diff = -1;
+ break;
+ }
+ } else {
+ if (escaped_r0) {
+ diff = 1;
+ break;
+ }
+ }
+ } else {
+ if (escaped_r0) {
+ diff = 1;
+ break;
+ }
+ }
+ diff = char_bx - char_ax;
+ if (diff != 0)
+ break;
+ offset_bx++;
+ offset_ax++;
+ }
+ if (has_user)
+ pagefault_enable();
+
+ return diff;
+}
+
+uint64_t lttng_bytecode_filter_interpret_false(void *filter_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *filter_stack_data)
+{
+ return LTTNG_INTERPRETER_DISCARD;
+}
+
+#ifdef INTERPRETER_USE_SWITCH
+
+/*
+ * Fallback for compilers that do not support taking address of labels.
+ */
+
+#define START_OP \
+ start_pc = &bytecode->data[0]; \
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
+ pc = next_pc) { \
+ dbg_printk("LTTng: Executing op %s (%u)\n", \
+ lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc), \
+ (unsigned int) *(bytecode_opcode_t *) pc); \
+ switch (*(bytecode_opcode_t *) pc) {
+
+#define OP(name) case name
+
+#define PO break
+
+#define END_OP } \
+ }
+
+#else
+
+/*
+ * Dispatch-table based interpreter.
+ */
+
+#define START_OP \
+ start_pc = &bytecode->code[0]; \
+ pc = next_pc = start_pc; \
+ if (unlikely(pc - start_pc >= bytecode->len)) \
+ goto end; \
+ goto *dispatch[*(bytecode_opcode_t *) pc];
+
+#define OP(name) \
+LABEL_##name
+
+#define PO \
+ pc = next_pc; \
+ goto *dispatch[*(bytecode_opcode_t *) pc];
+
+#define END_OP
+
+#endif
+
+#define IS_INTEGER_REGISTER(reg_type) \
+ (reg_type == REG_S64 || reg_type == REG_U64)
+
+static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
+ struct load_ptr *ptr,
+ uint32_t idx)
+{
+
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_event_field *field;
+ union lttng_ctx_value v;
+
+ ctx_field = &lttng_static_ctx->fields[idx];
+ field = &ctx_field->event_field;
+ ptr->type = LOAD_OBJECT;
+ /* field is only used for types nested within variants. */
+ ptr->field = NULL;
+
+ switch (field->type.atype) {
+ case atype_integer:
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ if (field->type.u.integer.signedness) {
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ case atype_enum_nestable:
+ {
+ const struct lttng_integer_type *itype =
+ &field->type.u.enum_nestable.container_type->u.integer;
+
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ if (itype->signedness) {
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ }
+ case atype_array_nestable:
+ if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
+ printk(KERN_WARNING "LTTng: bytecode: Array nesting only supports integer types.\n");
+ return -EINVAL;
+ }
+ if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+ printk(KERN_WARNING "LTTng: bytecode: Only string arrays are supported for contexts.\n");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ ptr->ptr = v.str;
+ break;
+ case atype_sequence_nestable:
+ if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
+ printk(KERN_WARNING "LTTng: bytecode: Sequence nesting only supports integer types.\n");
+ return -EINVAL;
+ }
+ if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+ printk(KERN_WARNING "LTTng: bytecode: Only string sequences are supported for contexts.\n");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ ptr->ptr = v.str;
+ break;
+ case atype_string:
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ ptr->ptr = v.str;
+ break;
+ case atype_struct_nestable:
+ printk(KERN_WARNING "LTTng: bytecode: Structure type cannot be loaded.\n");
+ return -EINVAL;
+ case atype_variant_nestable:
+ printk(KERN_WARNING "LTTng: bytecode: Variant type cannot be loaded.\n");
+ return -EINVAL;
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Unknown type: %d", (int) field->type.atype);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Apply a "get index" instruction to the entry at the top of the
+ * interpreter stack, replacing it in place. @index is an offset into the
+ * runtime data area where the bytecode_get_index_data descriptor lives.
+ *
+ * - On a loaded array/sequence object: index into the element storage
+ *   (bounds-checked for sequences, WARN-checked for arrays).
+ * - On a context root: resolve the context field through
+ *   context_get_index().
+ * - On the payload root: apply the precomputed field offset.
+ *
+ * On success the stack top becomes a REG_PTR entry and 0 is returned;
+ * on failure a negative errno is returned and the stack top is
+ * unspecified.
+ */
+static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
+		struct bytecode_runtime *runtime,
+		uint64_t index, struct estack_entry *stack_top)
+{
+	int ret;
+	const struct bytecode_get_index_data *gid;
+
+	gid = (const struct bytecode_get_index_data *) &runtime->data[index];
+	switch (stack_top->u.ptr.type) {
+	case LOAD_OBJECT:
+		switch (stack_top->u.ptr.object_type) {
+		case OBJECT_TYPE_ARRAY:
+		{
+			const char *ptr;
+
+			/* Offset was validated at specialization time. */
+			WARN_ON_ONCE(gid->offset >= gid->array_len);
+			/* Skip count (unsigned long) */
+			ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+			ptr = ptr + gid->offset;
+			stack_top->u.ptr.ptr = ptr;
+			stack_top->u.ptr.object_type = gid->elem.type;
+			stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+			BUG_ON(stack_top->u.ptr.field->type.atype != atype_array_nestable);
+			stack_top->u.ptr.field = NULL;
+			break;
+		}
+		case OBJECT_TYPE_SEQUENCE:
+		{
+			const char *ptr;
+			size_t ptr_seq_len;
+
+			/* Sequence layout: { unsigned long len; elems... }. */
+			ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+			ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
+			/* Runtime bounds check against the actual length. */
+			if (gid->offset >= gid->elem.len * ptr_seq_len) {
+				ret = -EINVAL;
+				goto end;
+			}
+			ptr = ptr + gid->offset;
+			stack_top->u.ptr.ptr = ptr;
+			stack_top->u.ptr.object_type = gid->elem.type;
+			stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+			BUG_ON(stack_top->u.ptr.field->type.atype != atype_sequence_nestable);
+			stack_top->u.ptr.field = NULL;
+			break;
+		}
+		case OBJECT_TYPE_STRUCT:
+			printk(KERN_WARNING "LTTng: bytecode: Nested structures are not supported yet.\n");
+			ret = -EINVAL;
+			goto end;
+		case OBJECT_TYPE_VARIANT:
+		default:
+			/* Fix: add trailing newline so the kernel log line terminates. */
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected get index type %d\n",
+				(int) stack_top->u.ptr.object_type);
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	case LOAD_ROOT_CONTEXT:
+	case LOAD_ROOT_APP_CONTEXT:	/* Fall-through */
+	{
+		ret = context_get_index(lttng_probe_ctx,
+				&stack_top->u.ptr,
+				gid->ctx_index);
+		if (ret) {
+			goto end;
+		}
+		break;
+	}
+	case LOAD_ROOT_PAYLOAD:
+		stack_top->u.ptr.ptr += gid->offset;
+		/* Strings in the payload are stored as a pointer; chase it. */
+		if (gid->elem.type == OBJECT_TYPE_STRING)
+			stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
+		stack_top->u.ptr.object_type = gid->elem.type;
+		stack_top->u.ptr.type = LOAD_OBJECT;
+		stack_top->u.ptr.field = gid->field;
+		stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+		break;
+	default:
+		/*
+		 * Fix: previously an unexpected load type silently fell
+		 * through and returned success with a stale stack entry.
+		 */
+		ret = -EINVAL;
+		goto end;
+	}
+
+	stack_top->type = REG_PTR;
+
+	return 0;
+
+end:
+	return ret;
+}
+
+/*
+ * Dereference the LOAD_OBJECT pointer at the top of the interpreter
+ * stack and replace the entry with the loaded value:
+ *
+ * - integer object types load into the s64/u64 register (with byte
+ *   swapping when the object is marked reverse byte order),
+ * - string and string-sequence object types load into the string
+ *   register (rejecting NULL pointers),
+ * - aggregate and dynamic object types cannot be loaded directly.
+ *
+ * Returns 0 on success, -EINVAL on unsupported object or NULL string.
+ */
+static int dynamic_load_field(struct estack_entry *stack_top)
+{
+	int ret;
+
+	/* Only an already-resolved object can be loaded; roots need a field name. */
+	switch (stack_top->u.ptr.type) {
+	case LOAD_OBJECT:
+		break;
+	case LOAD_ROOT_CONTEXT:
+	case LOAD_ROOT_APP_CONTEXT:
+	case LOAD_ROOT_PAYLOAD:
+	default:
+		dbg_printk("Bytecode warning: cannot load root, missing field name.\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	switch (stack_top->u.ptr.object_type) {
+	case OBJECT_TYPE_S8:
+		/* Single bytes never need a byte swap. */
+		dbg_printk("op load field s8\n");
+		stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
+		stack_top->type = REG_S64;
+		break;
+	case OBJECT_TYPE_S16:
+	{
+		int16_t tmp;
+
+		dbg_printk("op load field s16\n");
+		tmp = *(int16_t *) stack_top->u.ptr.ptr;
+		if (stack_top->u.ptr.rev_bo)
+			__swab16s(&tmp);
+		stack_top->u.v = tmp;
+		stack_top->type = REG_S64;
+		break;
+	}
+	case OBJECT_TYPE_S32:
+	{
+		int32_t tmp;
+
+		dbg_printk("op load field s32\n");
+		tmp = *(int32_t *) stack_top->u.ptr.ptr;
+		if (stack_top->u.ptr.rev_bo)
+			__swab32s(&tmp);
+		stack_top->u.v = tmp;
+		stack_top->type = REG_S64;
+		break;
+	}
+	case OBJECT_TYPE_S64:
+	{
+		int64_t tmp;
+
+		dbg_printk("op load field s64\n");
+		tmp = *(int64_t *) stack_top->u.ptr.ptr;
+		if (stack_top->u.ptr.rev_bo)
+			__swab64s(&tmp);
+		stack_top->u.v = tmp;
+		stack_top->type = REG_S64;
+		break;
+	}
+	case OBJECT_TYPE_U8:
+		dbg_printk("op load field u8\n");
+		stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
+		stack_top->type = REG_U64;
+		break;
+	case OBJECT_TYPE_U16:
+	{
+		uint16_t tmp;
+
+		dbg_printk("op load field u16\n");
+		tmp = *(uint16_t *) stack_top->u.ptr.ptr;
+		if (stack_top->u.ptr.rev_bo)
+			__swab16s(&tmp);
+		stack_top->u.v = tmp;
+		stack_top->type = REG_U64;
+		break;
+	}
+	case OBJECT_TYPE_U32:
+	{
+		uint32_t tmp;
+
+		dbg_printk("op load field u32\n");
+		tmp = *(uint32_t *) stack_top->u.ptr.ptr;
+		if (stack_top->u.ptr.rev_bo)
+			__swab32s(&tmp);
+		stack_top->u.v = tmp;
+		stack_top->type = REG_U64;
+		break;
+	}
+	case OBJECT_TYPE_U64:
+	{
+		uint64_t tmp;
+
+		dbg_printk("op load field u64\n");
+		tmp = *(uint64_t *) stack_top->u.ptr.ptr;
+		if (stack_top->u.ptr.rev_bo)
+			__swab64s(&tmp);
+		stack_top->u.v = tmp;
+		stack_top->type = REG_U64;
+		break;
+	}
+	case OBJECT_TYPE_STRING:
+	{
+		const char *str;
+
+		dbg_printk("op load field string\n");
+		str = (const char *) stack_top->u.ptr.ptr;
+		stack_top->u.s.str = str;
+		if (unlikely(!stack_top->u.s.str)) {
+			dbg_printk("Bytecode warning: loading a NULL string.\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		/* NUL-terminated string: no explicit length bound. */
+		stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
+		stack_top->u.s.literal_type =
+			ESTACK_STRING_LITERAL_TYPE_NONE;
+		stack_top->type = REG_STRING;
+		break;
+	}
+	case OBJECT_TYPE_STRING_SEQUENCE:
+	{
+		const char *ptr;
+
+		/* Sequence layout: { unsigned long len; const char *data; }. */
+		dbg_printk("op load field string sequence\n");
+		ptr = stack_top->u.ptr.ptr;
+		stack_top->u.s.seq_len = *(unsigned long *) ptr;
+		stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+		if (unlikely(!stack_top->u.s.str)) {
+			dbg_printk("Bytecode warning: loading a NULL sequence.\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		stack_top->u.s.literal_type =
+			ESTACK_STRING_LITERAL_TYPE_NONE;
+		stack_top->type = REG_STRING;
+		break;
+	}
+	case OBJECT_TYPE_DYNAMIC:
+		/*
+		 * Dynamic types in context are looked up
+		 * by context get index.
+		 */
+		ret = -EINVAL;
+		goto end;
+	case OBJECT_TYPE_DOUBLE:
+		/* No floating point in kernel context. */
+		ret = -EINVAL;
+		goto end;
+	case OBJECT_TYPE_SEQUENCE:
+	case OBJECT_TYPE_ARRAY:
+	case OBJECT_TYPE_STRUCT:
+	case OBJECT_TYPE_VARIANT:
+		printk(KERN_WARNING "LTTng: bytecode: Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
+		ret = -EINVAL;
+		goto end;
+	default:
+		/*
+		 * Fix: previously an out-of-range object type fell out of the
+		 * switch and returned success with stale register contents.
+		 */
+		ret = -EINVAL;
+		goto end;
+	}
+	return 0;
+
+end:
+	return ret;
+}
+
+/*
+ * Convert the interpreter's final stack-top entry @ax into the
+ * lttng_interpreter_output structure handed back to the caller.
+ *
+ * Integer, string and already-loaded register types map directly to an
+ * output type. A REG_PTR entry holding a scalar or string object is
+ * first materialized via dynamic_load_field(), then the conversion is
+ * retried; sequence/array objects are described by pointer, element
+ * count, and nested element type without loading the elements.
+ *
+ * Returns LTTNG_INTERPRETER_RECORD_FLAG on success, negative errno on
+ * unsupported register/object types or failed field load.
+ */
+static
+int lttng_bytecode_interpret_format_output(struct estack_entry *ax,
+		struct lttng_interpreter_output *output)
+{
+	int ret;
+
+again:
+	switch (ax->type) {
+	case REG_S64:
+		output->type = LTTNG_INTERPRETER_TYPE_S64;
+		output->u.s = ax->u.v;
+		break;
+	case REG_U64:
+		output->type = LTTNG_INTERPRETER_TYPE_U64;
+		output->u.u = (uint64_t) ax->u.v;
+		break;
+	case REG_STRING:
+		output->type = LTTNG_INTERPRETER_TYPE_STRING;
+		output->u.str.str = ax->u.s.str;
+		output->u.str.len = ax->u.s.seq_len;
+		break;
+	case REG_PTR:
+		switch (ax->u.ptr.object_type) {
+		case OBJECT_TYPE_S8:
+		case OBJECT_TYPE_S16:
+		case OBJECT_TYPE_S32:
+		case OBJECT_TYPE_S64:
+		case OBJECT_TYPE_U8:
+		case OBJECT_TYPE_U16:
+		case OBJECT_TYPE_U32:
+		case OBJECT_TYPE_U64:
+		case OBJECT_TYPE_DOUBLE:
+		case OBJECT_TYPE_STRING:
+		case OBJECT_TYPE_STRING_SEQUENCE:
+			/* Materialize the pointed-to scalar/string in-place. */
+			ret = dynamic_load_field(ax);
+			if (ret)
+				return ret;
+			/* Retry after loading ptr into stack top. */
+			goto again;
+		case OBJECT_TYPE_SEQUENCE:
+			/*
+			 * Sequence layout: { unsigned long nr_elem; ptr }.
+			 * NOTE(review): assumes ax->u.ptr.field is non-NULL
+			 * here — nested get-index clears it; verify callers.
+			 */
+			output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
+			output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
+			output->u.sequence.nr_elem = *(unsigned long *) ax->u.ptr.ptr;
+			output->u.sequence.nested_type = ax->u.ptr.field->type.u.sequence_nestable.elem_type;
+			break;
+		case OBJECT_TYPE_ARRAY:
+			/* Skip count (unsigned long) */
+			output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
+			output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
+			/* Array length is static, taken from the field type. */
+			output->u.sequence.nr_elem = ax->u.ptr.field->type.u.array_nestable.length;
+			output->u.sequence.nested_type = ax->u.ptr.field->type.u.array_nestable.elem_type;
+			break;
+		case OBJECT_TYPE_STRUCT:
+		case OBJECT_TYPE_VARIANT:
+		default:
+			return -EINVAL;
+		}
+
+		break;
+	case REG_STAR_GLOB_STRING:
+	case REG_TYPE_UNKNOWN:
+	default:
+		return -EINVAL;
+	}
+
+	return LTTNG_INTERPRETER_RECORD_FLAG;
+}
+
+/*
+ * Return 0 (discard), or raise the 0x1 flag (log event).
+ * Currently, other flags are kept for future extensions and have no
+ * effect.
+ */
+static
+uint64_t bytecode_interpret(void *interpreter_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *interpreter_stack_data,
+ struct lttng_interpreter_output *output)
+{
+ struct bytecode_runtime *bytecode = interpreter_data;
+ void *pc, *next_pc, *start_pc;
+ int ret = -EINVAL;
+ uint64_t retval = 0;
+ struct estack _stack;
+ struct estack *stack = &_stack;
+ register int64_t ax = 0, bx = 0;
+ register enum entry_type ax_t = REG_TYPE_UNKNOWN, bx_t = REG_TYPE_UNKNOWN;
+ register int top = INTERPRETER_STACK_EMPTY;
+#ifndef INTERPRETER_USE_SWITCH
+ static void *dispatch[NR_BYTECODE_OPS] = {
+ [ BYTECODE_OP_UNKNOWN ] = &&LABEL_BYTECODE_OP_UNKNOWN,
+
+ [ BYTECODE_OP_RETURN ] = &&LABEL_BYTECODE_OP_RETURN,
+
+ /* binary */
+ [ BYTECODE_OP_MUL ] = &&LABEL_BYTECODE_OP_MUL,
+ [ BYTECODE_OP_DIV ] = &&LABEL_BYTECODE_OP_DIV,
+ [ BYTECODE_OP_MOD ] = &&LABEL_BYTECODE_OP_MOD,
+ [ BYTECODE_OP_PLUS ] = &&LABEL_BYTECODE_OP_PLUS,
+ [ BYTECODE_OP_MINUS ] = &&LABEL_BYTECODE_OP_MINUS,
+ [ BYTECODE_OP_BIT_RSHIFT ] = &&LABEL_BYTECODE_OP_BIT_RSHIFT,
+ [ BYTECODE_OP_BIT_LSHIFT ] = &&LABEL_BYTECODE_OP_BIT_LSHIFT,
+ [ BYTECODE_OP_BIT_AND ] = &&LABEL_BYTECODE_OP_BIT_AND,
+ [ BYTECODE_OP_BIT_OR ] = &&LABEL_BYTECODE_OP_BIT_OR,
+ [ BYTECODE_OP_BIT_XOR ] = &&LABEL_BYTECODE_OP_BIT_XOR,
+
+ /* binary comparators */
+ [ BYTECODE_OP_EQ ] = &&LABEL_BYTECODE_OP_EQ,
+ [ BYTECODE_OP_NE ] = &&LABEL_BYTECODE_OP_NE,
+ [ BYTECODE_OP_GT ] = &&LABEL_BYTECODE_OP_GT,
+ [ BYTECODE_OP_LT ] = &&LABEL_BYTECODE_OP_LT,
+ [ BYTECODE_OP_GE ] = &&LABEL_BYTECODE_OP_GE,
+ [ BYTECODE_OP_LE ] = &&LABEL_BYTECODE_OP_LE,
+
+ /* string binary comparator */
+ [ BYTECODE_OP_EQ_STRING ] = &&LABEL_BYTECODE_OP_EQ_STRING,
+ [ BYTECODE_OP_NE_STRING ] = &&LABEL_BYTECODE_OP_NE_STRING,
+ [ BYTECODE_OP_GT_STRING ] = &&LABEL_BYTECODE_OP_GT_STRING,
+ [ BYTECODE_OP_LT_STRING ] = &&LABEL_BYTECODE_OP_LT_STRING,
+ [ BYTECODE_OP_GE_STRING ] = &&LABEL_BYTECODE_OP_GE_STRING,
+ [ BYTECODE_OP_LE_STRING ] = &&LABEL_BYTECODE_OP_LE_STRING,
+
+ /* globbing pattern binary comparator */
+ [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_EQ_STAR_GLOB_STRING,
+ [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_NE_STAR_GLOB_STRING,
+
+ /* s64 binary comparator */
+ [ BYTECODE_OP_EQ_S64 ] = &&LABEL_BYTECODE_OP_EQ_S64,
+ [ BYTECODE_OP_NE_S64 ] = &&LABEL_BYTECODE_OP_NE_S64,
+ [ BYTECODE_OP_GT_S64 ] = &&LABEL_BYTECODE_OP_GT_S64,
+ [ BYTECODE_OP_LT_S64 ] = &&LABEL_BYTECODE_OP_LT_S64,
+ [ BYTECODE_OP_GE_S64 ] = &&LABEL_BYTECODE_OP_GE_S64,
+ [ BYTECODE_OP_LE_S64 ] = &&LABEL_BYTECODE_OP_LE_S64,
+
+ /* double binary comparator */
+ [ BYTECODE_OP_EQ_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE,
+ [ BYTECODE_OP_NE_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_DOUBLE,
+ [ BYTECODE_OP_GT_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_DOUBLE,
+ [ BYTECODE_OP_LT_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_DOUBLE,
+ [ BYTECODE_OP_GE_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_DOUBLE,
+ [ BYTECODE_OP_LE_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_DOUBLE,
+
+ /* Mixed S64-double binary comparators */
+ [ BYTECODE_OP_EQ_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE_S64,
+ [ BYTECODE_OP_NE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_NE_DOUBLE_S64,
+ [ BYTECODE_OP_GT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GT_DOUBLE_S64,
+ [ BYTECODE_OP_LT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LT_DOUBLE_S64,
+ [ BYTECODE_OP_GE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GE_DOUBLE_S64,
+ [ BYTECODE_OP_LE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LE_DOUBLE_S64,
+
+ [ BYTECODE_OP_EQ_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_S64_DOUBLE,
+ [ BYTECODE_OP_NE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_S64_DOUBLE,
+ [ BYTECODE_OP_GT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_S64_DOUBLE,
+ [ BYTECODE_OP_LT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_S64_DOUBLE,
+ [ BYTECODE_OP_GE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_S64_DOUBLE,
+ [ BYTECODE_OP_LE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_S64_DOUBLE,
+
+ /* unary */
+ [ BYTECODE_OP_UNARY_PLUS ] = &&LABEL_BYTECODE_OP_UNARY_PLUS,
+ [ BYTECODE_OP_UNARY_MINUS ] = &&LABEL_BYTECODE_OP_UNARY_MINUS,
+ [ BYTECODE_OP_UNARY_NOT ] = &&LABEL_BYTECODE_OP_UNARY_NOT,
+ [ BYTECODE_OP_UNARY_PLUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_S64,
+ [ BYTECODE_OP_UNARY_MINUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_S64,
+ [ BYTECODE_OP_UNARY_NOT_S64 ] = &&LABEL_BYTECODE_OP_UNARY_NOT_S64,
+ [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_DOUBLE,
+ [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_DOUBLE,
+ [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_NOT_DOUBLE,
+
+ /* logical */
+ [ BYTECODE_OP_AND ] = &&LABEL_BYTECODE_OP_AND,
+ [ BYTECODE_OP_OR ] = &&LABEL_BYTECODE_OP_OR,
+
+ /* load field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF,
+ [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_STRING,
+ [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE,
+ [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_S64,
+ [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_DOUBLE,
+
+ /* load from immediate operand */
+ [ BYTECODE_OP_LOAD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STRING,
+ [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STAR_GLOB_STRING,
+ [ BYTECODE_OP_LOAD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_S64,
+ [ BYTECODE_OP_LOAD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_DOUBLE,
+
+ /* cast */
+ [ BYTECODE_OP_CAST_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_TO_S64,
+ [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_DOUBLE_TO_S64,
+ [ BYTECODE_OP_CAST_NOP ] = &&LABEL_BYTECODE_OP_CAST_NOP,
+
+ /* get context ref */
+ [ BYTECODE_OP_GET_CONTEXT_REF ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF,
+ [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_STRING,
+ [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_S64,
+ [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_DOUBLE,
+
+ /* load userspace field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_USER_STRING,
+ [ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE,
+
+ /* Instructions for recursive traversal through composed types. */
+ [ BYTECODE_OP_GET_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_ROOT,
+ [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_APP_CONTEXT_ROOT,
+ [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = &&LABEL_BYTECODE_OP_GET_PAYLOAD_ROOT,
+
+ [ BYTECODE_OP_GET_SYMBOL ] = &&LABEL_BYTECODE_OP_GET_SYMBOL,
+ [ BYTECODE_OP_GET_SYMBOL_FIELD ] = &&LABEL_BYTECODE_OP_GET_SYMBOL_FIELD,
+ [ BYTECODE_OP_GET_INDEX_U16 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U16,
+ [ BYTECODE_OP_GET_INDEX_U64 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U64,
+
+ [ BYTECODE_OP_LOAD_FIELD ] = &&LABEL_BYTECODE_OP_LOAD_FIELD,
+ [ BYTECODE_OP_LOAD_FIELD_S8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S8,
+ [ BYTECODE_OP_LOAD_FIELD_S16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S16,
+ [ BYTECODE_OP_LOAD_FIELD_S32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S32,
+ [ BYTECODE_OP_LOAD_FIELD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S64,
+ [ BYTECODE_OP_LOAD_FIELD_U8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U8,
+ [ BYTECODE_OP_LOAD_FIELD_U16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U16,
+ [ BYTECODE_OP_LOAD_FIELD_U32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U32,
+ [ BYTECODE_OP_LOAD_FIELD_U64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U64,
+ [ BYTECODE_OP_LOAD_FIELD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_STRING,
+ [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_SEQUENCE,
+ [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_DOUBLE,
+
+ [ BYTECODE_OP_UNARY_BIT_NOT ] = &&LABEL_BYTECODE_OP_UNARY_BIT_NOT,
+
+ [ BYTECODE_OP_RETURN_S64 ] = &&LABEL_BYTECODE_OP_RETURN_S64,
+ };
+#endif /* #ifndef INTERPRETER_USE_SWITCH */
+
+ START_OP
+
+ OP(BYTECODE_OP_UNKNOWN):
+ OP(BYTECODE_OP_LOAD_FIELD_REF):
+ OP(BYTECODE_OP_GET_CONTEXT_REF):
+#ifdef INTERPRETER_USE_SWITCH
+ default:
+#endif /* INTERPRETER_USE_SWITCH */
+ printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_RETURN):
+ OP(BYTECODE_OP_RETURN_S64):
+ /* LTTNG_INTERPRETER_DISCARD or LTTNG_INTERPRETER_RECORD_FLAG */
+ switch (estack_ax_t) {
+ case REG_S64:
+ case REG_U64:
+ retval = !!estack_ax_v;
+ break;
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_PTR:
+ if (!output) {
+ ret = -EINVAL;
+ goto end;
+ }
+ retval = 0;
+ break;
+ case REG_STAR_GLOB_STRING:
+ case REG_TYPE_UNKNOWN:
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = 0;
+ goto end;
+
+ /* binary */
+ OP(BYTECODE_OP_MUL):
+ OP(BYTECODE_OP_DIV):
+ OP(BYTECODE_OP_MOD):
+ OP(BYTECODE_OP_PLUS):
+ OP(BYTECODE_OP_MINUS):
+ printk(KERN_WARNING "LTTng: bytecode: unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_EQ):
+ OP(BYTECODE_OP_NE):
+ OP(BYTECODE_OP_GT):
+ OP(BYTECODE_OP_LT):
+ OP(BYTECODE_OP_GE):
+ OP(BYTECODE_OP_LE):
+ printk(KERN_WARNING "LTTng: bytecode: unsupported non-specialized bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_EQ_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "==") == 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "!=") != 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, ">") > 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "<") < 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, ">=") >= 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "<=") <= 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_STAR_GLOB_STRING):
+ {
+ int res;
+
+ res = (stack_star_glob_match(stack, top, "==") == 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_STAR_GLOB_STRING):
+ {
+ int res;
+
+ res = (stack_star_glob_match(stack, top, "!=") != 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_S64):
+ {
+ int res;
+
+ res = (estack_bx_v == estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v != estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_S64):
+ {
+ int res;
+
+ res = (estack_bx_v > estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_S64):
+ {
+ int res;
+
+ res = (estack_bx_v < estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v >= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v <= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_DOUBLE):
+ OP(BYTECODE_OP_NE_DOUBLE):
+ OP(BYTECODE_OP_GT_DOUBLE):
+ OP(BYTECODE_OP_LT_DOUBLE):
+ OP(BYTECODE_OP_GE_DOUBLE):
+ OP(BYTECODE_OP_LE_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ /* Mixed S64-double binary comparators */
+ OP(BYTECODE_OP_EQ_DOUBLE_S64):
+ OP(BYTECODE_OP_NE_DOUBLE_S64):
+ OP(BYTECODE_OP_GT_DOUBLE_S64):
+ OP(BYTECODE_OP_LT_DOUBLE_S64):
+ OP(BYTECODE_OP_GE_DOUBLE_S64):
+ OP(BYTECODE_OP_LE_DOUBLE_S64):
+ OP(BYTECODE_OP_EQ_S64_DOUBLE):
+ OP(BYTECODE_OP_NE_S64_DOUBLE):
+ OP(BYTECODE_OP_GT_S64_DOUBLE):
+ OP(BYTECODE_OP_LT_S64_DOUBLE):
+ OP(BYTECODE_OP_GE_S64_DOUBLE):
+ OP(BYTECODE_OP_LE_S64_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_RSHIFT):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Catch undefined behavior. */
+ if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_LSHIFT):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Catch undefined behavior. */
+ if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_AND):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_OR):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_XOR):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ /* unary */
+ OP(BYTECODE_OP_UNARY_PLUS):
+ OP(BYTECODE_OP_UNARY_MINUS):
+ OP(BYTECODE_OP_UNARY_NOT):
+ printk(KERN_WARNING "LTTng: bytecode: unsupported non-specialized bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+
+ OP(BYTECODE_OP_UNARY_BIT_NOT):
+ {
+ estack_ax_v = ~(uint64_t) estack_ax_v;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_UNARY_PLUS_S64):
+ {
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_MINUS_S64):
+ {
+ estack_ax_v = -estack_ax_v;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_PLUS_DOUBLE):
+ OP(BYTECODE_OP_UNARY_MINUS_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_NOT_S64):
+ {
+ estack_ax_v = !estack_ax_v;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_NOT_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ /* logical */
+ OP(BYTECODE_OP_AND):
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ /* If AX is 0, skip and evaluate to 0 */
+ if (unlikely(estack_ax_v == 0)) {
+ dbg_printk("Jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ next_pc = start_pc + insn->skip_offset;
+ } else {
+ /* Pop 1 when jump not taken */
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ next_pc += sizeof(struct logical_op);
+ }
+ PO;
+ }
+ OP(BYTECODE_OP_OR):
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ /* If AX is nonzero, skip and evaluate to 1 */
+
+ if (unlikely(estack_ax_v != 0)) {
+ estack_ax_v = 1;
+ dbg_printk("Jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ next_pc = start_pc + insn->skip_offset;
+ } else {
+ /* Pop 1 when jump not taken */
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ next_pc += sizeof(struct logical_op);
+ }
+ PO;
+ }
+
+
+ /* load field ref */
+ OP(BYTECODE_OP_LOAD_FIELD_REF_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("load field ref offset %u type string\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str =
+ *(const char * const *) &interpreter_stack_data[ref->offset];
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->u.s.user = 0;
+ estack_ax(stack, top)->type = REG_STRING;
+ dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("load field ref offset %u type sequence\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.seq_len =
+ *(unsigned long *) &interpreter_stack_data[ref->offset];
+ estack_ax(stack, top)->u.s.str =
+ *(const char **) (&interpreter_stack_data[ref->offset
+ + sizeof(unsigned long)]);
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->u.s.user = 0;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("load field ref offset %u type s64\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v =
+ ((struct literal_numeric *) &interpreter_stack_data[ref->offset])->v;
+ estack_ax_t = REG_S64;
+ dbg_printk("ref load s64 %lld\n",
+ (long long) estack_ax_v);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ /* load from immediate operand */
+ OP(BYTECODE_OP_LOAD_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printk("load string %s\n", insn->data);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = insn->data;
+ estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_PLAIN;
+ estack_ax(stack, top)->u.s.user = 0;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_STAR_GLOB_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printk("load globbing pattern %s\n", insn->data);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = insn->data;
+ estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
+ estack_ax(stack, top)->u.s.user = 0;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = ((struct literal_numeric *) insn->data)->v;
+ estack_ax_t = REG_S64;
+ dbg_printk("load s64 %lld\n",
+ (long long) estack_ax_v);
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ /* cast */
+ OP(BYTECODE_OP_CAST_TO_S64):
+ printk(KERN_WARNING "LTTng: bytecode: unsupported non-specialized bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_CAST_DOUBLE_TO_S64):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ OP(BYTECODE_OP_CAST_NOP):
+ {
+ next_pc += sizeof(struct cast_op);
+ PO;
+ }
+
+ /* get context ref */
+ OP(BYTECODE_OP_GET_CONTEXT_REF_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ struct lttng_ctx_field *ctx_field;
+ union lttng_ctx_value v;
+
+ dbg_printk("get context ref offset %u type string\n",
+ ref->offset);
+ ctx_field = <tng_static_ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = v.str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->u.s.user = 0;
+ estack_ax(stack, top)->type = REG_STRING;
+ dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ struct lttng_ctx_field *ctx_field;
+ union lttng_ctx_value v;
+
+ dbg_printk("get context ref offset %u type s64\n",
+ ref->offset);
+ ctx_field = <tng_static_ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = v.s64;
+ estack_ax_t = REG_S64;
+ dbg_printk("ref get context s64 %lld\n",
+ (long long) estack_ax_v);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ /* load userspace field ref */
+ OP(BYTECODE_OP_LOAD_FIELD_REF_USER_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("load field ref offset %u type user string\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.user_str =
+ *(const char * const *) &interpreter_stack_data[ref->offset];
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->u.s.user = 1;
+ estack_ax(stack, top)->type = REG_STRING;
+ dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("load field ref offset %u type user sequence\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.seq_len =
+ *(unsigned long *) &interpreter_stack_data[ref->offset];
+ estack_ax(stack, top)->u.s.user_str =
+ *(const char **) (&interpreter_stack_data[ref->offset
+ + sizeof(unsigned long)]);
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->u.s.user = 1;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_ROOT):
+ {
+ dbg_printk("op get context root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax(stack, top)->type = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_APP_CONTEXT_ROOT):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_PAYLOAD_ROOT):
+ {
+ dbg_printk("op get app payload root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
+ estack_ax(stack, top)->u.ptr.ptr = interpreter_stack_data;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax(stack, top)->type = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_SYMBOL):
+ {
+ dbg_printk("op get symbol\n");
+ switch (estack_ax(stack, top)->u.ptr.type) {
+ case LOAD_OBJECT:
+ printk(KERN_WARNING "LTTng: bytecode: Nested fields not implemented yet.\n");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ /*
+ * symbol lookup is performed by
+ * specialization.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_SYMBOL_FIELD):
+ {
+ /*
+ * Used for first variant encountered in a
+ * traversal. Variants are not implemented yet.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ OP(BYTECODE_OP_GET_INDEX_U16):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printk("op get index u16\n");
+ ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_INDEX_U64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printk("op get index u64\n");
+ ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD):
+ {
+ dbg_printk("op load field\n");
+ ret = dynamic_load_field(estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_S8):
+ {
+ dbg_printk("op load field s8\n");
+
+ estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S16):
+ {
+ dbg_printk("op load field s16\n");
+
+ estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S32):
+ {
+ dbg_printk("op load field s32\n");
+
+ estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S64):
+ {
+ dbg_printk("op load field s64\n");
+
+ estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U8):
+ {
+ dbg_printk("op load field u8\n");
+
+ estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U16):
+ {
+ dbg_printk("op load field u16\n");
+
+ estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U32):
+ {
+ dbg_printk("op load field u32\n");
+
+ estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U64):
+ {
+ dbg_printk("op load field u64\n");
+
+ estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_DOUBLE):
+ {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_STRING):
+ {
+ const char *str;
+
+ dbg_printk("op load field string\n");
+ str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.str = str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_SEQUENCE):
+ {
+ const char *ptr;
+
+ dbg_printk("op load field string sequence\n");
+ ptr = estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
+ estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ END_OP
+end:
+ /* Return _DISCARD on error. */
+ if (ret)
+ return LTTNG_INTERPRETER_DISCARD;
+
+ if (output) {
+ return lttng_bytecode_interpret_format_output(
+ estack_ax(stack, top), output);
+ }
+
+ return retval;
+}
+LTTNG_STACK_FRAME_NON_STANDARD(bytecode_interpret);
+
+/*
+ * Entry point for filter bytecode evaluation: runs the interpreter over
+ * @filter_stack_data with no output formatting (NULL output argument).
+ * Returns the interpreter verdict as a uint64_t; on interpreter error the
+ * underlying bytecode_interpret() returns LTTNG_INTERPRETER_DISCARD.
+ */
+uint64_t lttng_bytecode_filter_interpret(void *filter_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *filter_stack_data)
+{
+ return bytecode_interpret(filter_data, lttng_probe_ctx,
+ filter_stack_data, NULL);
+}
+
+#undef START_OP
+#undef OP
+#undef PO
+#undef END_OP
--- /dev/null
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-bytecode-specialize.c
+ *
+ * LTTng modules bytecode code specializer.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/slab.h>
+#include <lttng/lttng-bytecode.h>
+#include <lttng/align.h>
+
+/*
+ * Reserve @len bytes, aligned on @align, inside the runtime data area,
+ * growing the backing buffer with krealloc() when needed. Newly allocated
+ * tail memory is zeroed. Returns the offset of the reserved region within
+ * runtime->data, or a negative error code (-EINVAL when the capped
+ * INTERPRETER_MAX_DATA_LEN would be exceeded, -ENOMEM on allocation
+ * failure).
+ */
+static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
+		size_t align, size_t len)
+{
+	size_t pad = offset_align(runtime->data_len, align);
+	size_t req_len = runtime->data_len + pad + len;
+	size_t cur_alloc = runtime->data_alloc_len;
+	ssize_t offset;
+
+	if (req_len > INTERPRETER_MAX_DATA_LEN)
+		return -EINVAL;
+
+	if (req_len > cur_alloc) {
+		size_t target;
+		char *grown;
+
+		/* Grow geometrically: next power of two, at least doubling. */
+		target = max_t(size_t, 1U << get_count_order(req_len),
+				cur_alloc << 1);
+		grown = krealloc(runtime->data, target, GFP_KERNEL);
+		if (!grown)
+			return -ENOMEM;
+		runtime->data = grown;
+		/* Zero the freshly allocated tail of the buffer. */
+		memset(&runtime->data[cur_alloc], 0, target - cur_alloc);
+		runtime->data_alloc_len = target;
+	}
+	offset = runtime->data_len + pad;
+	runtime->data_len = offset + len;
+	return offset;
+}
+
+/*
+ * Copy @len bytes from @p into the runtime data area at @align alignment.
+ * Returns the offset of the copied data within runtime->data, or a
+ * negative error code propagated from bytecode_reserve_data().
+ */
+static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
+ const void *p, size_t align, size_t len)
+{
+ ssize_t offset;
+
+ offset = bytecode_reserve_data(runtime, align, len);
+ if (offset < 0)
+ return offset; /* Propagate -EINVAL/-ENOMEM, don't collapse to -ENOMEM. */
+ memcpy(&runtime->data[offset], p, len);
+ return offset;
+}
+
+/*
+ * Specialize a generic BYTECODE_OP_LOAD_FIELD instruction based on the
+ * object type found on top of the virtual stack: rewrite the opcode to a
+ * type-specific load and set the stack-top register type accordingly.
+ * Only LOAD_OBJECT stack entries can be loaded; root types require a
+ * field name first. Returns 0 on success, negative error code otherwise.
+ */
+static int specialize_load_field(struct vstack_entry *stack_top,
+ struct load_op *insn)
+{
+ int ret;
+
+ switch (stack_top->load.type) {
+ case LOAD_OBJECT:
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ default:
+ dbg_printk("Bytecode warning: cannot load root, missing field name.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (stack_top->load.object_type) {
+ case OBJECT_TYPE_S8:
+ dbg_printk("op load field s8\n");
+ stack_top->type = REG_S64;
+ /* Reverse byte order fields keep the generic load opcode. */
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S8;
+ break;
+ case OBJECT_TYPE_S16:
+ dbg_printk("op load field s16\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S16;
+ break;
+ case OBJECT_TYPE_S32:
+ dbg_printk("op load field s32\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S32;
+ break;
+ case OBJECT_TYPE_S64:
+ dbg_printk("op load field s64\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S64;
+ break;
+ case OBJECT_TYPE_U8:
+ dbg_printk("op load field u8\n");
+ stack_top->type = REG_S64;
+ insn->op = BYTECODE_OP_LOAD_FIELD_U8;
+ break;
+ case OBJECT_TYPE_U16:
+ dbg_printk("op load field u16\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_U16;
+ break;
+ case OBJECT_TYPE_U32:
+ dbg_printk("op load field u32\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_U32;
+ break;
+ case OBJECT_TYPE_U64:
+ dbg_printk("op load field u64\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_U64;
+ break;
+ case OBJECT_TYPE_DOUBLE:
+ /* Fixed: message previously ended in "\n\n" (stray blank log line). */
+ printk(KERN_WARNING "LTTng: bytecode: Double type unsupported\n");
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_STRING:
+ dbg_printk("op load field string\n");
+ stack_top->type = REG_STRING;
+ insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
+ break;
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ dbg_printk("op load field string sequence\n");
+ stack_top->type = REG_STRING;
+ insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
+ break;
+ case OBJECT_TYPE_DYNAMIC:
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_SEQUENCE:
+ case OBJECT_TYPE_ARRAY:
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ printk(KERN_WARNING "LTTng: bytecode: Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
+/*
+ * Translate an integer element bit-width (@elem_len) and @signedness
+ * into the corresponding object type, stored through @otype.
+ * Returns 0 on success, -EINVAL for unsupported widths.
+ */
+static int specialize_get_index_object_type(enum object_type *otype,
+		int signedness, uint32_t elem_len)
+{
+	if (signedness) {
+		switch (elem_len) {
+		case 8:
+			*otype = OBJECT_TYPE_S8;
+			break;
+		case 16:
+			*otype = OBJECT_TYPE_S16;
+			break;
+		case 32:
+			*otype = OBJECT_TYPE_S32;
+			break;
+		case 64:
+			*otype = OBJECT_TYPE_S64;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		switch (elem_len) {
+		case 8:
+			*otype = OBJECT_TYPE_U8;
+			break;
+		case 16:
+			*otype = OBJECT_TYPE_U16;
+			break;
+		case 32:
+			*otype = OBJECT_TYPE_U32;
+			break;
+		case 64:
+			*otype = OBJECT_TYPE_U64;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Specialize an array/sequence element access: validate the element type,
+ * compute the byte offset for @index, push a bytecode_get_index_data
+ * descriptor into the runtime data area, and patch the instruction's
+ * u16/u64 index operand (@idx_len selects which) to reference it.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int specialize_get_index(struct bytecode_runtime *runtime,
+ struct load_op *insn, uint64_t index,
+ struct vstack_entry *stack_top,
+ int idx_len)
+{
+ int ret;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ memset(&gid, 0, sizeof(gid));
+ switch (stack_top->load.type) {
+ case LOAD_OBJECT:
+ switch (stack_top->load.object_type) {
+ case OBJECT_TYPE_ARRAY:
+ {
+ const struct lttng_integer_type *integer_type;
+ const struct lttng_event_field *field;
+ uint32_t elem_len, num_elems;
+ int signedness;
+
+ field = stack_top->load.field;
+ /* Only bytewise integer elements are supported. */
+ if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ integer_type = &field->type.u.array_nestable.elem_type->u.integer;
+ num_elems = field->type.u.array_nestable.length;
+ elem_len = integer_type->size;
+ signedness = integer_type->signedness;
+ /* Arrays have a static length: bounds-check at specialization time. */
+ if (index >= num_elems) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = specialize_get_index_object_type(&stack_top->load.object_type,
+ signedness, elem_len);
+ if (ret)
+ goto end;
+ gid.offset = index * (elem_len / CHAR_BIT);
+ gid.array_len = num_elems * (elem_len / CHAR_BIT);
+ gid.elem.type = stack_top->load.object_type;
+ gid.elem.len = elem_len;
+ if (integer_type->reverse_byte_order)
+ gid.elem.rev_bo = true;
+ stack_top->load.rev_bo = gid.elem.rev_bo;
+ break;
+ }
+ case OBJECT_TYPE_SEQUENCE:
+ {
+ const struct lttng_integer_type *integer_type;
+ const struct lttng_event_field *field;
+ uint32_t elem_len;
+ int signedness;
+
+ field = stack_top->load.field;
+ if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
+ elem_len = integer_type->size;
+ signedness = integer_type->signedness;
+ ret = specialize_get_index_object_type(&stack_top->load.object_type,
+ signedness, elem_len);
+ if (ret)
+ goto end;
+ /* Sequence length is dynamic: no array_len, runtime bounds check. */
+ gid.offset = index * (elem_len / CHAR_BIT);
+ gid.elem.type = stack_top->load.object_type;
+ gid.elem.len = elem_len;
+ if (integer_type->reverse_byte_order)
+ gid.elem.rev_bo = true;
+ stack_top->load.rev_bo = gid.elem.rev_bo;
+ break;
+ }
+ case OBJECT_TYPE_STRUCT:
+ /* Only generated by the specialize phase. */
+ case OBJECT_TYPE_VARIANT: /* Fall-through */
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected get index type %d\n",
+ (int) stack_top->load.object_type);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ printk(KERN_WARNING "LTTng: bytecode: Index lookup for root field not implemented yet.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (idx_len) {
+ case 2:
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ break;
+ case 8:
+ ((struct get_index_u64 *) insn->data)->index = data_offset;
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+
+ return 0;
+
+end:
+ return ret;
+}
+
+/*
+ * Resolve the symbol referenced by a get_symbol instruction to a context
+ * field index: the symbol name lives in the bytecode's relocation table
+ * at the instruction's offset. Returns the index found by
+ * lttng_get_context_index() (negative when not found).
+ */
+static int specialize_context_lookup_name(struct lttng_ctx *ctx,
+		struct bytecode_runtime *bytecode,
+		struct load_op *insn)
+{
+	const struct get_symbol *sym = (const struct get_symbol *) insn->data;
+	const char *name = bytecode->p.bc->bc.data
+			+ bytecode->p.bc->bc.reloc_offset
+			+ sym->offset;
+
+	return lttng_get_context_index(ctx, name);
+}
+
+/*
+ * Fill @load with the object type matching an event field's abstract
+ * type so later instructions can be specialized. @is_context forces
+ * array/sequence fields to be treated as strings (context arrays and
+ * sequences are exposed as strings). Returns 0 on success, -EINVAL for
+ * types that cannot be loaded.
+ */
+static int specialize_load_object(const struct lttng_event_field *field,
+ struct vstack_load *load, bool is_context)
+{
+ load->type = LOAD_OBJECT;
+
+ switch (field->type.atype) {
+ case atype_integer:
+ if (field->type.u.integer.signedness)
+ load->object_type = OBJECT_TYPE_S64;
+ else
+ load->object_type = OBJECT_TYPE_U64;
+ load->rev_bo = false;
+ break;
+ case atype_enum_nestable:
+ {
+ const struct lttng_integer_type *itype =
+ &field->type.u.enum_nestable.container_type->u.integer;
+
+ /* Enums are loaded through their integer container type. */
+ if (itype->signedness)
+ load->object_type = OBJECT_TYPE_S64;
+ else
+ load->object_type = OBJECT_TYPE_U64;
+ load->rev_bo = false;
+ break;
+ }
+ case atype_array_nestable:
+ if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
+ printk(KERN_WARNING "LTTng: bytecode: Array nesting only supports integer types.\n");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ /* Unencoded elements: true array; encoded: string sequence. */
+ if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+ load->object_type = OBJECT_TYPE_ARRAY;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+ case atype_sequence_nestable:
+ if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
+ printk(KERN_WARNING "LTTng: bytecode: Sequence nesting only supports integer types.\n");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+ load->object_type = OBJECT_TYPE_SEQUENCE;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+ case atype_string:
+ load->object_type = OBJECT_TYPE_STRING;
+ break;
+ case atype_struct_nestable:
+ printk(KERN_WARNING "LTTng: bytecode: Structure type cannot be loaded.\n");
+ return -EINVAL;
+ case atype_variant_nestable:
+ printk(KERN_WARNING "LTTng: bytecode: Variant type cannot be loaded.\n");
+ return -EINVAL;
+ default:
+ /* Fixed: message previously lacked a trailing newline. */
+ printk(KERN_WARNING "LTTng: bytecode: Unknown type: %d\n", (int) field->type.atype);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Specialize a get_symbol lookup against the static context: resolve the
+ * symbol name to a context field index, derive its load object type, and
+ * rewrite the instruction into a BYTECODE_OP_GET_INDEX_U16 whose operand
+ * points at a bytecode_get_index_data pushed into the runtime data area.
+ * Returns 0 on success, -ENOENT when the context field does not exist,
+ * negative error code otherwise.
+ */
+static int specialize_context_lookup(struct lttng_ctx *ctx,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ int idx, ret;
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_event_field *field;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ idx = specialize_context_lookup_name(ctx, runtime, insn);
+ if (idx < 0) {
+ return -ENOENT;
+ }
+ /* Fixed: "&lttng_static_ctx" was corrupted to "<tng_static_ctx"
+ * ("&lt" mis-decoded as "<"), which does not compile. */
+ ctx_field = &lttng_static_ctx->fields[idx];
+ field = &ctx_field->event_field;
+ ret = specialize_load_object(field, load, true);
+ if (ret)
+ return ret;
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = BYTECODE_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.ctx_index = idx;
+ gid.elem.type = load->object_type;
+ gid.elem.rev_bo = load->rev_bo;
+ gid.field = field;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ return -EINVAL;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ return 0;
+}
+
+/*
+ * Specialize a get_symbol lookup against the event payload: find the
+ * named field in the event descriptor while accumulating its byte offset
+ * on the interpreter stack frame, derive its load object type, and
+ * rewrite the instruction into a BYTECODE_OP_GET_INDEX_U16 whose operand
+ * points at a bytecode_get_index_data pushed into the runtime data area.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int specialize_payload_lookup(const struct lttng_event_desc *event_desc,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ const char *name;
+ uint16_t offset;
+ unsigned int i, nr_fields;
+ bool found = false;
+ uint32_t field_offset = 0;
+ const struct lttng_event_field *field;
+ int ret;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ nr_fields = event_desc->nr_fields;
+ /* The symbol name lives in the bytecode's relocation table. */
+ offset = ((struct get_symbol *) insn->data)->offset;
+ name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+ for (i = 0; i < nr_fields; i++) {
+ field = &event_desc->fields[i];
+ /* Fields marked nofilter occupy no space on the filter stack. */
+ if (field->nofilter) {
+ continue;
+ }
+ if (!strcmp(field->name, name)) {
+ found = true;
+ break;
+ }
+ /* compute field offset on stack */
+ switch (field->type.atype) {
+ case atype_integer:
+ case atype_enum_nestable:
+ field_offset += sizeof(int64_t);
+ break;
+ case atype_array_nestable:
+ case atype_sequence_nestable:
+ /* Length word followed by a data pointer. */
+ field_offset += sizeof(unsigned long);
+ field_offset += sizeof(void *);
+ break;
+ case atype_string:
+ field_offset += sizeof(void *);
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ if (!found) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = specialize_load_object(field, load, false);
+ if (ret)
+ goto end;
+
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = BYTECODE_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.offset = field_offset;
+ gid.elem.type = load->object_type;
+ gid.elem.rev_bo = load->rev_bo;
+ gid.field = field;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ ret = 0;
+end:
+ return ret;
+}
+
+int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc,
+ struct bytecode_runtime *bytecode)
+{
+ void *pc, *next_pc, *start_pc;
+ int ret = -EINVAL;
+ struct vstack _stack;
+ struct vstack *stack = &_stack;
+ struct lttng_ctx *ctx = bytecode->p.ctx;
+
+ vstack_init(stack);
+
+ start_pc = &bytecode->code[0];
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+ pc = next_pc) {
+ switch (*(bytecode_opcode_t *) pc) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ case BYTECODE_OP_RETURN:
+ case BYTECODE_OP_RETURN_S64:
+ ret = 0;
+ goto end;
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ case BYTECODE_OP_EQ:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+ insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
+ else
+ insn->op = BYTECODE_OP_EQ_STRING;
+ break;
+ case REG_STAR_GLOB_STRING:
+ insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
+ break;
+ case REG_S64:
+ if (vstack_bx(stack)->type == REG_S64)
+ insn->op = BYTECODE_OP_EQ_S64;
+ else
+ insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_S64)
+ insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_EQ_DOUBLE;
+ break;
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_NE:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+ insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
+ else
+ insn->op = BYTECODE_OP_NE_STRING;
+ break;
+ case REG_STAR_GLOB_STRING:
+ insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
+ break;
+ case REG_S64:
+ if (vstack_bx(stack)->type == REG_S64)
+ insn->op = BYTECODE_OP_NE_S64;
+ else
+ insn->op = BYTECODE_OP_NE_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_S64)
+ insn->op = BYTECODE_OP_NE_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_NE_DOUBLE;
+ break;
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_GT:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ printk(KERN_WARNING "LTTng: bytecode: invalid register type for '>' binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ insn->op = BYTECODE_OP_GT_STRING;
+ break;
+ case REG_S64:
+ if (vstack_bx(stack)->type == REG_S64)
+ insn->op = BYTECODE_OP_GT_S64;
+ else
+ insn->op = BYTECODE_OP_GT_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_S64)
+ insn->op = BYTECODE_OP_GT_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_GT_DOUBLE;
+ break;
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_LT:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ printk(KERN_WARNING "LTTng: bytecode: invalid register type for '<' binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ insn->op = BYTECODE_OP_LT_STRING;
+ break;
+ case REG_S64:
+ if (vstack_bx(stack)->type == REG_S64)
+ insn->op = BYTECODE_OP_LT_S64;
+ else
+ insn->op = BYTECODE_OP_LT_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_S64)
+ insn->op = BYTECODE_OP_LT_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_LT_DOUBLE;
+ break;
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_GE:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ printk(KERN_WARNING "LTTng: bytecode: invalid register type for '>=' binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ insn->op = BYTECODE_OP_GE_STRING;
+ break;
+ case REG_S64:
+ if (vstack_bx(stack)->type == REG_S64)
+ insn->op = BYTECODE_OP_GE_S64;
+ else
+ insn->op = BYTECODE_OP_GE_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_S64)
+ insn->op = BYTECODE_OP_GE_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_GE_DOUBLE;
+ break;
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+ case BYTECODE_OP_LE:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ printk(KERN_WARNING "LTTng: bytecode: invalid register type for '<=' binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ insn->op = BYTECODE_OP_LE_STRING;
+ break;
+ case REG_S64:
+ if (vstack_bx(stack)->type == REG_S64)
+ insn->op = BYTECODE_OP_LE_S64;
+ else
+ insn->op = BYTECODE_OP_LE_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_S64)
+ insn->op = BYTECODE_OP_LE_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_LE_DOUBLE;
+ break;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ case BYTECODE_OP_BIT_RSHIFT:
+ case BYTECODE_OP_BIT_LSHIFT:
+ case BYTECODE_OP_BIT_AND:
+ case BYTECODE_OP_BIT_OR:
+ case BYTECODE_OP_BIT_XOR:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ {
+ struct unary_op *insn = (struct unary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_S64:
+ insn->op = BYTECODE_OP_UNARY_PLUS_S64;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
+ break;
+ }
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_MINUS:
+ {
+ struct unary_op *insn = (struct unary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_S64:
+ insn->op = BYTECODE_OP_UNARY_MINUS_S64;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
+ break;
+ }
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_NOT:
+ {
+ struct unary_op *insn = (struct unary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_S64:
+ insn->op = BYTECODE_OP_UNARY_NOT_S64;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
+ break;
+ }
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ /* Continue to next instruction */
+ /* Pop 1 when jump not taken */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct logical_op);
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ printk(KERN_WARNING "LTTng: bytecode: Unknown field ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ printk(KERN_WARNING "LTTng: bytecode: Unknown get context ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_double);
+ break;
+ }
+
+ /* cast */
+ case BYTECODE_OP_CAST_TO_S64:
+ {
+ struct cast_op *insn = (struct cast_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ printk(KERN_WARNING "LTTng: bytecode: Cast op can only be applied to numeric or floating point registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ insn->op = BYTECODE_OP_CAST_NOP;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
+ break;
+ }
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+ case BYTECODE_OP_CAST_NOP:
+ {
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
+ /* Pop 1, push 1 */
+ ret = specialize_load_field(vstack_ax(stack), insn);
+ if (ret)
+ goto end;
+
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printk("op get symbol\n");
+ switch (vstack_ax(stack)->load.type) {
+ case LOAD_OBJECT:
+ printk(KERN_WARNING "LTTng: bytecode: Nested fields not implemented yet.\n");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ /* Lookup context field. */
+ ret = specialize_context_lookup(ctx, bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ case LOAD_ROOT_APP_CONTEXT:
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_PAYLOAD:
+ /* Lookup event payload field. */
+ ret = specialize_payload_lookup(event_desc,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ {
+ /* Always generated by specialize phase. */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printk("op get index u16\n");
+ /* Pop 1, push 1 */
+ ret = specialize_get_index(bytecode, insn, index->index,
+ vstack_ax(stack), sizeof(*index));
+ if (ret)
+ goto end;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printk("op get index u64\n");
+ /* Pop 1, push 1 */
+ ret = specialize_get_index(bytecode, insn, index->index,
+ vstack_ax(stack), sizeof(*index));
+ if (ret)
+ goto end;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
+ }
+ }
+end:
+ return ret;
+}
--- /dev/null
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-bytecode-validator.c
+ *
+ * LTTng modules bytecode validator.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/types.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+
+#include <wrapper/list.h>
+#include <lttng/lttng-bytecode.h>
+
+#define MERGE_POINT_TABLE_BITS 7
+#define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
+
+/* Merge point table node: one entry per conditional-jump target. */
+struct mp_node {
+	struct hlist_node node;	/* chaining within an mp_table hash bucket */
+
+	/* Context at merge point */
+	struct vstack stack;	/* expected virtual-stack state at target_pc */
+	unsigned long target_pc;	/* bytecode offset of the merge point (hash key) */
+};
+
+/* Hash table of merge points, keyed by jhash of target_pc. */
+struct mp_table {
+	struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
+};
+
+/*
+ * Hash-bucket match predicate: a merge point node matches when it
+ * targets the queried program counter. Returns 1 on match, 0 otherwise.
+ */
+static
+int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
+{
+	return mp_node->target_pc == key_pc ? 1 : 0;
+}
+
+/*
+ * Compare two virtual-stack states slot by slot.
+ * Return 0 when they agree (same depth, same register type in every
+ * occupied slot), 1 when they differ.
+ */
+static
+int merge_points_compare(const struct vstack *stacka,
+		const struct vstack *stackb)
+{
+	int idx, nr_slots;
+
+	/* Stacks of different depths can never match. */
+	if (stacka->top != stackb->top)
+		return 1;
+	nr_slots = stacka->top + 1;
+	WARN_ON_ONCE(nr_slots < 0);
+	for (idx = 0; idx < nr_slots; idx++) {
+		if (stacka->e[idx].type != stackb->e[idx].type)
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * Record a merge point (jump target) at @target_pc with the expected
+ * stack state @stack, or — when a merge point already exists for that
+ * offset — check that the recorded stack state agrees with @stack.
+ *
+ * Returns 0 on success, -EINVAL when the stack states differ,
+ * -ENOMEM when a new node cannot be allocated.
+ *
+ * Improvement over the previous version: the lookup is performed
+ * before allocating, so the "already present" path no longer performs
+ * a useless kzalloc/kfree pair, and no longer returns a spurious
+ * -ENOMEM when the node already exists but allocation would fail.
+ */
+static
+int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
+		const struct vstack *stack)
+{
+	struct mp_node *mp_node;
+	struct mp_node *lookup_node;
+	unsigned long hash = jhash_1word(target_pc, 0);
+	struct hlist_head *head;
+	int found = 0;
+
+	dbg_printk("Bytecode: adding merge point at offset %lu, hash %lu\n",
+			target_pc, hash);
+	head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
+	lttng_hlist_for_each_entry(lookup_node, head, node) {
+		if (lttng_hash_match(lookup_node, target_pc)) {
+			found = 1;
+			break;
+		}
+	}
+	if (found) {
+		/* Key already present: recorded stack state must agree. */
+		dbg_printk("Bytecode: compare merge points for offset %lu, hash %lu\n",
+				target_pc, hash);
+		if (merge_points_compare(stack, &lookup_node->stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Merge points differ for offset %lu\n",
+				target_pc);
+			return -EINVAL;
+		}
+		return 0;
+	}
+	/* Not found: allocate and insert a new merge point node. */
+	mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
+	if (!mp_node)
+		return -ENOMEM;
+	mp_node->target_pc = target_pc;
+	memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
+	hlist_add_head(&mp_node->node, head);
+	return 0;
+}
+
+/*
+ * Binary comparators use top of stack (ax) and top of stack -1 (bx).
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown until run time), negative error value on error
+ * (empty stack, type mismatch, or unsupported type).
+ */
+static
+int bin_op_compare_check(struct vstack *stack, const bytecode_opcode_t opcode,
+		const char *str)
+{
+	if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+		goto error_empty;
+
+	switch (vstack_ax(stack)->type) {
+	default:
+	case REG_DOUBLE:	/* no floating-point support in kernel bytecode */
+		goto error_type;
+
+	case REG_STRING:
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+			goto unknown;
+		case REG_STRING:
+			break;
+		case REG_STAR_GLOB_STRING:
+			/* Globbing patterns only support (in)equality. */
+			if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
+				goto error_mismatch;
+			}
+			break;
+		case REG_S64:
+		case REG_U64:
+			goto error_mismatch;
+		}
+		break;
+	case REG_STAR_GLOB_STRING:
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+			goto unknown;
+		case REG_STRING:
+			/* Globbing patterns only support (in)equality. */
+			if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
+				goto error_mismatch;
+			}
+			break;
+		case REG_STAR_GLOB_STRING:
+		case REG_S64:
+		case REG_U64:
+			/* A glob can only compare against a plain string. */
+			goto error_mismatch;
+		}
+		break;
+	case REG_S64:
+	case REG_U64:
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+			goto unknown;
+		case REG_STRING:
+		case REG_STAR_GLOB_STRING:
+			goto error_mismatch;
+		case REG_S64:
+		case REG_U64:
+			break;
+		}
+		break;
+	case REG_TYPE_UNKNOWN:
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+		case REG_STRING:
+		case REG_STAR_GLOB_STRING:
+		case REG_S64:
+		case REG_U64:
+			/* Defer full type checking to run time. */
+			goto unknown;
+		}
+		break;
+	}
+	return 0;
+
+unknown:
+	return 1;
+
+error_empty:
+	printk(KERN_WARNING "LTTng: bytecode: empty stack for '%s' binary operator\n", str);
+	return -EINVAL;
+
+error_mismatch:
+	printk(KERN_WARNING "LTTng: bytecode: type mismatch for '%s' binary operator\n", str);
+	return -EINVAL;
+
+error_type:
+	printk(KERN_WARNING "LTTng: bytecode: unknown type for '%s' binary operator\n", str);
+	return -EINVAL;
+}
+
+/*
+ * Binary bitwise operators use top of stack (ax) and top of stack -1 (bx).
+ * Both operands must be integer registers (or dynamically typed).
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ */
+static
+int bin_op_bitwise_check(struct vstack *stack, bytecode_opcode_t opcode,
+		const char *str)
+{
+	if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+		goto error_empty;
+
+	switch (vstack_ax(stack)->type) {
+	default:
+	case REG_DOUBLE:	/* bitwise ops reject floating point */
+		goto error_type;
+
+	case REG_TYPE_UNKNOWN:
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+		case REG_S64:
+		case REG_U64:
+			/* Defer full type checking to run time. */
+			goto unknown;
+		}
+		break;
+	case REG_S64:
+	case REG_U64:
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+			goto unknown;
+		case REG_S64:
+		case REG_U64:
+			break;
+		}
+		break;
+	}
+	return 0;
+
+unknown:
+	return 1;
+
+error_empty:
+	printk(KERN_WARNING "LTTng: bytecode: empty stack for '%s' binary operator\n", str);
+	return -EINVAL;
+
+error_type:
+	printk(KERN_WARNING "LTTng: bytecode: unknown type for '%s' binary operator\n", str);
+	return -EINVAL;
+}
+
+/*
+ * Check that a get_symbol immediate refers to a NUL-terminated string
+ * fully contained within the bytecode buffer.
+ * Returns 0 when valid, -EINVAL when the offset is out of range or the
+ * string is not terminated before the end of the buffer.
+ */
+static
+int validate_get_symbol(struct bytecode_runtime *bytecode,
+		const struct get_symbol *sym)
+{
+	const char *name, *limit;
+	size_t maxlen;
+
+	/* Symbol offset is relative to the start of the relocation table. */
+	if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
+		return -EINVAL;
+
+	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
+	limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
+	maxlen = limit - name;
+	/* Require a NUL terminator before the end of the bytecode area. */
+	if (strnlen(name, maxlen) == maxlen)
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ * Validate bytecode range overflow within the validation pass.
+ * Called for each instruction encountered: checks that the opcode and
+ * its fixed-size immediate data (and, for string immediates, the
+ * terminating NUL) fit entirely within the bytecode buffer, so later
+ * passes can dereference instruction fields without bounds checks.
+ * Returns 0 on success, -ERANGE on overflow, -EINVAL on an unknown or
+ * unsupported opcode.
+ */
+static
+int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
+		char *start_pc, char *pc)
+{
+	int ret = 0;
+
+	switch (*(bytecode_opcode_t *) pc) {
+	case BYTECODE_OP_UNKNOWN:
+	default:
+	{
+		printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+			(unsigned int) *(bytecode_opcode_t *) pc);
+		ret = -EINVAL;
+		break;
+	}
+
+	case BYTECODE_OP_RETURN:
+	case BYTECODE_OP_RETURN_S64:
+	{
+		if (unlikely(pc + sizeof(struct return_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	/* binary */
+	case BYTECODE_OP_MUL:
+	case BYTECODE_OP_DIV:
+	case BYTECODE_OP_MOD:
+	case BYTECODE_OP_PLUS:
+	case BYTECODE_OP_MINUS:
+	case BYTECODE_OP_EQ_DOUBLE:
+	case BYTECODE_OP_NE_DOUBLE:
+	case BYTECODE_OP_GT_DOUBLE:
+	case BYTECODE_OP_LT_DOUBLE:
+	case BYTECODE_OP_GE_DOUBLE:
+	case BYTECODE_OP_LE_DOUBLE:
+	/* Floating point */
+	case BYTECODE_OP_EQ_DOUBLE_S64:
+	case BYTECODE_OP_NE_DOUBLE_S64:
+	case BYTECODE_OP_GT_DOUBLE_S64:
+	case BYTECODE_OP_LT_DOUBLE_S64:
+	case BYTECODE_OP_GE_DOUBLE_S64:
+	case BYTECODE_OP_LE_DOUBLE_S64:
+	case BYTECODE_OP_EQ_S64_DOUBLE:
+	case BYTECODE_OP_NE_S64_DOUBLE:
+	case BYTECODE_OP_GT_S64_DOUBLE:
+	case BYTECODE_OP_LT_S64_DOUBLE:
+	case BYTECODE_OP_GE_S64_DOUBLE:
+	case BYTECODE_OP_LE_S64_DOUBLE:
+	case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+	case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+	case BYTECODE_OP_LOAD_DOUBLE:
+	case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+	case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+	case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+	case BYTECODE_OP_UNARY_NOT_DOUBLE:
+	{
+		/* Arithmetic and floating point are rejected in the kernel. */
+		printk(KERN_WARNING "LTTng: bytecode: unsupported bytecode op %u\n",
+			(unsigned int) *(bytecode_opcode_t *) pc);
+		ret = -EINVAL;
+		break;
+	}
+
+	case BYTECODE_OP_EQ:
+	case BYTECODE_OP_NE:
+	case BYTECODE_OP_GT:
+	case BYTECODE_OP_LT:
+	case BYTECODE_OP_GE:
+	case BYTECODE_OP_LE:
+	case BYTECODE_OP_EQ_STRING:
+	case BYTECODE_OP_NE_STRING:
+	case BYTECODE_OP_GT_STRING:
+	case BYTECODE_OP_LT_STRING:
+	case BYTECODE_OP_GE_STRING:
+	case BYTECODE_OP_LE_STRING:
+	case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+	case BYTECODE_OP_NE_STAR_GLOB_STRING:
+	case BYTECODE_OP_EQ_S64:
+	case BYTECODE_OP_NE_S64:
+	case BYTECODE_OP_GT_S64:
+	case BYTECODE_OP_LT_S64:
+	case BYTECODE_OP_GE_S64:
+	case BYTECODE_OP_LE_S64:
+	case BYTECODE_OP_BIT_RSHIFT:
+	case BYTECODE_OP_BIT_LSHIFT:
+	case BYTECODE_OP_BIT_AND:
+	case BYTECODE_OP_BIT_OR:
+	case BYTECODE_OP_BIT_XOR:
+	{
+		if (unlikely(pc + sizeof(struct binary_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	/* unary */
+	case BYTECODE_OP_UNARY_PLUS:
+	case BYTECODE_OP_UNARY_MINUS:
+	case BYTECODE_OP_UNARY_NOT:
+	case BYTECODE_OP_UNARY_PLUS_S64:
+	case BYTECODE_OP_UNARY_MINUS_S64:
+	case BYTECODE_OP_UNARY_NOT_S64:
+	case BYTECODE_OP_UNARY_BIT_NOT:
+	{
+		if (unlikely(pc + sizeof(struct unary_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	/* logical */
+	case BYTECODE_OP_AND:
+	case BYTECODE_OP_OR:
+	{
+		if (unlikely(pc + sizeof(struct logical_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	/* load field ref */
+	case BYTECODE_OP_LOAD_FIELD_REF:
+	{
+		/* Generic (untyped) field ref must have been specialized. */
+		printk(KERN_WARNING "LTTng: bytecode: Unknown field ref type\n");
+		ret = -EINVAL;
+		break;
+	}
+
+	/* get context ref */
+	case BYTECODE_OP_GET_CONTEXT_REF:
+	{
+		/*
+		 * NOTE(review): message says "field ref" for a get-context-ref
+		 * op; validate_instruction_context() uses "Unknown get context
+		 * ref type" for the same case — consider aligning the wording.
+		 */
+		printk(KERN_WARNING "LTTng: bytecode: Unknown field ref type\n");
+		ret = -EINVAL;
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+	case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+	case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
+	case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+	case BYTECODE_OP_LOAD_FIELD_REF_S64:
+	case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+	case BYTECODE_OP_GET_CONTEXT_REF_S64:
+	{
+		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	/* load from immediate operand */
+	case BYTECODE_OP_LOAD_STRING:
+	case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+		uint32_t str_len, maxlen;
+
+		if (unlikely(pc + sizeof(struct load_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+			break;
+		}
+
+		/* The inline string must be NUL-terminated within bounds. */
+		maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
+		str_len = strnlen(insn->data, maxlen);
+		if (unlikely(str_len >= maxlen)) {
+			/* Final '\0' not found within range */
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	case BYTECODE_OP_LOAD_S64:
+	{
+		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	case BYTECODE_OP_CAST_TO_S64:
+	case BYTECODE_OP_CAST_NOP:
+	{
+		if (unlikely(pc + sizeof(struct cast_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	/*
+	 * Instructions for recursive traversal through composed types.
+	 */
+	case BYTECODE_OP_GET_CONTEXT_ROOT:
+	case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+	case BYTECODE_OP_GET_PAYLOAD_ROOT:
+	case BYTECODE_OP_LOAD_FIELD:
+	case BYTECODE_OP_LOAD_FIELD_S8:
+	case BYTECODE_OP_LOAD_FIELD_S16:
+	case BYTECODE_OP_LOAD_FIELD_S32:
+	case BYTECODE_OP_LOAD_FIELD_S64:
+	case BYTECODE_OP_LOAD_FIELD_U8:
+	case BYTECODE_OP_LOAD_FIELD_U16:
+	case BYTECODE_OP_LOAD_FIELD_U32:
+	case BYTECODE_OP_LOAD_FIELD_U64:
+	case BYTECODE_OP_LOAD_FIELD_STRING:
+	case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+	case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+		if (unlikely(pc + sizeof(struct load_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+
+	case BYTECODE_OP_GET_SYMBOL:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+		struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+			break;
+		}
+		/* Also validate the symbol string the immediate points at. */
+		ret = validate_get_symbol(bytecode, sym);
+		break;
+	}
+
+	case BYTECODE_OP_GET_SYMBOL_FIELD:
+		/* Only emitted by the specialize phase, never received. */
+		printk(KERN_WARNING "LTTng: bytecode: Unexpected get symbol field\n");
+		ret = -EINVAL;
+		break;
+
+	case BYTECODE_OP_GET_INDEX_U16:
+		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+
+	case BYTECODE_OP_GET_INDEX_U64:
+		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Free every merge point node remaining in the table.
+ * Returns the number of nodes freed (non-zero indicates merge points
+ * that were never reached/validated).
+ */
+static
+unsigned long delete_all_nodes(struct mp_table *mp_table)
+{
+	unsigned long nr_freed = 0;
+	int bucket;
+
+	for (bucket = 0; bucket < MERGE_POINT_TABLE_SIZE; bucket++) {
+		struct hlist_head *head = &mp_table->mp_head[bucket];
+		struct mp_node *mp_node;
+		struct hlist_node *tmp;
+
+		/* Safe variant: nodes are freed while walking the chain. */
+		lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
+			kfree(mp_node);
+			nr_freed++;
+		}
+	}
+	return nr_freed;
+}
+
+/*
+ * Validate that the virtual-stack state left by the PREVIOUS instruction
+ * is acceptable as input for the instruction at @pc: operand count,
+ * register types, and (for logical ops) forward-only jump targets.
+ * Does not modify the stack.
+ *
+ * Return value:
+ * >=0: success
+ * <0: error
+ */
+static
+int validate_instruction_context(struct bytecode_runtime *bytecode,
+		struct vstack *stack,
+		char *start_pc,
+		char *pc)
+{
+	int ret = 0;
+	const bytecode_opcode_t opcode = *(bytecode_opcode_t *) pc;
+
+	switch (opcode) {
+	case BYTECODE_OP_UNKNOWN:
+	default:
+	{
+		printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+			(unsigned int) *(bytecode_opcode_t *) pc);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	case BYTECODE_OP_RETURN:
+	case BYTECODE_OP_RETURN_S64:
+	{
+		goto end;
+	}
+
+	/* binary */
+	case BYTECODE_OP_MUL:
+	case BYTECODE_OP_DIV:
+	case BYTECODE_OP_MOD:
+	case BYTECODE_OP_PLUS:
+	case BYTECODE_OP_MINUS:
+	/* Floating point */
+	case BYTECODE_OP_EQ_DOUBLE:
+	case BYTECODE_OP_NE_DOUBLE:
+	case BYTECODE_OP_GT_DOUBLE:
+	case BYTECODE_OP_LT_DOUBLE:
+	case BYTECODE_OP_GE_DOUBLE:
+	case BYTECODE_OP_LE_DOUBLE:
+	case BYTECODE_OP_EQ_DOUBLE_S64:
+	case BYTECODE_OP_NE_DOUBLE_S64:
+	case BYTECODE_OP_GT_DOUBLE_S64:
+	case BYTECODE_OP_LT_DOUBLE_S64:
+	case BYTECODE_OP_GE_DOUBLE_S64:
+	case BYTECODE_OP_LE_DOUBLE_S64:
+	case BYTECODE_OP_EQ_S64_DOUBLE:
+	case BYTECODE_OP_NE_S64_DOUBLE:
+	case BYTECODE_OP_GT_S64_DOUBLE:
+	case BYTECODE_OP_LT_S64_DOUBLE:
+	case BYTECODE_OP_GE_S64_DOUBLE:
+	case BYTECODE_OP_LE_S64_DOUBLE:
+	case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+	case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+	case BYTECODE_OP_UNARY_NOT_DOUBLE:
+	case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+	case BYTECODE_OP_LOAD_DOUBLE:
+	case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+	case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+	{
+		/* Arithmetic and floating point are rejected in the kernel. */
+		printk(KERN_WARNING "LTTng: bytecode: unsupported bytecode op %u\n",
+			(unsigned int) *(bytecode_opcode_t *) pc);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	case BYTECODE_OP_EQ:
+	{
+		ret = bin_op_compare_check(stack, opcode, "==");
+		if (ret < 0)
+			goto end;
+		break;
+	}
+	case BYTECODE_OP_NE:
+	{
+		ret = bin_op_compare_check(stack, opcode, "!=");
+		if (ret < 0)
+			goto end;
+		break;
+	}
+	case BYTECODE_OP_GT:
+	{
+		ret = bin_op_compare_check(stack, opcode, ">");
+		if (ret < 0)
+			goto end;
+		break;
+	}
+	case BYTECODE_OP_LT:
+	{
+		ret = bin_op_compare_check(stack, opcode, "<");
+		if (ret < 0)
+			goto end;
+		break;
+	}
+	case BYTECODE_OP_GE:
+	{
+		ret = bin_op_compare_check(stack, opcode, ">=");
+		if (ret < 0)
+			goto end;
+		break;
+	}
+	case BYTECODE_OP_LE:
+	{
+		ret = bin_op_compare_check(stack, opcode, "<=");
+		if (ret < 0)
+			goto end;
+		break;
+	}
+
+	/* Specialized comparators require both operands to be strings. */
+	case BYTECODE_OP_EQ_STRING:
+	case BYTECODE_OP_NE_STRING:
+	case BYTECODE_OP_GT_STRING:
+	case BYTECODE_OP_LT_STRING:
+	case BYTECODE_OP_GE_STRING:
+	case BYTECODE_OP_LE_STRING:
+	{
+		if (!vstack_ax(stack) || !vstack_bx(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		if (vstack_ax(stack)->type != REG_STRING
+				|| vstack_bx(stack)->type != REG_STRING) {
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected register type for string comparator\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	}
+
+	/* At least one operand must be a globbing pattern. */
+	case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+	case BYTECODE_OP_NE_STAR_GLOB_STRING:
+	{
+		if (!vstack_ax(stack) || !vstack_bx(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
+				&& vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected register type for globbing pattern comparator\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	}
+
+	/* Both operands must be integer registers (S64 or U64). */
+	case BYTECODE_OP_EQ_S64:
+	case BYTECODE_OP_NE_S64:
+	case BYTECODE_OP_GT_S64:
+	case BYTECODE_OP_LT_S64:
+	case BYTECODE_OP_GE_S64:
+	case BYTECODE_OP_LE_S64:
+	{
+		if (!vstack_ax(stack) || !vstack_bx(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+		case REG_U64:
+			break;
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected register type for s64 comparator\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_bx(stack)->type) {
+		case REG_S64:
+		case REG_U64:
+			break;
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected register type for s64 comparator\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	}
+
+	case BYTECODE_OP_BIT_RSHIFT:
+		ret = bin_op_bitwise_check(stack, opcode, ">>");
+		if (ret < 0)
+			goto end;
+		break;
+	case BYTECODE_OP_BIT_LSHIFT:
+		ret = bin_op_bitwise_check(stack, opcode, "<<");
+		if (ret < 0)
+			goto end;
+		break;
+	case BYTECODE_OP_BIT_AND:
+		ret = bin_op_bitwise_check(stack, opcode, "&");
+		if (ret < 0)
+			goto end;
+		break;
+	case BYTECODE_OP_BIT_OR:
+		ret = bin_op_bitwise_check(stack, opcode, "|");
+		if (ret < 0)
+			goto end;
+		break;
+	case BYTECODE_OP_BIT_XOR:
+		ret = bin_op_bitwise_check(stack, opcode, "^");
+		if (ret < 0)
+			goto end;
+		break;
+
+	/* unary */
+	case BYTECODE_OP_UNARY_PLUS:
+	case BYTECODE_OP_UNARY_MINUS:
+	case BYTECODE_OP_UNARY_NOT:
+	{
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+			ret = -EINVAL;
+			goto end;
+
+		case REG_STRING:
+		case REG_STAR_GLOB_STRING:
+			printk(KERN_WARNING "LTTng: bytecode: Unary op can only be applied to numeric or floating point registers\n");
+			ret = -EINVAL;
+			goto end;
+		case REG_S64:
+		case REG_U64:
+		case REG_TYPE_UNKNOWN:
+			break;
+		}
+		break;
+	}
+	case BYTECODE_OP_UNARY_BIT_NOT:
+	{
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+			ret = -EINVAL;
+			goto end;
+
+		case REG_STRING:
+		case REG_STAR_GLOB_STRING:
+		case REG_DOUBLE:
+			printk(KERN_WARNING "LTTng: bytecode: Unary bitwise op can only be applied to numeric registers\n");
+			ret = -EINVAL;
+			goto end;
+		case REG_S64:
+		case REG_U64:
+		case REG_TYPE_UNKNOWN:
+			break;
+		}
+		break;
+	}
+
+	case BYTECODE_OP_UNARY_PLUS_S64:
+	case BYTECODE_OP_UNARY_MINUS_S64:
+	case BYTECODE_OP_UNARY_NOT_S64:
+	{
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		if (vstack_ax(stack)->type != REG_S64 &&
+				vstack_ax(stack)->type != REG_U64) {
+			printk(KERN_WARNING "LTTng: bytecode: Invalid register type\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	}
+
+	/* logical */
+	case BYTECODE_OP_AND:
+	case BYTECODE_OP_OR:
+	{
+		struct logical_op *insn = (struct logical_op *) pc;
+
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		if (vstack_ax(stack)->type != REG_S64 &&
+				vstack_ax(stack)->type != REG_U64) {
+			printk(KERN_WARNING "LTTng: bytecode: Logical comparator expects S64 register\n");
+			ret = -EINVAL;
+			goto end;
+		}
+
+		dbg_printk("Validate jumping to bytecode offset %u\n",
+			(unsigned int) insn->skip_offset);
+		/* Forward-only jumps: rejecting backward jumps forbids loops. */
+		if (unlikely(start_pc + insn->skip_offset <= pc)) {
+			printk(KERN_WARNING "LTTng: bytecode: Loops are not allowed in bytecode\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	}
+
+	/* load field ref */
+	case BYTECODE_OP_LOAD_FIELD_REF:
+	{
+		/* Generic (untyped) field ref must have been specialized. */
+		printk(KERN_WARNING "LTTng: bytecode: Unknown field ref type\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+	case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+	case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
+	case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+		struct field_ref *ref = (struct field_ref *) insn->data;
+
+		dbg_printk("Validate load field ref offset %u type string\n",
+			ref->offset);
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_REF_S64:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+		struct field_ref *ref = (struct field_ref *) insn->data;
+
+		dbg_printk("Validate load field ref offset %u type s64\n",
+			ref->offset);
+		break;
+	}
+
+	/* load from immediate operand */
+	case BYTECODE_OP_LOAD_STRING:
+	case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+	{
+		break;
+	}
+
+	case BYTECODE_OP_LOAD_S64:
+	{
+		break;
+	}
+
+	case BYTECODE_OP_CAST_TO_S64:
+	{
+		struct cast_op *insn = (struct cast_op *) pc;
+
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+			ret = -EINVAL;
+			goto end;
+
+		case REG_STRING:
+		case REG_STAR_GLOB_STRING:
+			printk(KERN_WARNING "LTTng: bytecode: Cast op can only be applied to numeric or floating point registers\n");
+			ret = -EINVAL;
+			goto end;
+		case REG_S64:
+			break;
+		}
+		if (insn->op == BYTECODE_OP_CAST_DOUBLE_TO_S64) {
+			if (vstack_ax(stack)->type != REG_DOUBLE) {
+				printk(KERN_WARNING "LTTng: bytecode: Cast expects double\n");
+				ret = -EINVAL;
+				goto end;
+			}
+		}
+		break;
+	}
+	case BYTECODE_OP_CAST_NOP:
+	{
+		break;
+	}
+
+	/* get context ref */
+	case BYTECODE_OP_GET_CONTEXT_REF:
+	{
+		printk(KERN_WARNING "LTTng: bytecode: Unknown get context ref type\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+		struct field_ref *ref = (struct field_ref *) insn->data;
+
+		dbg_printk("Validate get context ref offset %u type string\n",
+			ref->offset);
+		break;
+	}
+	case BYTECODE_OP_GET_CONTEXT_REF_S64:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+		struct field_ref *ref = (struct field_ref *) insn->data;
+
+		dbg_printk("Validate get context ref offset %u type s64\n",
+			ref->offset);
+		break;
+	}
+
+	/*
+	 * Instructions for recursive traversal through composed types.
+	 */
+	case BYTECODE_OP_GET_CONTEXT_ROOT:
+	{
+		dbg_printk("Validate get context root\n");
+		break;
+	}
+	case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+	{
+		dbg_printk("Validate get app context root\n");
+		break;
+	}
+	case BYTECODE_OP_GET_PAYLOAD_ROOT:
+	{
+		dbg_printk("Validate get payload root\n");
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD:
+	{
+		/*
+		 * We tolerate that field type is unknown at validation,
+		 * because we are performing the load specialization in
+		 * a phase after validation.
+		 */
+		dbg_printk("Validate load field\n");
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_S8:
+	{
+		dbg_printk("Validate load field s8\n");
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_S16:
+	{
+		dbg_printk("Validate load field s16\n");
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_S32:
+	{
+		dbg_printk("Validate load field s32\n");
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_S64:
+	{
+		dbg_printk("Validate load field s64\n");
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_U8:
+	{
+		dbg_printk("Validate load field u8\n");
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_U16:
+	{
+		dbg_printk("Validate load field u16\n");
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_U32:
+	{
+		dbg_printk("Validate load field u32\n");
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_U64:
+	{
+		dbg_printk("Validate load field u64\n");
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_STRING:
+	{
+		dbg_printk("Validate load field string\n");
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+	{
+		dbg_printk("Validate load field sequence\n");
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+	{
+		dbg_printk("Validate load field double\n");
+		break;
+	}
+
+	case BYTECODE_OP_GET_SYMBOL:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+		struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+		dbg_printk("Validate get symbol offset %u\n", sym->offset);
+		break;
+	}
+
+	case BYTECODE_OP_GET_SYMBOL_FIELD:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+		struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+		dbg_printk("Validate get symbol field offset %u\n", sym->offset);
+		break;
+	}
+
+	case BYTECODE_OP_GET_INDEX_U16:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+		struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
+
+		dbg_printk("Validate get index u16 index %u\n", get_index->index);
+		break;
+	}
+
+	case BYTECODE_OP_GET_INDEX_U64:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+		struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
+
+		dbg_printk("Validate get index u64 index %llu\n",
+			(unsigned long long) get_index->index);
+		break;
+	}
+	}
+end:
+	return ret;
+}
+
+/*
+ * Validate the instruction at @pc against the current stack context,
+ * then check it against any merge point recorded for this offset.
+ * A matching merge point is removed once validated.
+ *
+ * Return value:
+ * 0: success
+ * <0: error
+ */
+static
+int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
+		struct mp_table *mp_table,
+		struct vstack *stack,
+		char *start_pc,
+		char *pc)
+{
+	unsigned long target_pc = pc - start_pc;
+	struct mp_node *mp_node, *match = NULL;
+	struct hlist_head *head;
+	unsigned long hash;
+	int ret;
+
+	/* Validate the context resulting from the previous instruction */
+	ret = validate_instruction_context(bytecode, stack, start_pc, pc);
+	if (ret < 0)
+		return ret;
+
+	/* Validate merge points */
+	hash = jhash_1word(target_pc, 0);
+	head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
+	lttng_hlist_for_each_entry(mp_node, head, node) {
+		if (lttng_hash_match(mp_node, target_pc)) {
+			match = mp_node;
+			break;
+		}
+	}
+	if (!match)
+		return 0;
+
+	dbg_printk("Bytecode: validate merge point at offset %lu\n",
+		target_pc);
+	if (merge_points_compare(stack, &match->stack)) {
+		printk(KERN_WARNING "LTTng: bytecode: Merge points differ for offset %lu\n",
+			target_pc);
+		return -EINVAL;
+	}
+	/* Once validated, we can remove the merge point */
+	dbg_printk("Bytecode: remove merge point at offset %lu\n",
+		target_pc);
+	hlist_del(&match->node);
+	return 0;
+}
+
+/*
+ * Simulate the effect of one instruction on the type-tracking virtual
+ * stack (types only, no values), record merge points for conditional
+ * branches, and compute the address of the next instruction to visit.
+ *
+ * Return value:
+ *	>0: going to next insn.
+ *	0: success, stop iteration.
+ *	<0: error
+ */
+static
+int exec_insn(struct bytecode_runtime *bytecode,
+		struct mp_table *mp_table,
+		struct vstack *stack,
+		char **_next_pc,
+		char *pc)
+{
+	int ret = 1;
+	char *next_pc = *_next_pc;
+
+	switch (*(bytecode_opcode_t *) pc) {
+	case BYTECODE_OP_UNKNOWN:
+	default:
+	{
+		printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+			(unsigned int) *(bytecode_opcode_t *) pc);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	case BYTECODE_OP_RETURN:
+	{
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+		case REG_U64:
+		case REG_DOUBLE:
+		case REG_STRING:
+		case REG_PTR:
+		case REG_TYPE_UNKNOWN:
+			break;
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d at end of bytecode\n",
+				(int) vstack_ax(stack)->type);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		ret = 0;
+		goto end;
+	}
+
+	case BYTECODE_OP_RETURN_S64:
+	{
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+		case REG_U64:
+			break;
+		default:
+		case REG_TYPE_UNKNOWN:
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d at end of bytecode\n",
+				(int) vstack_ax(stack)->type);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		ret = 0;
+		goto end;
+	}
+
+	/*
+	 * binary
+	 *
+	 * These generic arithmetic ops are never emitted after
+	 * specialization, and floating point is unsupported in kernel
+	 * context: reject them all.
+	 */
+	case BYTECODE_OP_MUL:
+	case BYTECODE_OP_DIV:
+	case BYTECODE_OP_MOD:
+	case BYTECODE_OP_PLUS:
+	case BYTECODE_OP_MINUS:
+	/* Floating point */
+	case BYTECODE_OP_EQ_DOUBLE:
+	case BYTECODE_OP_NE_DOUBLE:
+	case BYTECODE_OP_GT_DOUBLE:
+	case BYTECODE_OP_LT_DOUBLE:
+	case BYTECODE_OP_GE_DOUBLE:
+	case BYTECODE_OP_LE_DOUBLE:
+	case BYTECODE_OP_EQ_DOUBLE_S64:
+	case BYTECODE_OP_NE_DOUBLE_S64:
+	case BYTECODE_OP_GT_DOUBLE_S64:
+	case BYTECODE_OP_LT_DOUBLE_S64:
+	case BYTECODE_OP_GE_DOUBLE_S64:
+	case BYTECODE_OP_LE_DOUBLE_S64:
+	case BYTECODE_OP_EQ_S64_DOUBLE:
+	case BYTECODE_OP_NE_S64_DOUBLE:
+	case BYTECODE_OP_GT_S64_DOUBLE:
+	case BYTECODE_OP_LT_S64_DOUBLE:
+	case BYTECODE_OP_GE_S64_DOUBLE:
+	case BYTECODE_OP_LE_S64_DOUBLE:
+	case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+	case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+	case BYTECODE_OP_UNARY_NOT_DOUBLE:
+	case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+	case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+	case BYTECODE_OP_LOAD_DOUBLE:
+	case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+	{
+		printk(KERN_WARNING "LTTng: bytecode: unsupported bytecode op %u\n",
+			(unsigned int) *(bytecode_opcode_t *) pc);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	case BYTECODE_OP_EQ:
+	case BYTECODE_OP_NE:
+	case BYTECODE_OP_GT:
+	case BYTECODE_OP_LT:
+	case BYTECODE_OP_GE:
+	case BYTECODE_OP_LE:
+	case BYTECODE_OP_EQ_STRING:
+	case BYTECODE_OP_NE_STRING:
+	case BYTECODE_OP_GT_STRING:
+	case BYTECODE_OP_LT_STRING:
+	case BYTECODE_OP_GE_STRING:
+	case BYTECODE_OP_LE_STRING:
+	case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+	case BYTECODE_OP_NE_STAR_GLOB_STRING:
+	case BYTECODE_OP_EQ_S64:
+	case BYTECODE_OP_NE_S64:
+	case BYTECODE_OP_GT_S64:
+	case BYTECODE_OP_LT_S64:
+	case BYTECODE_OP_GE_S64:
+	case BYTECODE_OP_LE_S64:
+	{
+		/* Pop 2, push 1: comparison result is a boolean as S64. */
+		if (vstack_pop(stack)) {
+			ret = -EINVAL;
+			goto end;
+		}
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+		case REG_U64:
+		case REG_DOUBLE:
+		case REG_STRING:
+		case REG_STAR_GLOB_STRING:
+		case REG_TYPE_UNKNOWN:
+			break;
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d for operation\n",
+				(int) vstack_ax(stack)->type);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		vstack_ax(stack)->type = REG_S64;
+		next_pc += sizeof(struct binary_op);
+		break;
+	}
+	case BYTECODE_OP_BIT_RSHIFT:
+	case BYTECODE_OP_BIT_LSHIFT:
+	case BYTECODE_OP_BIT_AND:
+	case BYTECODE_OP_BIT_OR:
+	case BYTECODE_OP_BIT_XOR:
+	{
+		/* Pop 2, push 1: bitwise ops produce an unsigned result. */
+		if (vstack_pop(stack)) {
+			ret = -EINVAL;
+			goto end;
+		}
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+		case REG_U64:
+		case REG_DOUBLE:
+		case REG_STRING:
+		case REG_STAR_GLOB_STRING:
+		case REG_TYPE_UNKNOWN:
+			break;
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d for operation\n",
+				(int) vstack_ax(stack)->type);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		vstack_ax(stack)->type = REG_U64;
+		next_pc += sizeof(struct binary_op);
+		break;
+	}
+
+	/* unary */
+	case BYTECODE_OP_UNARY_PLUS:
+	case BYTECODE_OP_UNARY_MINUS:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+		case REG_U64:
+		case REG_TYPE_UNKNOWN:
+			break;
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d for operation\n",
+				(int) vstack_ax(stack)->type);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		/* Signedness of the result is only known after specialization. */
+		vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
+		next_pc += sizeof(struct unary_op);
+		break;
+	}
+
+	case BYTECODE_OP_UNARY_PLUS_S64:
+	case BYTECODE_OP_UNARY_MINUS_S64:
+	case BYTECODE_OP_UNARY_NOT_S64:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+		case REG_U64:
+			break;
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d for operation\n",
+				(int) vstack_ax(stack)->type);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		next_pc += sizeof(struct unary_op);
+		break;
+	}
+
+	case BYTECODE_OP_UNARY_NOT:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+		case REG_U64:
+		case REG_TYPE_UNKNOWN:
+			break;
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d for operation\n",
+				(int) vstack_ax(stack)->type);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		next_pc += sizeof(struct unary_op);
+		break;
+	}
+
+	case BYTECODE_OP_UNARY_BIT_NOT:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+		case REG_U64:
+		case REG_TYPE_UNKNOWN:
+			break;
+		case REG_DOUBLE:
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d for operation\n",
+				(int) vstack_ax(stack)->type);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		vstack_ax(stack)->type = REG_U64;
+		next_pc += sizeof(struct unary_op);
+		break;
+	}
+
+	/* logical */
+	case BYTECODE_OP_AND:
+	case BYTECODE_OP_OR:
+	{
+		struct logical_op *insn = (struct logical_op *) pc;
+		int merge_ret;
+
+		/*
+		 * Add merge point to table: the skip target must see the
+		 * same stack state as the fall-through path.
+		 */
+		merge_ret = merge_point_add_check(mp_table,
+			insn->skip_offset, stack);
+		if (merge_ret) {
+			ret = merge_ret;
+			goto end;
+		}
+
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		/* There is always a cast-to-s64 operation before a or/and op. */
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+		case REG_U64:
+			break;
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: Incorrect register type %d for operation\n",
+				(int) vstack_ax(stack)->type);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		/* Continue to next instruction */
+		/* Pop 1 when jump not taken */
+		if (vstack_pop(stack)) {
+			ret = -EINVAL;
+			goto end;
+		}
+		next_pc += sizeof(struct logical_op);
+		break;
+	}
+
+	/* load field ref */
+	case BYTECODE_OP_LOAD_FIELD_REF:
+	{
+		/* Generic ref must have been specialized by link phase. */
+		printk(KERN_WARNING "LTTng: bytecode: Unknown field ref type\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	/* get context ref */
+	case BYTECODE_OP_GET_CONTEXT_REF:
+	{
+		printk(KERN_WARNING "LTTng: bytecode: Unknown get context ref type\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+	case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+	case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+	case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
+	case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+	{
+		if (vstack_push(stack)) {
+			ret = -EINVAL;
+			goto end;
+		}
+		vstack_ax(stack)->type = REG_STRING;
+		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_REF_S64:
+	case BYTECODE_OP_GET_CONTEXT_REF_S64:
+	{
+		if (vstack_push(stack)) {
+			ret = -EINVAL;
+			goto end;
+		}
+		vstack_ax(stack)->type = REG_S64;
+		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+		break;
+	}
+
+	/* load from immediate operand */
+	case BYTECODE_OP_LOAD_STRING:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+
+		if (vstack_push(stack)) {
+			ret = -EINVAL;
+			goto end;
+		}
+		vstack_ax(stack)->type = REG_STRING;
+		/* Immediate string operand is inline, NUL-terminated. */
+		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+		break;
+	}
+
+	case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+
+		if (vstack_push(stack)) {
+			ret = -EINVAL;
+			goto end;
+		}
+		vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+		break;
+	}
+
+	case BYTECODE_OP_LOAD_S64:
+	{
+		if (vstack_push(stack)) {
+			ret = -EINVAL;
+			goto end;
+		}
+		vstack_ax(stack)->type = REG_S64;
+		next_pc += sizeof(struct load_op)
+				+ sizeof(struct literal_numeric);
+		break;
+	}
+
+	case BYTECODE_OP_CAST_TO_S64:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+		case REG_U64:
+		case REG_DOUBLE:
+		case REG_TYPE_UNKNOWN:
+			break;
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: Incorrect register type %d for cast\n",
+				(int) vstack_ax(stack)->type);
+			ret = -EINVAL;
+			goto end;
+		}
+		vstack_ax(stack)->type = REG_S64;
+		next_pc += sizeof(struct cast_op);
+		break;
+	}
+	case BYTECODE_OP_CAST_NOP:
+	{
+		next_pc += sizeof(struct cast_op);
+		break;
+	}
+
+	/*
+	 * Instructions for recursive traversal through composed types.
+	 */
+	case BYTECODE_OP_GET_CONTEXT_ROOT:
+	case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+	case BYTECODE_OP_GET_PAYLOAD_ROOT:
+	{
+		if (vstack_push(stack)) {
+			ret = -EINVAL;
+			goto end;
+		}
+		vstack_ax(stack)->type = REG_PTR;
+		next_pc += sizeof(struct load_op);
+		break;
+	}
+
+	case BYTECODE_OP_LOAD_FIELD:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		if (vstack_ax(stack)->type != REG_PTR) {
+			printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
+		next_pc += sizeof(struct load_op);
+		break;
+	}
+
+	case BYTECODE_OP_LOAD_FIELD_S8:
+	case BYTECODE_OP_LOAD_FIELD_S16:
+	case BYTECODE_OP_LOAD_FIELD_S32:
+	case BYTECODE_OP_LOAD_FIELD_S64:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		if (vstack_ax(stack)->type != REG_PTR) {
+			printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		vstack_ax(stack)->type = REG_S64;
+		next_pc += sizeof(struct load_op);
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_U8:
+	case BYTECODE_OP_LOAD_FIELD_U16:
+	case BYTECODE_OP_LOAD_FIELD_U32:
+	case BYTECODE_OP_LOAD_FIELD_U64:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		if (vstack_ax(stack)->type != REG_PTR) {
+			printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		vstack_ax(stack)->type = REG_U64;
+		next_pc += sizeof(struct load_op);
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_STRING:
+	case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		if (vstack_ax(stack)->type != REG_PTR) {
+			printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		vstack_ax(stack)->type = REG_STRING;
+		next_pc += sizeof(struct load_op);
+		break;
+	}
+
+	case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		if (vstack_ax(stack)->type != REG_PTR) {
+			printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		vstack_ax(stack)->type = REG_DOUBLE;
+		next_pc += sizeof(struct load_op);
+		break;
+	}
+
+	case BYTECODE_OP_GET_SYMBOL:
+	case BYTECODE_OP_GET_SYMBOL_FIELD:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		if (vstack_ax(stack)->type != REG_PTR) {
+			printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+		break;
+	}
+
+	case BYTECODE_OP_GET_INDEX_U16:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		if (vstack_ax(stack)->type != REG_PTR) {
+			printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+		break;
+	}
+
+	case BYTECODE_OP_GET_INDEX_U64:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		if (vstack_ax(stack)->type != REG_PTR) {
+			printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+		break;
+	}
+
+	}
+end:
+	*_next_pc = next_pc;
+	return ret;
+}
+
+/*
+ * Validate a bytecode runtime before accepting it for execution: walk
+ * every instruction, checking operand bounds, operand types on the
+ * type-tracking virtual stack, and consistency of all branch merge
+ * points.
+ *
+ * Returns 0 on success, a negative errno value on malformed bytecode.
+ * Note: ret is preinitialized to -EINVAL, so zero-length bytecode
+ * (loop body never entered) is rejected.
+ *
+ * Never called concurrently (hash seed is shared).
+ */
+int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
+{
+	struct mp_table *mp_table;
+	char *pc, *next_pc, *start_pc;
+	int ret = -EINVAL;
+	struct vstack stack;
+
+	vstack_init(&stack);
+
+	/* Hash table of merge points (branch targets) seen so far. */
+	mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
+	if (!mp_table) {
+		printk(KERN_WARNING "LTTng: bytecode: Error allocating hash table for bytecode validation\n");
+		return -ENOMEM;
+	}
+	start_pc = &bytecode->code[0];
+	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+			pc = next_pc) {
+		/* The instruction and its operands must fit in the bytecode. */
+		ret = bytecode_validate_overflow(bytecode, start_pc, pc);
+		if (ret != 0) {
+			if (ret == -ERANGE)
+				printk(KERN_WARNING "LTTng: bytecode: bytecode overflow\n");
+			goto end;
+		}
+		dbg_printk("Validating op %s (%u)\n",
+			lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc),
+			(unsigned int) *(bytecode_opcode_t *) pc);
+
+		/*
+		 * For each instruction, validate the current context
+		 * (traversal of entire execution flow), and validate
+		 * all merge points targeting this instruction.
+		 */
+		ret = validate_instruction_all_contexts(bytecode, mp_table,
+				&stack, start_pc, pc);
+		if (ret)
+			goto end;
+		/* exec_insn() advances next_pc; returns 0 on a RETURN op. */
+		ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
+		if (ret <= 0)
+			goto end;
+	}
+end:
+	/* Every recorded merge point must have been consumed above. */
+	if (delete_all_nodes(mp_table)) {
+		if (!ret) {
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected merge points\n");
+			ret = -EINVAL;
+		}
+	}
+	kfree(mp_table);
+	return ret;
+}
--- /dev/null
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-bytecode.c
+ *
+ * LTTng modules bytecode code.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <lttng/lttng-bytecode.h>
+
+/*
+ * Printable name for each bytecode opcode, indexed by the
+ * enum bytecode_op value.  Consumed by lttng_bytecode_print_op() for
+ * debug output.  Keep in sync with enum bytecode_op.
+ */
+static const char *opnames[] = {
+	[ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",
+
+	[ BYTECODE_OP_RETURN ] = "RETURN",
+
+	/* binary */
+	[ BYTECODE_OP_MUL ] = "MUL",
+	[ BYTECODE_OP_DIV ] = "DIV",
+	[ BYTECODE_OP_MOD ] = "MOD",
+	[ BYTECODE_OP_PLUS ] = "PLUS",
+	[ BYTECODE_OP_MINUS ] = "MINUS",
+	[ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
+	[ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
+	[ BYTECODE_OP_BIT_AND ] = "BIT_AND",
+	[ BYTECODE_OP_BIT_OR ] = "BIT_OR",
+	[ BYTECODE_OP_BIT_XOR ] = "BIT_XOR",
+
+	/* binary comparators */
+	[ BYTECODE_OP_EQ ] = "EQ",
+	[ BYTECODE_OP_NE ] = "NE",
+	[ BYTECODE_OP_GT ] = "GT",
+	[ BYTECODE_OP_LT ] = "LT",
+	[ BYTECODE_OP_GE ] = "GE",
+	[ BYTECODE_OP_LE ] = "LE",
+
+	/* string binary comparators */
+	[ BYTECODE_OP_EQ_STRING ] = "EQ_STRING",
+	[ BYTECODE_OP_NE_STRING ] = "NE_STRING",
+	[ BYTECODE_OP_GT_STRING ] = "GT_STRING",
+	[ BYTECODE_OP_LT_STRING ] = "LT_STRING",
+	[ BYTECODE_OP_GE_STRING ] = "GE_STRING",
+	[ BYTECODE_OP_LE_STRING ] = "LE_STRING",
+
+	/* s64 binary comparators */
+	[ BYTECODE_OP_EQ_S64 ] = "EQ_S64",
+	[ BYTECODE_OP_NE_S64 ] = "NE_S64",
+	[ BYTECODE_OP_GT_S64 ] = "GT_S64",
+	[ BYTECODE_OP_LT_S64 ] = "LT_S64",
+	[ BYTECODE_OP_GE_S64 ] = "GE_S64",
+	[ BYTECODE_OP_LE_S64 ] = "LE_S64",
+
+	/* double binary comparators */
+	[ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
+	[ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE",
+	[ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE",
+	[ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE",
+	[ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE",
+	[ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE",
+
+	/* Mixed S64-double binary comparators */
+	[ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
+	[ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
+	[ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
+	[ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
+	[ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
+	[ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
+
+	[ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
+	[ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
+	[ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
+	[ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
+	[ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
+	[ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
+
+	/* unary */
+	[ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS",
+	[ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS",
+	[ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT",
+	[ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
+	[ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
+	[ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
+	[ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
+	[ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
+	[ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
+
+	/* logical */
+	[ BYTECODE_OP_AND ] = "AND",
+	[ BYTECODE_OP_OR ] = "OR",
+
+	/* load field ref */
+	[ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
+	[ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
+	[ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
+	[ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
+	[ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
+
+	/* load from immediate operand */
+	[ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING",
+	[ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64",
+	[ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
+
+	/* cast */
+	[ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64",
+	[ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
+	[ BYTECODE_OP_CAST_NOP ] = "CAST_NOP",
+
+	/* get context ref */
+	[ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
+	[ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
+	[ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
+	[ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
+
+	/* load userspace field ref */
+	[ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
+	[ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
+
+	/*
+	 * load immediate star globbing pattern (literal string)
+	 * from immediate.
+	 */
+	[ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
+
+	/* globbing pattern binary operator: apply to */
+	[ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
+	[ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
+
+	/*
+	 * Instructions for recursive traversal through composed types.
+	 */
+	[ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
+	[ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
+	[ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
+
+	[ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL",
+	[ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
+	[ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
+	[ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
+
+	[ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD",
+	[ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
+	[ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
+	[ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
+	[ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
+	[ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
+	[ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
+	[ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
+	[ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
+	[ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
+	[ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
+	[ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
+
+	[ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
+
+	[ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64",
+};
+
+/*
+ * Map a bytecode opcode to its printable name for debug output.
+ *
+ * The cast to unsigned rejects negative enum values as well (the
+ * underlying type of enum bytecode_op is implementation-defined and
+ * may be signed), avoiding an out-of-bounds read of opnames[].
+ */
+const char *lttng_bytecode_print_op(enum bytecode_op op)
+{
+	if ((unsigned int) op >= NR_BYTECODE_OPS)
+		return "UNKNOWN";
+	else
+		return opnames[op];
+}
+
+/*
+ * Patch a BYTECODE_OP_LOAD_FIELD_REF instruction at @reloc_offset in
+ * @runtime->code: look up @field_name among the event's payload fields,
+ * specialize the opcode according to the field type, and store the
+ * accumulated field offset in the instruction's field_ref operand.
+ *
+ * Returns 0 on success, -EINVAL on unknown field, unsupported field
+ * type, or offset overflow.
+ */
+static
+int apply_field_reloc(const struct lttng_event_desc *event_desc,
+		struct bytecode_runtime *runtime,
+		uint32_t runtime_len,
+		uint32_t reloc_offset,
+		const char *field_name,
+		enum bytecode_op bytecode_op)
+{
+	const struct lttng_event_field *fields, *field = NULL;
+	unsigned int nr_fields, i;
+	struct load_op *op;
+	uint32_t field_offset = 0;
+
+	dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);
+
+	/* Lookup event by name */
+	if (!event_desc)
+		return -EINVAL;
+	fields = event_desc->fields;
+	if (!fields)
+		return -EINVAL;
+	nr_fields = event_desc->nr_fields;
+	for (i = 0; i < nr_fields; i++) {
+		/* Fields excluded from filtering do not consume offset space. */
+		if (fields[i].nofilter)
+			continue;
+		if (!strcmp(fields[i].name, field_name)) {
+			field = &fields[i];
+			break;
+		}
+		/*
+		 * compute field offset
+		 *
+		 * NOTE(review): the per-type sizes below presumably mirror
+		 * the interpreter-side layout of filterable fields
+		 * (integers as int64_t, sequences as length + pointer,
+		 * strings as pointer) — confirm against the interpreter.
+		 */
+		switch (fields[i].type.atype) {
+		case atype_integer:
+		case atype_enum_nestable:
+			field_offset += sizeof(int64_t);
+			break;
+		case atype_array_nestable:
+			/* Only arrays of bytewise integers are filterable. */
+			if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
+				return -EINVAL;
+			field_offset += sizeof(unsigned long);
+			field_offset += sizeof(void *);
+			break;
+		case atype_sequence_nestable:
+			if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
+				return -EINVAL;
+			field_offset += sizeof(unsigned long);
+			field_offset += sizeof(void *);
+			break;
+		case atype_string:
+			field_offset += sizeof(void *);
+			break;
+		case atype_struct_nestable: /* Unsupported. */
+		case atype_variant_nestable: /* Unsupported. */
+		default:
+			return -EINVAL;
+		}
+	}
+	if (!field)
+		return -EINVAL;
+
+	/* Check if field offset is too large for 16-bit offset */
+	if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
+		return -EINVAL;
+
+	/* set type */
+	op = (struct load_op *) &runtime->code[reloc_offset];
+
+	switch (bytecode_op) {
+	case BYTECODE_OP_LOAD_FIELD_REF:
+	{
+		struct field_ref *field_ref;
+
+		field_ref = (struct field_ref *) op->data;
+		/* Specialize the generic field ref opcode on the field type. */
+		switch (field->type.atype) {
+		case atype_integer:
+		case atype_enum_nestable:
+			op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
+			break;
+		case atype_array_nestable:
+		case atype_sequence_nestable:
+			/* User-space fields need the user-access variant. */
+			if (field->user)
+				op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE;
+			else
+				op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
+			break;
+		case atype_string:
+			if (field->user)
+				op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_STRING;
+			else
+				op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
+			break;
+		case atype_struct_nestable: /* Unsupported. */
+		case atype_variant_nestable: /* Unsupported. */
+		default:
+			return -EINVAL;
+		}
+		/* set offset */
+		field_ref->offset = (uint16_t) field_offset;
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Patch a BYTECODE_OP_GET_CONTEXT_REF instruction at @reloc_offset in
+ * @runtime->code: resolve @context_name against the static context
+ * table, specialize the opcode according to the context field type,
+ * and store the context index in the instruction's field_ref operand.
+ *
+ * Returns 0 on success, -ENOENT if the context does not exist,
+ * -EINVAL on unsupported type or index overflow.
+ */
+static
+int apply_context_reloc(struct bytecode_runtime *runtime,
+		uint32_t runtime_len,
+		uint32_t reloc_offset,
+		const char *context_name,
+		enum bytecode_op bytecode_op)
+{
+	struct load_op *op;
+	struct lttng_ctx_field *ctx_field;
+	int idx;
+
+	dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);
+
+	/* Get context index */
+	idx = lttng_get_context_index(lttng_static_ctx, context_name);
+	if (idx < 0)
+		return -ENOENT;
+
+	/* Check if idx is too large for 16-bit offset */
+	if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
+		return -EINVAL;
+
+	/* Get context return type */
+	ctx_field = &lttng_static_ctx->fields[idx];
+	op = (struct load_op *) &runtime->code[reloc_offset];
+
+	switch (bytecode_op) {
+	case BYTECODE_OP_GET_CONTEXT_REF:
+	{
+		struct field_ref *field_ref;
+
+		field_ref = (struct field_ref *) op->data;
+		/* Specialize the generic context ref opcode on the type. */
+		switch (ctx_field->event_field.type.atype) {
+		case atype_integer:
+		case atype_enum_nestable:
+			op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
+			break;
+		/* Sequence and array supported as string */
+		case atype_string:
+			/* Context fields are never user-space accessed. */
+			BUG_ON(ctx_field->event_field.user);
+			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+			break;
+		case atype_array_nestable:
+			if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
+				return -EINVAL;
+			BUG_ON(ctx_field->event_field.user);
+			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+			break;
+		case atype_sequence_nestable:
+			if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
+				return -EINVAL;
+			BUG_ON(ctx_field->event_field.user);
+			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+			break;
+		case atype_struct_nestable: /* Unsupported. */
+		case atype_variant_nestable: /* Unsupported. */
+		default:
+			return -EINVAL;
+		}
+		/* set offset to context index within channel contexts */
+		field_ref->offset = (uint16_t) idx;
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Apply one relocation-table entry (@reloc_offset, @name) to the
+ * runtime bytecode, dispatching on the opcode found at the target
+ * offset.
+ *
+ * Returns 0 on success, -EINVAL on out-of-range offset or unknown
+ * reloc opcode.
+ */
+static
+int apply_reloc(const struct lttng_event_desc *event_desc,
+		struct bytecode_runtime *runtime,
+		uint32_t runtime_len,
+		uint32_t reloc_offset,
+		const char *name)
+{
+	struct load_op *op;
+
+	dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);
+
+	/*
+	 * Ensure that the reloc is within the code.  reloc_offset comes
+	 * from user-supplied bytecode, so guard against unsigned wrap in
+	 * the subtraction: reloc_offset > runtime_len would make
+	 * runtime_len - reloc_offset wrap to a huge value and bypass the
+	 * bounds check.
+	 */
+	if (runtime_len < sizeof(uint16_t) ||
+			reloc_offset > runtime_len - sizeof(uint16_t))
+		return -EINVAL;
+
+	op = (struct load_op *) &runtime->code[reloc_offset];
+	switch (op->op) {
+	case BYTECODE_OP_LOAD_FIELD_REF:
+		return apply_field_reloc(event_desc, runtime, runtime_len,
+				reloc_offset, name, op->op);
+	case BYTECODE_OP_GET_CONTEXT_REF:
+		return apply_context_reloc(runtime, runtime_len,
+				reloc_offset, name, op->op);
+	case BYTECODE_OP_GET_SYMBOL:
+	case BYTECODE_OP_GET_SYMBOL_FIELD:
+		/*
+		 * Will be handled by load specialize phase or
+		 * dynamically by interpreter.
+		 */
+		return 0;
+	default:
+		printk(KERN_WARNING "LTTng: bytecode: Unknown reloc op type %u\n",
+			op->op);
+		return -EINVAL;
+	}
+}
+
+/*
+ * Return 1 if @bytecode already has a runtime instance on
+ * @bytecode_runtime_head, 0 otherwise.
+ */
+static
+int bytecode_is_linked(struct lttng_bytecode_node *bytecode,
+		struct list_head *bytecode_runtime_head)
+{
+	struct lttng_bytecode_runtime *runtime;
+
+	list_for_each_entry(runtime, bytecode_runtime_head, node) {
+		if (runtime->bc != bytecode)
+			continue;
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Take a bytecode with reloc table and link it to an event to create a
+ * bytecode runtime.
+ *
+ * On linking failure, the runtime is still inserted on @insert_loc,
+ * with its interpreter set to the always-false stub and link_failed
+ * set, so the event keeps a well-defined (reject-all) filter.
+ */
+static
+int _lttng_filter_link_bytecode(const struct lttng_event_desc *event_desc,
+		struct lttng_ctx *ctx,
+		struct lttng_bytecode_node *bytecode,
+		struct list_head *insert_loc)
+{
+	int ret, offset, next_offset;
+	struct bytecode_runtime *runtime = NULL;
+	size_t runtime_alloc_len;
+
+	if (!bytecode)
+		return 0;
+	/* Bytecode already linked */
+	if (bytecode_is_linked(bytecode, insert_loc))
+		return 0;
+
+	dbg_printk("Linking...\n");
+
+	/*
+	 * We don't need the reloc table in the runtime: bc.reloc_offset
+	 * is where the code ends and the reloc table begins.
+	 */
+	runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
+	runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
+	if (!runtime) {
+		ret = -ENOMEM;
+		goto alloc_error;
+	}
+	runtime->p.bc = bytecode;
+	runtime->p.ctx = ctx;
+	runtime->len = bytecode->bc.reloc_offset;
+	/* copy original bytecode */
+	memcpy(runtime->code, bytecode->bc.data, runtime->len);
+	/*
+	 * apply relocs. Those are a uint16_t (offset in bytecode)
+	 * followed by a string (field name).
+	 */
+	for (offset = bytecode->bc.reloc_offset;
+			offset < bytecode->bc.len;
+			offset = next_offset) {
+		uint16_t reloc_offset =
+			*(uint16_t *) &bytecode->bc.data[offset];
+		const char *name =
+			(const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];
+
+		ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
+		if (ret) {
+			goto link_error;
+		}
+		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
+	}
+	/* Validate bytecode */
+	ret = lttng_bytecode_validate(runtime);
+	if (ret) {
+		goto link_error;
+	}
+	/* Specialize bytecode */
+	ret = lttng_bytecode_specialize(event_desc, runtime);
+	if (ret) {
+		goto link_error;
+	}
+	runtime->p.filter = lttng_bytecode_filter_interpret;
+	runtime->p.link_failed = 0;
+	list_add_rcu(&runtime->p.node, insert_loc);
+	dbg_printk("Linking successful.\n");
+	return 0;
+
+link_error:
+	/* Keep the runtime on the list, but as an always-false filter. */
+	runtime->p.filter = lttng_bytecode_filter_interpret_false;
+	runtime->p.link_failed = 1;
+	list_add_rcu(&runtime->p.node, insert_loc);
+alloc_error:
+	dbg_printk("Linking failed.\n");
+	return ret;
+}
+
+/*
+ * Synchronize the runtime's interpreter entry point with the enabler
+ * state: use the real interpreter only when the enabler is enabled and
+ * linking succeeded, otherwise fall back to the always-false stub.
+ */
+void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
+{
+	struct lttng_bytecode_node *bc = runtime->bc;
+	int active = bc->enabler->enabled && !runtime->link_failed;
+
+	runtime->filter = active ? lttng_bytecode_filter_interpret
+			: lttng_bytecode_filter_interpret_false;
+}
+
+/*
+ * Link bytecode for all enablers referenced by an event.
+ */
+void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
+ struct lttng_ctx *ctx,
+ struct list_head *bytecode_runtime_head,
+ struct lttng_enabler *enabler)
+{
+ struct lttng_bytecode_node *bc;
+ struct lttng_bytecode_runtime *runtime;
+
+ /* Can only be called for events with desc attached */
+ WARN_ON_ONCE(!event_desc);
+
+ /* Link each bytecode. */
+ list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
+ int found = 0, ret;
+ struct list_head *insert_loc;
+
+ list_for_each_entry(runtime,
+ bytecode_runtime_head, node) {
+ if (runtime->bc == bc) {
+ found = 1;
+ break;
+ }
+ }
+ /* Skip bytecode already linked */
+ if (found)
+ continue;
+
+ /*
+ * Insert at specified priority (seqnum) in increasing
+ * order. If there already is a bytecode of the same priority,
+ * insert the new bytecode right after it.
+ */
+ list_for_each_entry_reverse(runtime,
+ bytecode_runtime_head, node) {
+ if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
+ /* insert here */
+ insert_loc = &runtime->node;
+ goto add_within;
+ }
+ }
+ /* Add to head of list */
+ insert_loc = bytecode_runtime_head;
+ add_within:
+ dbg_printk("linking bytecode\n");
+ ret = _lttng_filter_link_bytecode(event_desc, ctx, bc,
+ insert_loc);
+ if (ret) {
+ dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
+ }
+ }
+}
+
+/*
+ * We own the filter_bytecode if we return success.
+ */
+int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
+ struct lttng_bytecode_node *filter_bytecode)
+{
+ list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
+ return 0;
+}
+
+void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
+{
+ struct lttng_bytecode_node *filter_bytecode, *tmp;
+
+ list_for_each_entry_safe(filter_bytecode, tmp,
+ &enabler->filter_bytecode_head, node) {
+ kfree(filter_bytecode);
+ }
+}
+
+void lttng_free_event_filter_runtime(struct lttng_event *event)
+{
+ struct bytecode_runtime *runtime, *tmp;
+
+ list_for_each_entry_safe(runtime, tmp,
+ &event->filter_bytecode_runtime_head, p.node) {
+ kfree(runtime->data);
+ kfree(runtime);
+ }
+}
+++ /dev/null
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter-interpreter.c
- *
- * LTTng modules filter interpreter.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <wrapper/uaccess.h>
-#include <wrapper/objtool.h>
-#include <wrapper/types.h>
-#include <linux/swab.h>
-
-#include <lttng/filter.h>
-#include <lttng/string-utils.h>
-
-/*
- * get_char should be called with page fault handler disabled if it is expected
- * to handle user-space read.
- */
-static
-char get_char(struct estack_entry *reg, size_t offset)
-{
- if (unlikely(offset >= reg->u.s.seq_len))
- return '\0';
- if (reg->u.s.user) {
- char c;
-
- /* Handle invalid access as end of string. */
- if (unlikely(!lttng_access_ok(VERIFY_READ,
- reg->u.s.user_str + offset,
- sizeof(c))))
- return '\0';
- /* Handle fault (nonzero return value) as end of string. */
- if (unlikely(__copy_from_user_inatomic(&c,
- reg->u.s.user_str + offset,
- sizeof(c))))
- return '\0';
- return c;
- } else {
- return reg->u.s.str[offset];
- }
-}
-
-/*
- * -1: wildcard found.
- * -2: unknown escape char.
- * 0: normal char.
- */
-static
-int parse_char(struct estack_entry *reg, char *c, size_t *offset)
-{
- switch (*c) {
- case '\\':
- (*offset)++;
- *c = get_char(reg, *offset);
- switch (*c) {
- case '\\':
- case '*':
- return 0;
- default:
- return -2;
- }
- case '*':
- return -1;
- default:
- return 0;
- }
-}
-
-static
-char get_char_at_cb(size_t at, void *data)
-{
- return get_char(data, at);
-}
-
-static
-int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
-{
- bool has_user = false;
- int result;
- struct estack_entry *pattern_reg;
- struct estack_entry *candidate_reg;
-
- /* Disable the page fault handler when reading from userspace. */
- if (estack_bx(stack, top)->u.s.user
- || estack_ax(stack, top)->u.s.user) {
- has_user = true;
- pagefault_disable();
- }
-
- /* Find out which side is the pattern vs. the candidate. */
- if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
- pattern_reg = estack_ax(stack, top);
- candidate_reg = estack_bx(stack, top);
- } else {
- pattern_reg = estack_bx(stack, top);
- candidate_reg = estack_ax(stack, top);
- }
-
- /* Perform the match operation. */
- result = !strutils_star_glob_match_char_cb(get_char_at_cb,
- pattern_reg, get_char_at_cb, candidate_reg);
- if (has_user)
- pagefault_enable();
-
- return result;
-}
-
-static
-int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
-{
- size_t offset_bx = 0, offset_ax = 0;
- int diff, has_user = 0;
-
- if (estack_bx(stack, top)->u.s.user
- || estack_ax(stack, top)->u.s.user) {
- has_user = 1;
- pagefault_disable();
- }
-
- for (;;) {
- int ret;
- int escaped_r0 = 0;
- char char_bx, char_ax;
-
- char_bx = get_char(estack_bx(stack, top), offset_bx);
- char_ax = get_char(estack_ax(stack, top), offset_ax);
-
- if (unlikely(char_bx == '\0')) {
- if (char_ax == '\0') {
- diff = 0;
- break;
- } else {
- if (estack_ax(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(estack_ax(stack, top),
- &char_ax, &offset_ax);
- if (ret == -1) {
- diff = 0;
- break;
- }
- }
- diff = -1;
- break;
- }
- }
- if (unlikely(char_ax == '\0')) {
- if (estack_bx(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(estack_bx(stack, top),
- &char_bx, &offset_bx);
- if (ret == -1) {
- diff = 0;
- break;
- }
- }
- diff = 1;
- break;
- }
- if (estack_bx(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(estack_bx(stack, top),
- &char_bx, &offset_bx);
- if (ret == -1) {
- diff = 0;
- break;
- } else if (ret == -2) {
- escaped_r0 = 1;
- }
- /* else compare both char */
- }
- if (estack_ax(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(estack_ax(stack, top),
- &char_ax, &offset_ax);
- if (ret == -1) {
- diff = 0;
- break;
- } else if (ret == -2) {
- if (!escaped_r0) {
- diff = -1;
- break;
- }
- } else {
- if (escaped_r0) {
- diff = 1;
- break;
- }
- }
- } else {
- if (escaped_r0) {
- diff = 1;
- break;
- }
- }
- diff = char_bx - char_ax;
- if (diff != 0)
- break;
- offset_bx++;
- offset_ax++;
- }
- if (has_user)
- pagefault_enable();
-
- return diff;
-}
-
-uint64_t lttng_filter_interpret_bytecode_false(void *filter_data,
- struct lttng_probe_ctx *lttng_probe_ctx,
- const char *filter_stack_data)
-{
- return LTTNG_FILTER_DISCARD;
-}
-
-#ifdef INTERPRETER_USE_SWITCH
-
-/*
- * Fallback for compilers that do not support taking address of labels.
- */
-
-#define START_OP \
- start_pc = &bytecode->data[0]; \
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
- pc = next_pc) { \
- dbg_printk("LTTng: Executing op %s (%u)\n", \
- lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
- (unsigned int) *(filter_opcode_t *) pc); \
- switch (*(filter_opcode_t *) pc) {
-
-#define OP(name) case name
-
-#define PO break
-
-#define END_OP } \
- }
-
-#else
-
-/*
- * Dispatch-table based interpreter.
- */
-
-#define START_OP \
- start_pc = &bytecode->code[0]; \
- pc = next_pc = start_pc; \
- if (unlikely(pc - start_pc >= bytecode->len)) \
- goto end; \
- goto *dispatch[*(filter_opcode_t *) pc];
-
-#define OP(name) \
-LABEL_##name
-
-#define PO \
- pc = next_pc; \
- goto *dispatch[*(filter_opcode_t *) pc];
-
-#define END_OP
-
-#endif
-
-#define IS_INTEGER_REGISTER(reg_type) \
- (reg_type == REG_S64 || reg_type == REG_U64)
-
-static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
- struct load_ptr *ptr,
- uint32_t idx)
-{
-
- struct lttng_ctx_field *ctx_field;
- struct lttng_event_field *field;
- union lttng_ctx_value v;
-
- ctx_field = &lttng_static_ctx->fields[idx];
- field = &ctx_field->event_field;
- ptr->type = LOAD_OBJECT;
- /* field is only used for types nested within variants. */
- ptr->field = NULL;
-
- switch (field->type.atype) {
- case atype_integer:
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- if (field->type.u.integer.signedness) {
- ptr->object_type = OBJECT_TYPE_S64;
- ptr->u.s64 = v.s64;
- ptr->ptr = &ptr->u.s64;
- } else {
- ptr->object_type = OBJECT_TYPE_U64;
- ptr->u.u64 = v.s64; /* Cast. */
- ptr->ptr = &ptr->u.u64;
- }
- break;
- case atype_enum_nestable:
- {
- const struct lttng_integer_type *itype =
- &field->type.u.enum_nestable.container_type->u.integer;
-
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- if (itype->signedness) {
- ptr->object_type = OBJECT_TYPE_S64;
- ptr->u.s64 = v.s64;
- ptr->ptr = &ptr->u.s64;
- } else {
- ptr->object_type = OBJECT_TYPE_U64;
- ptr->u.u64 = v.s64; /* Cast. */
- ptr->ptr = &ptr->u.u64;
- }
- break;
- }
- case atype_array_nestable:
- if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
- printk(KERN_WARNING "LTTng: filter: Array nesting only supports integer types.\n");
- return -EINVAL;
- }
- if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
- printk(KERN_WARNING "LTTng: filter: Only string arrays are supported for contexts.\n");
- return -EINVAL;
- }
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- ptr->ptr = v.str;
- break;
- case atype_sequence_nestable:
- if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
- printk(KERN_WARNING "LTTng: filter: Sequence nesting only supports integer types.\n");
- return -EINVAL;
- }
- if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
- printk(KERN_WARNING "LTTng: filter: Only string sequences are supported for contexts.\n");
- return -EINVAL;
- }
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- ptr->ptr = v.str;
- break;
- case atype_string:
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- ptr->ptr = v.str;
- break;
- case atype_struct_nestable:
- printk(KERN_WARNING "LTTng: filter: Structure type cannot be loaded.\n");
- return -EINVAL;
- case atype_variant_nestable:
- printk(KERN_WARNING "LTTng: filter: Variant type cannot be loaded.\n");
- return -EINVAL;
- default:
- printk(KERN_WARNING "LTTng: filter: Unknown type: %d", (int) field->type.atype);
- return -EINVAL;
- }
- return 0;
-}
-
-static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
- struct bytecode_runtime *runtime,
- uint64_t index, struct estack_entry *stack_top)
-{
- int ret;
- const struct filter_get_index_data *gid;
-
- gid = (const struct filter_get_index_data *) &runtime->data[index];
- switch (stack_top->u.ptr.type) {
- case LOAD_OBJECT:
- switch (stack_top->u.ptr.object_type) {
- case OBJECT_TYPE_ARRAY:
- {
- const char *ptr;
-
- WARN_ON_ONCE(gid->offset >= gid->array_len);
- /* Skip count (unsigned long) */
- ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
- ptr = ptr + gid->offset;
- stack_top->u.ptr.ptr = ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- BUG_ON(stack_top->u.ptr.field->type.atype != atype_array_nestable);
- stack_top->u.ptr.field = NULL;
- break;
- }
- case OBJECT_TYPE_SEQUENCE:
- {
- const char *ptr;
- size_t ptr_seq_len;
-
- ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
- ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
- if (gid->offset >= gid->elem.len * ptr_seq_len) {
- ret = -EINVAL;
- goto end;
- }
- ptr = ptr + gid->offset;
- stack_top->u.ptr.ptr = ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- BUG_ON(stack_top->u.ptr.field->type.atype != atype_sequence_nestable);
- stack_top->u.ptr.field = NULL;
- break;
- }
- case OBJECT_TYPE_STRUCT:
- printk(KERN_WARNING "LTTng: filter: Nested structures are not supported yet.\n");
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_VARIANT:
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected get index type %d",
- (int) stack_top->u.ptr.object_type);
- ret = -EINVAL;
- goto end;
- }
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
- {
- ret = context_get_index(lttng_probe_ctx,
- &stack_top->u.ptr,
- gid->ctx_index);
- if (ret) {
- goto end;
- }
- break;
- }
- case LOAD_ROOT_PAYLOAD:
- stack_top->u.ptr.ptr += gid->offset;
- if (gid->elem.type == OBJECT_TYPE_STRING)
- stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.type = LOAD_OBJECT;
- stack_top->u.ptr.field = gid->field;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- break;
- }
-
- stack_top->type = REG_PTR;
-
- return 0;
-
-end:
- return ret;
-}
-
-static int dynamic_load_field(struct estack_entry *stack_top)
-{
- int ret;
-
- switch (stack_top->u.ptr.type) {
- case LOAD_OBJECT:
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- default:
- dbg_printk("Filter warning: cannot load root, missing field name.\n");
- ret = -EINVAL;
- goto end;
- }
- switch (stack_top->u.ptr.object_type) {
- case OBJECT_TYPE_S8:
- dbg_printk("op load field s8\n");
- stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
- stack_top->type = REG_S64;
- break;
- case OBJECT_TYPE_S16:
- {
- int16_t tmp;
-
- dbg_printk("op load field s16\n");
- tmp = *(int16_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- __swab16s(&tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_S32:
- {
- int32_t tmp;
-
- dbg_printk("op load field s32\n");
- tmp = *(int32_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- __swab32s(&tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_S64:
- {
- int64_t tmp;
-
- dbg_printk("op load field s64\n");
- tmp = *(int64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- __swab64s(&tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_U8:
- dbg_printk("op load field u8\n");
- stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
- stack_top->type = REG_U64;
- break;
- case OBJECT_TYPE_U16:
- {
- uint16_t tmp;
-
- dbg_printk("op load field u16\n");
- tmp = *(uint16_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- __swab16s(&tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_U64;
- break;
- }
- case OBJECT_TYPE_U32:
- {
- uint32_t tmp;
-
- dbg_printk("op load field u32\n");
- tmp = *(uint32_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- __swab32s(&tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_U64;
- break;
- }
- case OBJECT_TYPE_U64:
- {
- uint64_t tmp;
-
- dbg_printk("op load field u64\n");
- tmp = *(uint64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- __swab64s(&tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_U64;
- break;
- }
- case OBJECT_TYPE_STRING:
- {
- const char *str;
-
- dbg_printk("op load field string\n");
- str = (const char *) stack_top->u.ptr.ptr;
- stack_top->u.s.str = str;
- if (unlikely(!stack_top->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
- stack_top->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- stack_top->type = REG_STRING;
- break;
- }
- case OBJECT_TYPE_STRING_SEQUENCE:
- {
- const char *ptr;
-
- dbg_printk("op load field string sequence\n");
- ptr = stack_top->u.ptr.ptr;
- stack_top->u.s.seq_len = *(unsigned long *) ptr;
- stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
- if (unlikely(!stack_top->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- stack_top->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- stack_top->type = REG_STRING;
- break;
- }
- case OBJECT_TYPE_DYNAMIC:
- /*
- * Dynamic types in context are looked up
- * by context get index.
- */
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_DOUBLE:
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_SEQUENCE:
- case OBJECT_TYPE_ARRAY:
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
- printk(KERN_WARNING "LTTng: filter: Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
- ret = -EINVAL;
- goto end;
- }
- return 0;
-
-end:
- return ret;
-}
-
-static
-int lttng_bytecode_interpret_format_output(struct estack_entry *ax,
- struct lttng_interpreter_output *output)
-{
- int ret;
-
-again:
- switch (ax->type) {
- case REG_S64:
- output->type = LTTNG_INTERPRETER_TYPE_S64;
- output->u.s = ax->u.v;
- break;
- case REG_U64:
- output->type = LTTNG_INTERPRETER_TYPE_U64;
- output->u.u = (uint64_t) ax->u.v;
- break;
- case REG_STRING:
- output->type = LTTNG_INTERPRETER_TYPE_STRING;
- output->u.str.str = ax->u.s.str;
- output->u.str.len = ax->u.s.seq_len;
- break;
- case REG_PTR:
- switch (ax->u.ptr.object_type) {
- case OBJECT_TYPE_S8:
- case OBJECT_TYPE_S16:
- case OBJECT_TYPE_S32:
- case OBJECT_TYPE_S64:
- case OBJECT_TYPE_U8:
- case OBJECT_TYPE_U16:
- case OBJECT_TYPE_U32:
- case OBJECT_TYPE_U64:
- case OBJECT_TYPE_DOUBLE:
- case OBJECT_TYPE_STRING:
- case OBJECT_TYPE_STRING_SEQUENCE:
- ret = dynamic_load_field(ax);
- if (ret)
- return ret;
- /* Retry after loading ptr into stack top. */
- goto again;
- case OBJECT_TYPE_SEQUENCE:
- output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
- output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
- output->u.sequence.nr_elem = *(unsigned long *) ax->u.ptr.ptr;
- output->u.sequence.nested_type = ax->u.ptr.field->type.u.sequence_nestable.elem_type;
- break;
- case OBJECT_TYPE_ARRAY:
- /* Skip count (unsigned long) */
- output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
- output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
- output->u.sequence.nr_elem = ax->u.ptr.field->type.u.array_nestable.length;
- output->u.sequence.nested_type = ax->u.ptr.field->type.u.array_nestable.elem_type;
- break;
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
- default:
- return -EINVAL;
- }
-
- break;
- case REG_STAR_GLOB_STRING:
- case REG_TYPE_UNKNOWN:
- default:
- return -EINVAL;
- }
-
- return LTTNG_FILTER_RECORD_FLAG;
-}
-
-/*
- * Return 0 (discard), or raise the 0x1 flag (log event).
- * Currently, other flags are kept for future extensions and have no
- * effect.
- */
-static
-uint64_t bytecode_interpret(void *interpreter_data,
- struct lttng_probe_ctx *lttng_probe_ctx,
- const char *interpreter_stack_data,
- struct lttng_interpreter_output *output)
-{
- struct bytecode_runtime *bytecode = interpreter_data;
- void *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- uint64_t retval = 0;
- struct estack _stack;
- struct estack *stack = &_stack;
- register int64_t ax = 0, bx = 0;
- register enum entry_type ax_t = REG_TYPE_UNKNOWN, bx_t = REG_TYPE_UNKNOWN;
- register int top = FILTER_STACK_EMPTY;
-#ifndef INTERPRETER_USE_SWITCH
- static void *dispatch[NR_FILTER_OPS] = {
- [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,
-
- [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,
-
- /* binary */
- [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
- [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
- [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
- [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
- [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
- [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT,
- [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT,
- [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
- [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
- [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
-
- /* binary comparators */
- [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
- [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
- [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
- [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
- [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
- [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,
-
- /* string binary comparator */
- [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
- [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
- [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
- [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
- [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
- [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,
-
- /* globbing pattern binary comparator */
- [ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
- [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,
-
- /* s64 binary comparator */
- [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
- [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
- [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
- [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
- [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
- [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,
-
- /* double binary comparator */
- [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
- [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
- [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
- [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
- [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
- [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,
-
- /* Mixed S64-double binary comparators */
- [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
- [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
- [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
- [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
- [ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
- [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,
-
- [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
- [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
- [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
- [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
- [ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
- [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,
-
- /* unary */
- [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
- [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
- [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
- [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
- [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
- [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
- [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
- [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
- [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,
-
- /* logical */
- [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
- [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,
-
- /* load field ref */
- [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
- [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
- [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
- [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
- [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,
-
- /* load from immediate operand */
- [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
- [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
- [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
- [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,
-
- /* cast */
- [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
- [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
- [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,
-
- /* get context ref */
- [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
- [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
- [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
- [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,
-
- /* load userspace field ref */
- [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
- [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
-
- /* Instructions for recursive traversal through composed types. */
- [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
- [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
- [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
-
- [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
- [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
- [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
- [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
-
- [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
- [ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
- [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
- [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
- [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
- [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
- [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
- [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
- [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
- [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
- [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
- [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
-
- [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT,
-
- [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64,
- };
-#endif /* #ifndef INTERPRETER_USE_SWITCH */
-
- START_OP
-
- OP(FILTER_OP_UNKNOWN):
- OP(FILTER_OP_LOAD_FIELD_REF):
- OP(FILTER_OP_GET_CONTEXT_REF):
-#ifdef INTERPRETER_USE_SWITCH
- default:
-#endif /* INTERPRETER_USE_SWITCH */
- printk(KERN_WARNING "LTTng: filter: unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(FILTER_OP_RETURN):
- OP(FILTER_OP_RETURN_S64):
- /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
- switch (estack_ax_t) {
- case REG_S64:
- case REG_U64:
- retval = !!estack_ax_v;
- break;
- case REG_DOUBLE:
- case REG_STRING:
- case REG_PTR:
- if (!output) {
- ret = -EINVAL;
- goto end;
- }
- retval = 0;
- break;
- case REG_STAR_GLOB_STRING:
- case REG_TYPE_UNKNOWN:
- ret = -EINVAL;
- goto end;
- }
- ret = 0;
- goto end;
-
- /* binary */
- OP(FILTER_OP_MUL):
- OP(FILTER_OP_DIV):
- OP(FILTER_OP_MOD):
- OP(FILTER_OP_PLUS):
- OP(FILTER_OP_MINUS):
- printk(KERN_WARNING "LTTng: filter: unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(FILTER_OP_EQ):
- OP(FILTER_OP_NE):
- OP(FILTER_OP_GT):
- OP(FILTER_OP_LT):
- OP(FILTER_OP_GE):
- OP(FILTER_OP_LE):
- printk(KERN_WARNING "LTTng: filter: unsupported non-specialized bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(FILTER_OP_EQ_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "==") == 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_NE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "!=") != 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GT_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, ">") > 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LT_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "<") < 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, ">=") >= 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "<=") <= 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(FILTER_OP_EQ_STAR_GLOB_STRING):
- {
- int res;
-
- res = (stack_star_glob_match(stack, top, "==") == 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_NE_STAR_GLOB_STRING):
- {
- int res;
-
- res = (stack_star_glob_match(stack, top, "!=") != 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(FILTER_OP_EQ_S64):
- {
- int res;
-
- res = (estack_bx_v == estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_NE_S64):
- {
- int res;
-
- res = (estack_bx_v != estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GT_S64):
- {
- int res;
-
- res = (estack_bx_v > estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LT_S64):
- {
- int res;
-
- res = (estack_bx_v < estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GE_S64):
- {
- int res;
-
- res = (estack_bx_v >= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LE_S64):
- {
- int res;
-
- res = (estack_bx_v <= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(FILTER_OP_EQ_DOUBLE):
- OP(FILTER_OP_NE_DOUBLE):
- OP(FILTER_OP_GT_DOUBLE):
- OP(FILTER_OP_LT_DOUBLE):
- OP(FILTER_OP_GE_DOUBLE):
- OP(FILTER_OP_LE_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
-
- /* Mixed S64-double binary comparators */
- OP(FILTER_OP_EQ_DOUBLE_S64):
- OP(FILTER_OP_NE_DOUBLE_S64):
- OP(FILTER_OP_GT_DOUBLE_S64):
- OP(FILTER_OP_LT_DOUBLE_S64):
- OP(FILTER_OP_GE_DOUBLE_S64):
- OP(FILTER_OP_LE_DOUBLE_S64):
- OP(FILTER_OP_EQ_S64_DOUBLE):
- OP(FILTER_OP_NE_S64_DOUBLE):
- OP(FILTER_OP_GT_S64_DOUBLE):
- OP(FILTER_OP_LT_S64_DOUBLE):
- OP(FILTER_OP_GE_S64_DOUBLE):
- OP(FILTER_OP_LE_S64_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
- OP(FILTER_OP_BIT_RSHIFT):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- /* Catch undefined behavior. */
- if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
- ret = -EINVAL;
- goto end;
- }
- res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_LSHIFT):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- /* Catch undefined behavior. */
- if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
- ret = -EINVAL;
- goto end;
- }
- res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_AND):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_OR):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_XOR):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- /* unary */
- OP(FILTER_OP_UNARY_PLUS):
- OP(FILTER_OP_UNARY_MINUS):
- OP(FILTER_OP_UNARY_NOT):
- printk(KERN_WARNING "LTTng: filter: unsupported non-specialized bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
-
- OP(FILTER_OP_UNARY_BIT_NOT):
- {
- estack_ax_v = ~(uint64_t) estack_ax_v;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct unary_op);
- PO;
- }
-
- OP(FILTER_OP_UNARY_PLUS_S64):
- {
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(FILTER_OP_UNARY_MINUS_S64):
- {
- estack_ax_v = -estack_ax_v;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(FILTER_OP_UNARY_PLUS_DOUBLE):
- OP(FILTER_OP_UNARY_MINUS_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
- OP(FILTER_OP_UNARY_NOT_S64):
- {
- estack_ax_v = !estack_ax_v;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(FILTER_OP_UNARY_NOT_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
-
- /* logical */
- OP(FILTER_OP_AND):
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- /* If AX is 0, skip and evaluate to 0 */
- if (unlikely(estack_ax_v == 0)) {
- dbg_printk("Jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- next_pc = start_pc + insn->skip_offset;
- } else {
- /* Pop 1 when jump not taken */
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- next_pc += sizeof(struct logical_op);
- }
- PO;
- }
- OP(FILTER_OP_OR):
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- /* If AX is nonzero, skip and evaluate to 1 */
-
- if (unlikely(estack_ax_v != 0)) {
- estack_ax_v = 1;
- dbg_printk("Jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- next_pc = start_pc + insn->skip_offset;
- } else {
- /* Pop 1 when jump not taken */
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- next_pc += sizeof(struct logical_op);
- }
- PO;
- }
-
-
- /* load field ref */
- OP(FILTER_OP_LOAD_FIELD_REF_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("load field ref offset %u type string\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str =
- *(const char * const *) &interpreter_stack_data[ref->offset];
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->u.s.user = 0;
- estack_ax(stack, top)->type = REG_STRING;
- dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("load field ref offset %u type sequence\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.seq_len =
- *(unsigned long *) &interpreter_stack_data[ref->offset];
- estack_ax(stack, top)->u.s.str =
- *(const char **) (&interpreter_stack_data[ref->offset
- + sizeof(unsigned long)]);
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->u.s.user = 0;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_REF_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("load field ref offset %u type s64\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v =
- ((struct literal_numeric *) &interpreter_stack_data[ref->offset])->v;
- estack_ax_t = REG_S64;
- dbg_printk("ref load s64 %lld\n",
- (long long) estack_ax_v);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
-
- /* load from immediate operand */
- OP(FILTER_OP_LOAD_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printk("load string %s\n", insn->data);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str = insn->data;
- estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_PLAIN;
- estack_ax(stack, top)->u.s.user = 0;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- PO;
- }
-
- OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printk("load globbing pattern %s\n", insn->data);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str = insn->data;
- estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
- estack_ax(stack, top)->u.s.user = 0;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- PO;
- }
-
- OP(FILTER_OP_LOAD_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = ((struct literal_numeric *) insn->data)->v;
- estack_ax_t = REG_S64;
- dbg_printk("load s64 %lld\n",
- (long long) estack_ax_v);
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- PO;
- }
-
- OP(FILTER_OP_LOAD_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
-
- /* cast */
- OP(FILTER_OP_CAST_TO_S64):
- printk(KERN_WARNING "LTTng: filter: unsupported non-specialized bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(FILTER_OP_CAST_DOUBLE_TO_S64):
- {
- BUG_ON(1);
- PO;
- }
-
- OP(FILTER_OP_CAST_NOP):
- {
- next_pc += sizeof(struct cast_op);
- PO;
- }
-
- /* get context ref */
- OP(FILTER_OP_GET_CONTEXT_REF_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- struct lttng_ctx_field *ctx_field;
- union lttng_ctx_value v;
-
- dbg_printk("get context ref offset %u type string\n",
- ref->offset);
- ctx_field = <tng_static_ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str = v.str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->u.s.user = 0;
- estack_ax(stack, top)->type = REG_STRING;
- dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_GET_CONTEXT_REF_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- struct lttng_ctx_field *ctx_field;
- union lttng_ctx_value v;
-
- dbg_printk("get context ref offset %u type s64\n",
- ref->offset);
- ctx_field = <tng_static_ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = v.s64;
- estack_ax_t = REG_S64;
- dbg_printk("ref get context s64 %lld\n",
- (long long) estack_ax_v);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
-
- /* load userspace field ref */
- OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("load field ref offset %u type user string\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.user_str =
- *(const char * const *) &interpreter_stack_data[ref->offset];
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->u.s.user = 1;
- estack_ax(stack, top)->type = REG_STRING;
- dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("load field ref offset %u type user sequence\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.seq_len =
- *(unsigned long *) &interpreter_stack_data[ref->offset];
- estack_ax(stack, top)->u.s.user_str =
- *(const char **) (&interpreter_stack_data[ref->offset
- + sizeof(unsigned long)]);
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->u.s.user = 1;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_GET_CONTEXT_ROOT):
- {
- dbg_printk("op get context root\n");
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- estack_ax(stack, top)->type = REG_PTR;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
- {
- BUG_ON(1);
- PO;
- }
-
- OP(FILTER_OP_GET_PAYLOAD_ROOT):
- {
- dbg_printk("op get app payload root\n");
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
- estack_ax(stack, top)->u.ptr.ptr = interpreter_stack_data;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- estack_ax(stack, top)->type = REG_PTR;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_GET_SYMBOL):
- {
- dbg_printk("op get symbol\n");
- switch (estack_ax(stack, top)->u.ptr.type) {
- case LOAD_OBJECT:
- printk(KERN_WARNING "LTTng: filter: Nested fields not implemented yet.\n");
- ret = -EINVAL;
- goto end;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- /*
- * symbol lookup is performed by
- * specialization.
- */
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- PO;
- }
-
- OP(FILTER_OP_GET_SYMBOL_FIELD):
- {
- /*
- * Used for first variant encountered in a
- * traversal. Variants are not implemented yet.
- */
- ret = -EINVAL;
- goto end;
- }
-
- OP(FILTER_OP_GET_INDEX_U16):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
- dbg_printk("op get index u16\n");
- ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- estack_ax_t = estack_ax(stack, top)->type;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- PO;
- }
-
- OP(FILTER_OP_GET_INDEX_U64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
- dbg_printk("op get index u64\n");
- ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- estack_ax_t = estack_ax(stack, top)->type;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD):
- {
- dbg_printk("op load field\n");
- ret = dynamic_load_field(estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- estack_ax_t = estack_ax(stack, top)->type;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_S8):
- {
- dbg_printk("op load field s8\n");
-
- estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_S16):
- {
- dbg_printk("op load field s16\n");
-
- estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_S32):
- {
- dbg_printk("op load field s32\n");
-
- estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_S64):
- {
- dbg_printk("op load field s64\n");
-
- estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_U8):
- {
- dbg_printk("op load field u8\n");
-
- estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_U16):
- {
- dbg_printk("op load field u16\n");
-
- estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_U32):
- {
- dbg_printk("op load field u32\n");
-
- estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_U64):
- {
- dbg_printk("op load field u64\n");
-
- estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_DOUBLE):
- {
- ret = -EINVAL;
- goto end;
- }
-
- OP(FILTER_OP_LOAD_FIELD_STRING):
- {
- const char *str;
-
- dbg_printk("op load field string\n");
- str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax(stack, top)->u.s.str = str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
- {
- const char *ptr;
-
- dbg_printk("op load field string sequence\n");
- ptr = estack_ax(stack, top)->u.ptr.ptr;
- estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
- estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- END_OP
-end:
- /* Return _DISCARD on error. */
- if (ret)
- return LTTNG_FILTER_DISCARD;
-
- if (output) {
- return lttng_bytecode_interpret_format_output(
- estack_ax(stack, top), output);
- }
-
- return retval;
-}
-LTTNG_STACK_FRAME_NON_STANDARD(bytecode_interpret);
-
-uint64_t lttng_filter_interpret_bytecode(void *filter_data,
- struct lttng_probe_ctx *lttng_probe_ctx,
- const char *filter_stack_data)
-{
- return bytecode_interpret(filter_data, lttng_probe_ctx,
- filter_stack_data, NULL);
-}
-
-#undef START_OP
-#undef OP
-#undef PO
-#undef END_OP
+++ /dev/null
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter-specialize.c
- *
- * LTTng modules filter code specializer.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/slab.h>
-#include <lttng/filter.h>
-#include <lttng/align.h>
-
-static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
- size_t align, size_t len)
-{
- ssize_t ret;
- size_t padding = offset_align(runtime->data_len, align);
- size_t new_len = runtime->data_len + padding + len;
- size_t new_alloc_len = new_len;
- size_t old_alloc_len = runtime->data_alloc_len;
-
- if (new_len > FILTER_MAX_DATA_LEN)
- return -EINVAL;
-
- if (new_alloc_len > old_alloc_len) {
- char *newptr;
-
- new_alloc_len =
- max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
- newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
- if (!newptr)
- return -ENOMEM;
- runtime->data = newptr;
- /* We zero directly the memory from start of allocation. */
- memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
- runtime->data_alloc_len = new_alloc_len;
- }
- runtime->data_len += padding;
- ret = runtime->data_len;
- runtime->data_len += len;
- return ret;
-}
-
-static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
- const void *p, size_t align, size_t len)
-{
- ssize_t offset;
-
- offset = bytecode_reserve_data(runtime, align, len);
- if (offset < 0)
- return -ENOMEM;
- memcpy(&runtime->data[offset], p, len);
- return offset;
-}
-
-static int specialize_load_field(struct vstack_entry *stack_top,
- struct load_op *insn)
-{
- int ret;
-
- switch (stack_top->load.type) {
- case LOAD_OBJECT:
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- default:
- dbg_printk("Filter warning: cannot load root, missing field name.\n");
- ret = -EINVAL;
- goto end;
- }
- switch (stack_top->load.object_type) {
- case OBJECT_TYPE_S8:
- dbg_printk("op load field s8\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_S8;
- break;
- case OBJECT_TYPE_S16:
- dbg_printk("op load field s16\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_S16;
- break;
- case OBJECT_TYPE_S32:
- dbg_printk("op load field s32\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_S32;
- break;
- case OBJECT_TYPE_S64:
- dbg_printk("op load field s64\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_S64;
- break;
- case OBJECT_TYPE_U8:
- dbg_printk("op load field u8\n");
- stack_top->type = REG_S64;
- insn->op = FILTER_OP_LOAD_FIELD_U8;
- break;
- case OBJECT_TYPE_U16:
- dbg_printk("op load field u16\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_U16;
- break;
- case OBJECT_TYPE_U32:
- dbg_printk("op load field u32\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_U32;
- break;
- case OBJECT_TYPE_U64:
- dbg_printk("op load field u64\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_U64;
- break;
- case OBJECT_TYPE_DOUBLE:
- printk(KERN_WARNING "LTTng: filter: Double type unsupported\n\n");
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_STRING:
- dbg_printk("op load field string\n");
- stack_top->type = REG_STRING;
- insn->op = FILTER_OP_LOAD_FIELD_STRING;
- break;
- case OBJECT_TYPE_STRING_SEQUENCE:
- dbg_printk("op load field string sequence\n");
- stack_top->type = REG_STRING;
- insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
- break;
- case OBJECT_TYPE_DYNAMIC:
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_SEQUENCE:
- case OBJECT_TYPE_ARRAY:
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
- printk(KERN_WARNING "LTTng: filter: Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
- ret = -EINVAL;
- goto end;
- }
- return 0;
-
-end:
- return ret;
-}
-
-static int specialize_get_index_object_type(enum object_type *otype,
- int signedness, uint32_t elem_len)
-{
- switch (elem_len) {
- case 8:
- if (signedness)
- *otype = OBJECT_TYPE_S8;
- else
- *otype = OBJECT_TYPE_U8;
- break;
- case 16:
- if (signedness)
- *otype = OBJECT_TYPE_S16;
- else
- *otype = OBJECT_TYPE_U16;
- break;
- case 32:
- if (signedness)
- *otype = OBJECT_TYPE_S32;
- else
- *otype = OBJECT_TYPE_U32;
- break;
- case 64:
- if (signedness)
- *otype = OBJECT_TYPE_S64;
- else
- *otype = OBJECT_TYPE_U64;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int specialize_get_index(struct bytecode_runtime *runtime,
- struct load_op *insn, uint64_t index,
- struct vstack_entry *stack_top,
- int idx_len)
-{
- int ret;
- struct filter_get_index_data gid;
- ssize_t data_offset;
-
- memset(&gid, 0, sizeof(gid));
- switch (stack_top->load.type) {
- case LOAD_OBJECT:
- switch (stack_top->load.object_type) {
- case OBJECT_TYPE_ARRAY:
- {
- const struct lttng_integer_type *integer_type;
- const struct lttng_event_field *field;
- uint32_t elem_len, num_elems;
- int signedness;
-
- field = stack_top->load.field;
- if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
- ret = -EINVAL;
- goto end;
- }
- integer_type = &field->type.u.array_nestable.elem_type->u.integer;
- num_elems = field->type.u.array_nestable.length;
- elem_len = integer_type->size;
- signedness = integer_type->signedness;
- if (index >= num_elems) {
- ret = -EINVAL;
- goto end;
- }
- ret = specialize_get_index_object_type(&stack_top->load.object_type,
- signedness, elem_len);
- if (ret)
- goto end;
- gid.offset = index * (elem_len / CHAR_BIT);
- gid.array_len = num_elems * (elem_len / CHAR_BIT);
- gid.elem.type = stack_top->load.object_type;
- gid.elem.len = elem_len;
- if (integer_type->reverse_byte_order)
- gid.elem.rev_bo = true;
- stack_top->load.rev_bo = gid.elem.rev_bo;
- break;
- }
- case OBJECT_TYPE_SEQUENCE:
- {
- const struct lttng_integer_type *integer_type;
- const struct lttng_event_field *field;
- uint32_t elem_len;
- int signedness;
-
- field = stack_top->load.field;
- if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
- ret = -EINVAL;
- goto end;
- }
- integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
- elem_len = integer_type->size;
- signedness = integer_type->signedness;
- ret = specialize_get_index_object_type(&stack_top->load.object_type,
- signedness, elem_len);
- if (ret)
- goto end;
- gid.offset = index * (elem_len / CHAR_BIT);
- gid.elem.type = stack_top->load.object_type;
- gid.elem.len = elem_len;
- if (integer_type->reverse_byte_order)
- gid.elem.rev_bo = true;
- stack_top->load.rev_bo = gid.elem.rev_bo;
- break;
- }
- case OBJECT_TYPE_STRUCT:
- /* Only generated by the specialize phase. */
- case OBJECT_TYPE_VARIANT: /* Fall-through */
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected get index type %d",
- (int) stack_top->load.object_type);
- ret = -EINVAL;
- goto end;
- }
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- printk(KERN_WARNING "LTTng: filter: Index lookup for root field not implemented yet.\n");
- ret = -EINVAL;
- goto end;
- }
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- switch (idx_len) {
- case 2:
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- break;
- case 8:
- ((struct get_index_u64 *) insn->data)->index = data_offset;
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
-
- return 0;
-
-end:
- return ret;
-}
-
-static int specialize_context_lookup_name(struct lttng_ctx *ctx,
- struct bytecode_runtime *bytecode,
- struct load_op *insn)
-{
- uint16_t offset;
- const char *name;
-
- offset = ((struct get_symbol *) insn->data)->offset;
- name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
- return lttng_get_context_index(ctx, name);
-}
-
-static int specialize_load_object(const struct lttng_event_field *field,
- struct vstack_load *load, bool is_context)
-{
- load->type = LOAD_OBJECT;
-
- switch (field->type.atype) {
- case atype_integer:
- if (field->type.u.integer.signedness)
- load->object_type = OBJECT_TYPE_S64;
- else
- load->object_type = OBJECT_TYPE_U64;
- load->rev_bo = false;
- break;
- case atype_enum_nestable:
- {
- const struct lttng_integer_type *itype =
- &field->type.u.enum_nestable.container_type->u.integer;
-
- if (itype->signedness)
- load->object_type = OBJECT_TYPE_S64;
- else
- load->object_type = OBJECT_TYPE_U64;
- load->rev_bo = false;
- break;
- }
- case atype_array_nestable:
- if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
- printk(KERN_WARNING "LTTng: filter Array nesting only supports integer types.\n");
- return -EINVAL;
- }
- if (is_context) {
- load->object_type = OBJECT_TYPE_STRING;
- } else {
- if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
- load->object_type = OBJECT_TYPE_ARRAY;
- load->field = field;
- } else {
- load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
- }
- }
- break;
- case atype_sequence_nestable:
- if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
- printk(KERN_WARNING "LTTng: filter Sequence nesting only supports integer types.\n");
- return -EINVAL;
- }
- if (is_context) {
- load->object_type = OBJECT_TYPE_STRING;
- } else {
- if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
- load->object_type = OBJECT_TYPE_SEQUENCE;
- load->field = field;
- } else {
- load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
- }
- }
- break;
- case atype_string:
- load->object_type = OBJECT_TYPE_STRING;
- break;
- case atype_struct_nestable:
- printk(KERN_WARNING "LTTng: filter: Structure type cannot be loaded.\n");
- return -EINVAL;
- case atype_variant_nestable:
- printk(KERN_WARNING "LTTng: filter: Variant type cannot be loaded.\n");
- return -EINVAL;
- default:
- printk(KERN_WARNING "LTTng: filter: Unknown type: %d", (int) field->type.atype);
- return -EINVAL;
- }
- return 0;
-}
-
-static int specialize_context_lookup(struct lttng_ctx *ctx,
- struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- int idx, ret;
- struct lttng_ctx_field *ctx_field;
- struct lttng_event_field *field;
- struct filter_get_index_data gid;
- ssize_t data_offset;
-
- idx = specialize_context_lookup_name(ctx, runtime, insn);
- if (idx < 0) {
- return -ENOENT;
- }
- ctx_field = <tng_static_ctx->fields[idx];
- field = &ctx_field->event_field;
- ret = specialize_load_object(field, load, true);
- if (ret)
- return ret;
- /* Specialize each get_symbol into a get_index. */
- insn->op = FILTER_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.ctx_index = idx;
- gid.elem.type = load->object_type;
- gid.elem.rev_bo = load->rev_bo;
- gid.field = field;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- return -EINVAL;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- return 0;
-}
-
-static int specialize_payload_lookup(const struct lttng_event_desc *event_desc,
- struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- const char *name;
- uint16_t offset;
- unsigned int i, nr_fields;
- bool found = false;
- uint32_t field_offset = 0;
- const struct lttng_event_field *field;
- int ret;
- struct filter_get_index_data gid;
- ssize_t data_offset;
-
- nr_fields = event_desc->nr_fields;
- offset = ((struct get_symbol *) insn->data)->offset;
- name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
- for (i = 0; i < nr_fields; i++) {
- field = &event_desc->fields[i];
- if (field->nofilter) {
- continue;
- }
- if (!strcmp(field->name, name)) {
- found = true;
- break;
- }
- /* compute field offset on stack */
- switch (field->type.atype) {
- case atype_integer:
- case atype_enum_nestable:
- field_offset += sizeof(int64_t);
- break;
- case atype_array_nestable:
- case atype_sequence_nestable:
- field_offset += sizeof(unsigned long);
- field_offset += sizeof(void *);
- break;
- case atype_string:
- field_offset += sizeof(void *);
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
- }
- if (!found) {
- ret = -EINVAL;
- goto end;
- }
-
- ret = specialize_load_object(field, load, false);
- if (ret)
- goto end;
-
- /* Specialize each get_symbol into a get_index. */
- insn->op = FILTER_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.offset = field_offset;
- gid.elem.type = load->object_type;
- gid.elem.rev_bo = load->rev_bo;
- gid.field = field;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- ret = 0;
-end:
- return ret;
-}
-
-int lttng_filter_specialize_bytecode(const struct lttng_event_desc *event_desc,
- struct bytecode_runtime *bytecode)
-{
- void *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- struct vstack _stack;
- struct vstack *stack = &_stack;
- struct lttng_ctx *ctx = bytecode->p.ctx;
-
- vstack_init(stack);
-
- start_pc = &bytecode->code[0];
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
- pc = next_pc) {
- switch (*(filter_opcode_t *) pc) {
- case FILTER_OP_UNKNOWN:
- default:
- printk(KERN_WARNING "LTTng: filter: unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- case FILTER_OP_RETURN:
- case FILTER_OP_RETURN_S64:
- ret = 0;
- goto end;
-
- /* binary */
- case FILTER_OP_MUL:
- case FILTER_OP_DIV:
- case FILTER_OP_MOD:
- case FILTER_OP_PLUS:
- case FILTER_OP_MINUS:
- printk(KERN_WARNING "LTTng: filter: unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- case FILTER_OP_EQ:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
- insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
- else
- insn->op = FILTER_OP_EQ_STRING;
- break;
- case REG_STAR_GLOB_STRING:
- insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_EQ_S64;
- else
- insn->op = FILTER_OP_EQ_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_EQ_S64_DOUBLE;
- else
- insn->op = FILTER_OP_EQ_DOUBLE;
- break;
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_NE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
- insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
- else
- insn->op = FILTER_OP_NE_STRING;
- break;
- case REG_STAR_GLOB_STRING:
- insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_NE_S64;
- else
- insn->op = FILTER_OP_NE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_NE_S64_DOUBLE;
- else
- insn->op = FILTER_OP_NE_DOUBLE;
- break;
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_GT:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: invalid register type for '>' binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- insn->op = FILTER_OP_GT_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_GT_S64;
- else
- insn->op = FILTER_OP_GT_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_GT_S64_DOUBLE;
- else
- insn->op = FILTER_OP_GT_DOUBLE;
- break;
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_LT:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: invalid register type for '<' binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- insn->op = FILTER_OP_LT_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_LT_S64;
- else
- insn->op = FILTER_OP_LT_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_LT_S64_DOUBLE;
- else
- insn->op = FILTER_OP_LT_DOUBLE;
- break;
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_GE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: invalid register type for '>=' binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- insn->op = FILTER_OP_GE_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_GE_S64;
- else
- insn->op = FILTER_OP_GE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_GE_S64_DOUBLE;
- else
- insn->op = FILTER_OP_GE_DOUBLE;
- break;
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
- case FILTER_OP_LE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: invalid register type for '<=' binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- insn->op = FILTER_OP_LE_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_LE_S64;
- else
- insn->op = FILTER_OP_LE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_LE_S64_DOUBLE;
- else
- insn->op = FILTER_OP_LE_DOUBLE;
- break;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_EQ_STRING:
- case FILTER_OP_NE_STRING:
- case FILTER_OP_GT_STRING:
- case FILTER_OP_LT_STRING:
- case FILTER_OP_GE_STRING:
- case FILTER_OP_LE_STRING:
- case FILTER_OP_EQ_STAR_GLOB_STRING:
- case FILTER_OP_NE_STAR_GLOB_STRING:
- case FILTER_OP_EQ_S64:
- case FILTER_OP_NE_S64:
- case FILTER_OP_GT_S64:
- case FILTER_OP_LT_S64:
- case FILTER_OP_GE_S64:
- case FILTER_OP_LE_S64:
- case FILTER_OP_EQ_DOUBLE:
- case FILTER_OP_NE_DOUBLE:
- case FILTER_OP_GT_DOUBLE:
- case FILTER_OP_LT_DOUBLE:
- case FILTER_OP_GE_DOUBLE:
- case FILTER_OP_LE_DOUBLE:
- case FILTER_OP_EQ_DOUBLE_S64:
- case FILTER_OP_NE_DOUBLE_S64:
- case FILTER_OP_GT_DOUBLE_S64:
- case FILTER_OP_LT_DOUBLE_S64:
- case FILTER_OP_GE_DOUBLE_S64:
- case FILTER_OP_LE_DOUBLE_S64:
- case FILTER_OP_EQ_S64_DOUBLE:
- case FILTER_OP_NE_S64_DOUBLE:
- case FILTER_OP_GT_S64_DOUBLE:
- case FILTER_OP_LT_S64_DOUBLE:
- case FILTER_OP_GE_S64_DOUBLE:
- case FILTER_OP_LE_S64_DOUBLE:
- case FILTER_OP_BIT_RSHIFT:
- case FILTER_OP_BIT_LSHIFT:
- case FILTER_OP_BIT_AND:
- case FILTER_OP_BIT_OR:
- case FILTER_OP_BIT_XOR:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- /* unary */
- case FILTER_OP_UNARY_PLUS:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- insn->op = FILTER_OP_UNARY_PLUS_S64;
- break;
- case REG_DOUBLE:
- insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_MINUS:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- insn->op = FILTER_OP_UNARY_MINUS_S64;
- break;
- case REG_DOUBLE:
- insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_NOT:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- insn->op = FILTER_OP_UNARY_NOT_S64;
- break;
- case REG_DOUBLE:
- insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_BIT_NOT:
- {
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_PLUS_S64:
- case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT_S64:
- case FILTER_OP_UNARY_PLUS_DOUBLE:
- case FILTER_OP_UNARY_MINUS_DOUBLE:
- case FILTER_OP_UNARY_NOT_DOUBLE:
- {
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- /* logical */
- case FILTER_OP_AND:
- case FILTER_OP_OR:
- {
- /* Continue to next instruction */
- /* Pop 1 when jump not taken */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct logical_op);
- break;
- }
-
- /* load field ref */
- case FILTER_OP_LOAD_FIELD_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- /* get context ref */
- case FILTER_OP_GET_CONTEXT_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown get context ref type\n");
- ret = -EINVAL;
- goto end;
- }
- case FILTER_OP_LOAD_FIELD_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
- case FILTER_OP_GET_CONTEXT_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
- case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_S64:
- case FILTER_OP_GET_CONTEXT_REF_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
- case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
-
- /* load from immediate operand */
- case FILTER_OP_LOAD_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case FILTER_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case FILTER_OP_LOAD_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- break;
- }
-
- case FILTER_OP_LOAD_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_double);
- break;
- }
-
- /* cast */
- case FILTER_OP_CAST_TO_S64:
- {
- struct cast_op *insn = (struct cast_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: Cast op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- insn->op = FILTER_OP_CAST_NOP;
- break;
- case REG_DOUBLE:
- insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
- break;
- }
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case FILTER_OP_CAST_DOUBLE_TO_S64:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case FILTER_OP_CAST_NOP:
- {
- next_pc += sizeof(struct cast_op);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case FILTER_OP_GET_CONTEXT_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
- next_pc += sizeof(struct load_op);
- break;
- }
- case FILTER_OP_GET_APP_CONTEXT_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
- next_pc += sizeof(struct load_op);
- break;
- }
- case FILTER_OP_GET_PAYLOAD_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
- /* Pop 1, push 1 */
- ret = specialize_load_field(vstack_ax(stack), insn);
- if (ret)
- goto end;
-
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_S8:
- case FILTER_OP_LOAD_FIELD_S16:
- case FILTER_OP_LOAD_FIELD_S32:
- case FILTER_OP_LOAD_FIELD_S64:
- case FILTER_OP_LOAD_FIELD_U8:
- case FILTER_OP_LOAD_FIELD_U16:
- case FILTER_OP_LOAD_FIELD_U32:
- case FILTER_OP_LOAD_FIELD_U64:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_STRING:
- case FILTER_OP_LOAD_FIELD_SEQUENCE:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_DOUBLE:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printk("op get symbol\n");
- switch (vstack_ax(stack)->load.type) {
- case LOAD_OBJECT:
- printk(KERN_WARNING "LTTng: filter: Nested fields not implemented yet.\n");
- ret = -EINVAL;
- goto end;
- case LOAD_ROOT_CONTEXT:
- /* Lookup context field. */
- ret = specialize_context_lookup(ctx, bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- case LOAD_ROOT_APP_CONTEXT:
- ret = -EINVAL;
- goto end;
- case LOAD_ROOT_PAYLOAD:
- /* Lookup event payload field. */
- ret = specialize_payload_lookup(event_desc,
- bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL_FIELD:
- {
- /* Always generated by specialize phase. */
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_GET_INDEX_U16:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
- dbg_printk("op get index u16\n");
- /* Pop 1, push 1 */
- ret = specialize_get_index(bytecode, insn, index->index,
- vstack_ax(stack), sizeof(*index));
- if (ret)
- goto end;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
- dbg_printk("op get index u64\n");
- /* Pop 1, push 1 */
- ret = specialize_get_index(bytecode, insn, index->index,
- vstack_ax(stack), sizeof(*index));
- if (ret)
- goto end;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- break;
- }
-
- }
- }
-end:
- return ret;
-}
+++ /dev/null
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter-validator.c
- *
- * LTTng modules filter bytecode validator.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/types.h>
-#include <linux/jhash.h>
-#include <linux/slab.h>
-
-#include <wrapper/list.h>
-#include <lttng/filter.h>
-
-#define MERGE_POINT_TABLE_BITS 7
-#define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
-
-/* merge point table node */
-struct mp_node {
- struct hlist_node node;
-
- /* Context at merge point */
- struct vstack stack;
- unsigned long target_pc;
-};
-
-struct mp_table {
- struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
-};
-
-static
-int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
-{
- if (mp_node->target_pc == key_pc)
- return 1;
- else
- return 0;
-}
-
-static
-int merge_points_compare(const struct vstack *stacka,
- const struct vstack *stackb)
-{
- int i, len;
-
- if (stacka->top != stackb->top)
- return 1;
- len = stacka->top + 1;
- WARN_ON_ONCE(len < 0);
- for (i = 0; i < len; i++) {
- if (stacka->e[i].type != stackb->e[i].type)
- return 1;
- }
- return 0;
-}
-
-static
-int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
- const struct vstack *stack)
-{
- struct mp_node *mp_node;
- unsigned long hash = jhash_1word(target_pc, 0);
- struct hlist_head *head;
- struct mp_node *lookup_node;
- int found = 0;
-
- dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
- target_pc, hash);
- mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
- if (!mp_node)
- return -ENOMEM;
- mp_node->target_pc = target_pc;
- memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
-
- head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
- lttng_hlist_for_each_entry(lookup_node, head, node) {
- if (lttng_hash_match(lookup_node, target_pc)) {
- found = 1;
- break;
- }
- }
- if (found) {
- /* Key already present */
- dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
- target_pc, hash);
- kfree(mp_node);
- if (merge_points_compare(stack, &lookup_node->stack)) {
- printk(KERN_WARNING "LTTng: filter: Merge points differ for offset %lu\n",
- target_pc);
- return -EINVAL;
- }
- } else {
- hlist_add_head(&mp_node->node, head);
- }
- return 0;
-}
-
-/*
- * Binary comparators use top of stack and top of stack -1.
- */
-static
-int bin_op_compare_check(struct vstack *stack, const filter_opcode_t opcode,
- const char *str)
-{
- if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
- goto error_empty;
-
- switch (vstack_ax(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
-
- case REG_STRING:
- switch (vstack_bx(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
- case REG_TYPE_UNKNOWN:
- goto unknown;
- case REG_STRING:
- break;
- case REG_STAR_GLOB_STRING:
- if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
- goto error_mismatch;
- }
- break;
- case REG_S64:
- case REG_U64:
- goto error_mismatch;
- }
- break;
- case REG_STAR_GLOB_STRING:
- switch (vstack_bx(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
- case REG_TYPE_UNKNOWN:
- goto unknown;
- case REG_STRING:
- if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
- goto error_mismatch;
- }
- break;
- case REG_STAR_GLOB_STRING:
- case REG_S64:
- case REG_U64:
- goto error_mismatch;
- }
- break;
- case REG_S64:
- case REG_U64:
- switch (vstack_bx(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
- case REG_TYPE_UNKNOWN:
- goto unknown;
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- goto error_mismatch;
- case REG_S64:
- case REG_U64:
- break;
- }
- break;
- case REG_TYPE_UNKNOWN:
- switch (vstack_bx(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
- case REG_TYPE_UNKNOWN:
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_S64:
- case REG_U64:
- goto unknown;
- }
- break;
- }
- return 0;
-
-unknown:
- return 1;
-
-error_empty:
- printk(KERN_WARNING "LTTng: filter: empty stack for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_mismatch:
- printk(KERN_WARNING "LTTng: filter: type mismatch for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_type:
- printk(KERN_WARNING "LTTng: filter: unknown type for '%s' binary operator\n", str);
- return -EINVAL;
-}
-
-/*
- * Binary bitwise operators use top of stack and top of stack -1.
- * Return 0 if typing is known to match, 1 if typing is dynamic
- * (unknown), negative error value on error.
- */
-static
-int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
- const char *str)
-{
- if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
- goto error_empty;
-
- switch (vstack_ax(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
-
- case REG_TYPE_UNKNOWN:
- switch (vstack_bx(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
- case REG_TYPE_UNKNOWN:
- case REG_S64:
- case REG_U64:
- goto unknown;
- }
- break;
- case REG_S64:
- case REG_U64:
- switch (vstack_bx(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
- case REG_TYPE_UNKNOWN:
- goto unknown;
- case REG_S64:
- case REG_U64:
- break;
- }
- break;
- }
- return 0;
-
-unknown:
- return 1;
-
-error_empty:
- printk(KERN_WARNING "LTTng: filter: empty stack for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_type:
- printk(KERN_WARNING "LTTng: filter: unknown type for '%s' binary operator\n", str);
- return -EINVAL;
-}
-
-static
-int validate_get_symbol(struct bytecode_runtime *bytecode,
- const struct get_symbol *sym)
-{
- const char *str, *str_limit;
- size_t len_limit;
-
- if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
- return -EINVAL;
-
- str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
- str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
- len_limit = str_limit - str;
- if (strnlen(str, len_limit) == len_limit)
- return -EINVAL;
- return 0;
-}
-
-/*
- * Validate bytecode range overflow within the validation pass.
- * Called for each instruction encountered.
- */
-static
-int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
- char *start_pc, char *pc)
-{
- int ret = 0;
-
- switch (*(filter_opcode_t *) pc) {
- case FILTER_OP_UNKNOWN:
- default:
- {
- printk(KERN_WARNING "LTTng: filter: unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- break;
- }
-
- case FILTER_OP_RETURN:
- case FILTER_OP_RETURN_S64:
- {
- if (unlikely(pc + sizeof(struct return_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* binary */
- case FILTER_OP_MUL:
- case FILTER_OP_DIV:
- case FILTER_OP_MOD:
- case FILTER_OP_PLUS:
- case FILTER_OP_MINUS:
- case FILTER_OP_EQ_DOUBLE:
- case FILTER_OP_NE_DOUBLE:
- case FILTER_OP_GT_DOUBLE:
- case FILTER_OP_LT_DOUBLE:
- case FILTER_OP_GE_DOUBLE:
- case FILTER_OP_LE_DOUBLE:
- /* Floating point */
- case FILTER_OP_EQ_DOUBLE_S64:
- case FILTER_OP_NE_DOUBLE_S64:
- case FILTER_OP_GT_DOUBLE_S64:
- case FILTER_OP_LT_DOUBLE_S64:
- case FILTER_OP_GE_DOUBLE_S64:
- case FILTER_OP_LE_DOUBLE_S64:
- case FILTER_OP_EQ_S64_DOUBLE:
- case FILTER_OP_NE_S64_DOUBLE:
- case FILTER_OP_GT_S64_DOUBLE:
- case FILTER_OP_LT_S64_DOUBLE:
- case FILTER_OP_GE_S64_DOUBLE:
- case FILTER_OP_LE_S64_DOUBLE:
- case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
- case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
- case FILTER_OP_LOAD_DOUBLE:
- case FILTER_OP_CAST_DOUBLE_TO_S64:
- case FILTER_OP_UNARY_PLUS_DOUBLE:
- case FILTER_OP_UNARY_MINUS_DOUBLE:
- case FILTER_OP_UNARY_NOT_DOUBLE:
- {
- printk(KERN_WARNING "LTTng: filter: unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- break;
- }
-
- case FILTER_OP_EQ:
- case FILTER_OP_NE:
- case FILTER_OP_GT:
- case FILTER_OP_LT:
- case FILTER_OP_GE:
- case FILTER_OP_LE:
- case FILTER_OP_EQ_STRING:
- case FILTER_OP_NE_STRING:
- case FILTER_OP_GT_STRING:
- case FILTER_OP_LT_STRING:
- case FILTER_OP_GE_STRING:
- case FILTER_OP_LE_STRING:
- case FILTER_OP_EQ_STAR_GLOB_STRING:
- case FILTER_OP_NE_STAR_GLOB_STRING:
- case FILTER_OP_EQ_S64:
- case FILTER_OP_NE_S64:
- case FILTER_OP_GT_S64:
- case FILTER_OP_LT_S64:
- case FILTER_OP_GE_S64:
- case FILTER_OP_LE_S64:
- case FILTER_OP_BIT_RSHIFT:
- case FILTER_OP_BIT_LSHIFT:
- case FILTER_OP_BIT_AND:
- case FILTER_OP_BIT_OR:
- case FILTER_OP_BIT_XOR:
- {
- if (unlikely(pc + sizeof(struct binary_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* unary */
- case FILTER_OP_UNARY_PLUS:
- case FILTER_OP_UNARY_MINUS:
- case FILTER_OP_UNARY_NOT:
- case FILTER_OP_UNARY_PLUS_S64:
- case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT_S64:
- case FILTER_OP_UNARY_BIT_NOT:
- {
- if (unlikely(pc + sizeof(struct unary_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* logical */
- case FILTER_OP_AND:
- case FILTER_OP_OR:
- {
- if (unlikely(pc + sizeof(struct logical_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* load field ref */
- case FILTER_OP_LOAD_FIELD_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown field ref type\n");
- ret = -EINVAL;
- break;
- }
-
- /* get context ref */
- case FILTER_OP_GET_CONTEXT_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown field ref type\n");
- ret = -EINVAL;
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
- case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
- case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
- case FILTER_OP_LOAD_FIELD_REF_S64:
- case FILTER_OP_GET_CONTEXT_REF_STRING:
- case FILTER_OP_GET_CONTEXT_REF_S64:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* load from immediate operand */
- case FILTER_OP_LOAD_STRING:
- case FILTER_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
- uint32_t str_len, maxlen;
-
- if (unlikely(pc + sizeof(struct load_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- break;
- }
-
- maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
- str_len = strnlen(insn->data, maxlen);
- if (unlikely(str_len >= maxlen)) {
- /* Final '\0' not found within range */
- ret = -ERANGE;
- }
- break;
- }
-
- case FILTER_OP_LOAD_S64:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- case FILTER_OP_CAST_TO_S64:
- case FILTER_OP_CAST_NOP:
- {
- if (unlikely(pc + sizeof(struct cast_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case FILTER_OP_GET_CONTEXT_ROOT:
- case FILTER_OP_GET_APP_CONTEXT_ROOT:
- case FILTER_OP_GET_PAYLOAD_ROOT:
- case FILTER_OP_LOAD_FIELD:
- case FILTER_OP_LOAD_FIELD_S8:
- case FILTER_OP_LOAD_FIELD_S16:
- case FILTER_OP_LOAD_FIELD_S32:
- case FILTER_OP_LOAD_FIELD_S64:
- case FILTER_OP_LOAD_FIELD_U8:
- case FILTER_OP_LOAD_FIELD_U16:
- case FILTER_OP_LOAD_FIELD_U32:
- case FILTER_OP_LOAD_FIELD_U64:
- case FILTER_OP_LOAD_FIELD_STRING:
- case FILTER_OP_LOAD_FIELD_SEQUENCE:
- case FILTER_OP_LOAD_FIELD_DOUBLE:
- if (unlikely(pc + sizeof(struct load_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
-
- case FILTER_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- break;
- }
- ret = validate_get_symbol(bytecode, sym);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL_FIELD:
- printk(KERN_WARNING "LTTng: filter: Unexpected get symbol field\n");
- ret = -EINVAL;
- break;
-
- case FILTER_OP_GET_INDEX_U16:
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
-
- case FILTER_OP_GET_INDEX_U64:
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- return ret;
-}
-
-static
-unsigned long delete_all_nodes(struct mp_table *mp_table)
-{
- struct mp_node *mp_node;
- struct hlist_node *tmp;
- unsigned long nr_nodes = 0;
- int i;
-
- for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
- struct hlist_head *head;
-
- head = &mp_table->mp_head[i];
- lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
- kfree(mp_node);
- nr_nodes++;
- }
- }
- return nr_nodes;
-}
-
-/*
- * Return value:
- * >=0: success
- * <0: error
- */
-static
-int validate_instruction_context(struct bytecode_runtime *bytecode,
- struct vstack *stack,
- char *start_pc,
- char *pc)
-{
- int ret = 0;
- const filter_opcode_t opcode = *(filter_opcode_t *) pc;
-
- switch (opcode) {
- case FILTER_OP_UNKNOWN:
- default:
- {
- printk(KERN_WARNING "LTTng: filter: unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_RETURN:
- case FILTER_OP_RETURN_S64:
- {
- goto end;
- }
-
- /* binary */
- case FILTER_OP_MUL:
- case FILTER_OP_DIV:
- case FILTER_OP_MOD:
- case FILTER_OP_PLUS:
- case FILTER_OP_MINUS:
- /* Floating point */
- case FILTER_OP_EQ_DOUBLE:
- case FILTER_OP_NE_DOUBLE:
- case FILTER_OP_GT_DOUBLE:
- case FILTER_OP_LT_DOUBLE:
- case FILTER_OP_GE_DOUBLE:
- case FILTER_OP_LE_DOUBLE:
- case FILTER_OP_EQ_DOUBLE_S64:
- case FILTER_OP_NE_DOUBLE_S64:
- case FILTER_OP_GT_DOUBLE_S64:
- case FILTER_OP_LT_DOUBLE_S64:
- case FILTER_OP_GE_DOUBLE_S64:
- case FILTER_OP_LE_DOUBLE_S64:
- case FILTER_OP_EQ_S64_DOUBLE:
- case FILTER_OP_NE_S64_DOUBLE:
- case FILTER_OP_GT_S64_DOUBLE:
- case FILTER_OP_LT_S64_DOUBLE:
- case FILTER_OP_GE_S64_DOUBLE:
- case FILTER_OP_LE_S64_DOUBLE:
- case FILTER_OP_UNARY_PLUS_DOUBLE:
- case FILTER_OP_UNARY_MINUS_DOUBLE:
- case FILTER_OP_UNARY_NOT_DOUBLE:
- case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
- case FILTER_OP_LOAD_DOUBLE:
- case FILTER_OP_CAST_DOUBLE_TO_S64:
- case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
- {
- printk(KERN_WARNING "LTTng: filter: unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_EQ:
- {
- ret = bin_op_compare_check(stack, opcode, "==");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_NE:
- {
- ret = bin_op_compare_check(stack, opcode, "!=");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_GT:
- {
- ret = bin_op_compare_check(stack, opcode, ">");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_LT:
- {
- ret = bin_op_compare_check(stack, opcode, "<");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_GE:
- {
- ret = bin_op_compare_check(stack, opcode, ">=");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_LE:
- {
- ret = bin_op_compare_check(stack, opcode, "<=");
- if (ret < 0)
- goto end;
- break;
- }
-
- case FILTER_OP_EQ_STRING:
- case FILTER_OP_NE_STRING:
- case FILTER_OP_GT_STRING:
- case FILTER_OP_LT_STRING:
- case FILTER_OP_GE_STRING:
- case FILTER_OP_LE_STRING:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_STRING
- || vstack_bx(stack)->type != REG_STRING) {
- printk(KERN_WARNING "LTTng: filter: Unexpected register type for string comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
-
- case FILTER_OP_EQ_STAR_GLOB_STRING:
- case FILTER_OP_NE_STAR_GLOB_STRING:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
- && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
- printk(KERN_WARNING "LTTng: filter: Unexpected register type for globbing pattern comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case FILTER_OP_EQ_S64:
- case FILTER_OP_NE_S64:
- case FILTER_OP_GT_S64:
- case FILTER_OP_LT_S64:
- case FILTER_OP_GE_S64:
- case FILTER_OP_LE_S64:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type for s64 comparator\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_bx(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type for s64 comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case FILTER_OP_BIT_RSHIFT:
- ret = bin_op_bitwise_check(stack, opcode, ">>");
- if (ret < 0)
- goto end;
- break;
- case FILTER_OP_BIT_LSHIFT:
- ret = bin_op_bitwise_check(stack, opcode, "<<");
- if (ret < 0)
- goto end;
- break;
- case FILTER_OP_BIT_AND:
- ret = bin_op_bitwise_check(stack, opcode, "&");
- if (ret < 0)
- goto end;
- break;
- case FILTER_OP_BIT_OR:
- ret = bin_op_bitwise_check(stack, opcode, "|");
- if (ret < 0)
- goto end;
- break;
- case FILTER_OP_BIT_XOR:
- ret = bin_op_bitwise_check(stack, opcode, "^");
- if (ret < 0)
- goto end;
- break;
-
- /* unary */
- case FILTER_OP_UNARY_PLUS:
- case FILTER_OP_UNARY_MINUS:
- case FILTER_OP_UNARY_NOT:
- {
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- case REG_DOUBLE:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: Unary op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- case REG_U64:
- case REG_TYPE_UNKNOWN:
- break;
- }
- break;
- }
- case FILTER_OP_UNARY_BIT_NOT:
- {
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_DOUBLE:
- printk(KERN_WARNING "LTTng: filter: Unary bitwise op can only be applied to numeric registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- case REG_U64:
- case REG_TYPE_UNKNOWN:
- break;
- }
- break;
- }
-
- case FILTER_OP_UNARY_PLUS_S64:
- case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT_S64:
- {
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64 &&
- vstack_ax(stack)->type != REG_U64) {
- printk(KERN_WARNING "LTTng: filter: Invalid register type\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- /* logical */
- case FILTER_OP_AND:
- case FILTER_OP_OR:
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64 &&
- vstack_ax(stack)->type != REG_U64) {
- printk(KERN_WARNING "LTTng: filter: Logical comparator expects S64 register\n");
- ret = -EINVAL;
- goto end;
- }
-
- dbg_printk("Validate jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- if (unlikely(start_pc + insn->skip_offset <= pc)) {
- printk(KERN_WARNING "LTTng: filter: Loops are not allowed in bytecode\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- /* load field ref */
- case FILTER_OP_LOAD_FIELD_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- case FILTER_OP_LOAD_FIELD_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
- case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
- case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("Validate load field ref offset %u type string\n",
- ref->offset);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_S64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("Validate load field ref offset %u type s64\n",
- ref->offset);
- break;
- }
-
- /* load from immediate operand */
- case FILTER_OP_LOAD_STRING:
- case FILTER_OP_LOAD_STAR_GLOB_STRING:
- {
- break;
- }
-
- case FILTER_OP_LOAD_S64:
- {
- break;
- }
-
- case FILTER_OP_CAST_TO_S64:
- {
- struct cast_op *insn = (struct cast_op *) pc;
-
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- case REG_DOUBLE:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: Cast op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- break;
- }
- if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
- if (vstack_ax(stack)->type != REG_DOUBLE) {
- printk(KERN_WARNING "LTTng: filter: Cast expects double\n");
- ret = -EINVAL;
- goto end;
- }
- }
- break;
- }
- case FILTER_OP_CAST_NOP:
- {
- break;
- }
-
- /* get context ref */
- case FILTER_OP_GET_CONTEXT_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown get context ref type\n");
- ret = -EINVAL;
- goto end;
- }
- case FILTER_OP_GET_CONTEXT_REF_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("Validate get context ref offset %u type string\n",
- ref->offset);
- break;
- }
- case FILTER_OP_GET_CONTEXT_REF_S64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("Validate get context ref offset %u type s64\n",
- ref->offset);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case FILTER_OP_GET_CONTEXT_ROOT:
- {
- dbg_printk("Validate get context root\n");
- break;
- }
- case FILTER_OP_GET_APP_CONTEXT_ROOT:
- {
- dbg_printk("Validate get app context root\n");
- break;
- }
- case FILTER_OP_GET_PAYLOAD_ROOT:
- {
- dbg_printk("Validate get payload root\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD:
- {
- /*
- * We tolerate that field type is unknown at validation,
- * because we are performing the load specialization in
- * a phase after validation.
- */
- dbg_printk("Validate load field\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_S8:
- {
- dbg_printk("Validate load field s8\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_S16:
- {
- dbg_printk("Validate load field s16\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_S32:
- {
- dbg_printk("Validate load field s32\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_S64:
- {
- dbg_printk("Validate load field s64\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_U8:
- {
- dbg_printk("Validate load field u8\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_U16:
- {
- dbg_printk("Validate load field u16\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_U32:
- {
- dbg_printk("Validate load field u32\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_U64:
- {
- dbg_printk("Validate load field u64\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_STRING:
- {
- dbg_printk("Validate load field string\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_SEQUENCE:
- {
- dbg_printk("Validate load field sequence\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_DOUBLE:
- {
- dbg_printk("Validate load field double\n");
- break;
- }
-
- case FILTER_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- dbg_printk("Validate get symbol offset %u\n", sym->offset);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL_FIELD:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- dbg_printk("Validate get symbol field offset %u\n", sym->offset);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U16:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
-
- dbg_printk("Validate get index u16 index %u\n", get_index->index);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
-
- dbg_printk("Validate get index u64 index %llu\n",
- (unsigned long long) get_index->index);
- break;
- }
- }
-end:
- return ret;
-}
-
-/*
- * Return value:
- * 0: success
- * <0: error
- */
-static
-int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
- struct mp_table *mp_table,
- struct vstack *stack,
- char *start_pc,
- char *pc)
-{
- int ret, found = 0;
- unsigned long target_pc = pc - start_pc;
- unsigned long hash;
- struct hlist_head *head;
- struct mp_node *mp_node;
-
- /* Validate the context resulting from the previous instruction */
- ret = validate_instruction_context(bytecode, stack, start_pc, pc);
- if (ret < 0)
- return ret;
-
- /* Validate merge points */
- hash = jhash_1word(target_pc, 0);
- head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
- lttng_hlist_for_each_entry(mp_node, head, node) {
- if (lttng_hash_match(mp_node, target_pc)) {
- found = 1;
- break;
- }
- }
- if (found) {
- dbg_printk("Filter: validate merge point at offset %lu\n",
- target_pc);
- if (merge_points_compare(stack, &mp_node->stack)) {
- printk(KERN_WARNING "LTTng: filter: Merge points differ for offset %lu\n",
- target_pc);
- return -EINVAL;
- }
- /* Once validated, we can remove the merge point */
- dbg_printk("Filter: remove merge point at offset %lu\n",
- target_pc);
- hlist_del(&mp_node->node);
- }
- return 0;
-}
-
-/*
- * Return value:
- * >0: going to next insn.
- * 0: success, stop iteration.
- * <0: error
- */
-static
-int exec_insn(struct bytecode_runtime *bytecode,
- struct mp_table *mp_table,
- struct vstack *stack,
- char **_next_pc,
- char *pc)
-{
- int ret = 1;
- char *next_pc = *_next_pc;
-
- switch (*(filter_opcode_t *) pc) {
- case FILTER_OP_UNKNOWN:
- default:
- {
- printk(KERN_WARNING "LTTng: filter: unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_RETURN:
- {
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- case REG_STRING:
- case REG_PTR:
- case REG_TYPE_UNKNOWN:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d at end of bytecode\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- ret = 0;
- goto end;
- }
-
- case FILTER_OP_RETURN_S64:
- {
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- case REG_TYPE_UNKNOWN:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d at end of bytecode\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- ret = 0;
- goto end;
- }
-
- /* binary */
- case FILTER_OP_MUL:
- case FILTER_OP_DIV:
- case FILTER_OP_MOD:
- case FILTER_OP_PLUS:
- case FILTER_OP_MINUS:
- /* Floating point */
- case FILTER_OP_EQ_DOUBLE:
- case FILTER_OP_NE_DOUBLE:
- case FILTER_OP_GT_DOUBLE:
- case FILTER_OP_LT_DOUBLE:
- case FILTER_OP_GE_DOUBLE:
- case FILTER_OP_LE_DOUBLE:
- case FILTER_OP_EQ_DOUBLE_S64:
- case FILTER_OP_NE_DOUBLE_S64:
- case FILTER_OP_GT_DOUBLE_S64:
- case FILTER_OP_LT_DOUBLE_S64:
- case FILTER_OP_GE_DOUBLE_S64:
- case FILTER_OP_LE_DOUBLE_S64:
- case FILTER_OP_EQ_S64_DOUBLE:
- case FILTER_OP_NE_S64_DOUBLE:
- case FILTER_OP_GT_S64_DOUBLE:
- case FILTER_OP_LT_S64_DOUBLE:
- case FILTER_OP_GE_S64_DOUBLE:
- case FILTER_OP_LE_S64_DOUBLE:
- case FILTER_OP_UNARY_PLUS_DOUBLE:
- case FILTER_OP_UNARY_MINUS_DOUBLE:
- case FILTER_OP_UNARY_NOT_DOUBLE:
- case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
- case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
- case FILTER_OP_LOAD_DOUBLE:
- case FILTER_OP_CAST_DOUBLE_TO_S64:
- {
- printk(KERN_WARNING "LTTng: filter: unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_EQ:
- case FILTER_OP_NE:
- case FILTER_OP_GT:
- case FILTER_OP_LT:
- case FILTER_OP_GE:
- case FILTER_OP_LE:
- case FILTER_OP_EQ_STRING:
- case FILTER_OP_NE_STRING:
- case FILTER_OP_GT_STRING:
- case FILTER_OP_LT_STRING:
- case FILTER_OP_GE_STRING:
- case FILTER_OP_LE_STRING:
- case FILTER_OP_EQ_STAR_GLOB_STRING:
- case FILTER_OP_NE_STAR_GLOB_STRING:
- case FILTER_OP_EQ_S64:
- case FILTER_OP_NE_S64:
- case FILTER_OP_GT_S64:
- case FILTER_OP_LT_S64:
- case FILTER_OP_GE_S64:
- case FILTER_OP_LE_S64:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_TYPE_UNKNOWN:
- break;
- default:
- printk(KERN_WARNING "Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
- case FILTER_OP_BIT_RSHIFT:
- case FILTER_OP_BIT_LSHIFT:
- case FILTER_OP_BIT_AND:
- case FILTER_OP_BIT_OR:
- case FILTER_OP_BIT_XOR:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_TYPE_UNKNOWN:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- /* unary */
- case FILTER_OP_UNARY_PLUS:
- case FILTER_OP_UNARY_MINUS:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_TYPE_UNKNOWN:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_PLUS_S64:
- case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_NOT:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_TYPE_UNKNOWN:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_BIT_NOT:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_TYPE_UNKNOWN:
- break;
- case REG_DOUBLE:
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- /* logical */
- case FILTER_OP_AND:
- case FILTER_OP_OR:
- {
- struct logical_op *insn = (struct logical_op *) pc;
- int merge_ret;
-
- /* Add merge point to table */
- merge_ret = merge_point_add_check(mp_table,
- insn->skip_offset, stack);
- if (merge_ret) {
- ret = merge_ret;
- goto end;
- }
-
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- /* There is always a cast-to-s64 operation before a or/and op. */
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Incorrect register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- /* Continue to next instruction */
- /* Pop 1 when jump not taken */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct logical_op);
- break;
- }
-
- /* load field ref */
- case FILTER_OP_LOAD_FIELD_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- /* get context ref */
- case FILTER_OP_GET_CONTEXT_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown get context ref type\n");
- ret = -EINVAL;
- goto end;
- }
- case FILTER_OP_LOAD_FIELD_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
- case FILTER_OP_GET_CONTEXT_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
- case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_S64:
- case FILTER_OP_GET_CONTEXT_REF_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
-
- /* load from immediate operand */
- case FILTER_OP_LOAD_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case FILTER_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case FILTER_OP_LOAD_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- break;
- }
-
- case FILTER_OP_CAST_TO_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- case REG_TYPE_UNKNOWN:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Incorrect register type %d for cast\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case FILTER_OP_CAST_NOP:
- {
- next_pc += sizeof(struct cast_op);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case FILTER_OP_GET_CONTEXT_ROOT:
- case FILTER_OP_GET_APP_CONTEXT_ROOT:
- case FILTER_OP_GET_PAYLOAD_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_S8:
- case FILTER_OP_LOAD_FIELD_S16:
- case FILTER_OP_LOAD_FIELD_S32:
- case FILTER_OP_LOAD_FIELD_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op);
- break;
- }
- case FILTER_OP_LOAD_FIELD_U8:
- case FILTER_OP_LOAD_FIELD_U16:
- case FILTER_OP_LOAD_FIELD_U32:
- case FILTER_OP_LOAD_FIELD_U64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct load_op);
- break;
- }
- case FILTER_OP_LOAD_FIELD_STRING:
- case FILTER_OP_LOAD_FIELD_SEQUENCE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_DOUBLE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL:
- case FILTER_OP_GET_SYMBOL_FIELD:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U16:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- break;
- }
-
- }
-end:
- *_next_pc = next_pc;
- return ret;
-}
-
-/*
- * Never called concurrently (hash seed is shared).
- */
-int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
-{
- struct mp_table *mp_table;
- char *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- struct vstack stack;
-
- vstack_init(&stack);
-
- mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
- if (!mp_table) {
- printk(KERN_WARNING "LTTng: filter: Error allocating hash table for bytecode validation\n");
- return -ENOMEM;
- }
- start_pc = &bytecode->code[0];
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
- pc = next_pc) {
- ret = bytecode_validate_overflow(bytecode, start_pc, pc);
- if (ret != 0) {
- if (ret == -ERANGE)
- printk(KERN_WARNING "LTTng: filter: filter bytecode overflow\n");
- goto end;
- }
- dbg_printk("Validating op %s (%u)\n",
- lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
- (unsigned int) *(filter_opcode_t *) pc);
-
- /*
- * For each instruction, validate the current context
- * (traversal of entire execution flow), and validate
- * all merge points targeting this instruction.
- */
- ret = validate_instruction_all_contexts(bytecode, mp_table,
- &stack, start_pc, pc);
- if (ret)
- goto end;
- ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
- if (ret <= 0)
- goto end;
- }
-end:
- if (delete_all_nodes(mp_table)) {
- if (!ret) {
- printk(KERN_WARNING "LTTng: filter: Unexpected merge points\n");
- ret = -EINVAL;
- }
- }
- kfree(mp_table);
- return ret;
-}
+++ /dev/null
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter.c
- *
- * LTTng modules filter code.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <lttng/filter.h>
-
-static const char *opnames[] = {
- [ FILTER_OP_UNKNOWN ] = "UNKNOWN",
-
- [ FILTER_OP_RETURN ] = "RETURN",
-
- /* binary */
- [ FILTER_OP_MUL ] = "MUL",
- [ FILTER_OP_DIV ] = "DIV",
- [ FILTER_OP_MOD ] = "MOD",
- [ FILTER_OP_PLUS ] = "PLUS",
- [ FILTER_OP_MINUS ] = "MINUS",
- [ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
- [ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
- [ FILTER_OP_BIT_AND ] = "BIT_AND",
- [ FILTER_OP_BIT_OR ] = "BIT_OR",
- [ FILTER_OP_BIT_XOR ] = "BIT_XOR",
-
- /* binary comparators */
- [ FILTER_OP_EQ ] = "EQ",
- [ FILTER_OP_NE ] = "NE",
- [ FILTER_OP_GT ] = "GT",
- [ FILTER_OP_LT ] = "LT",
- [ FILTER_OP_GE ] = "GE",
- [ FILTER_OP_LE ] = "LE",
-
- /* string binary comparators */
- [ FILTER_OP_EQ_STRING ] = "EQ_STRING",
- [ FILTER_OP_NE_STRING ] = "NE_STRING",
- [ FILTER_OP_GT_STRING ] = "GT_STRING",
- [ FILTER_OP_LT_STRING ] = "LT_STRING",
- [ FILTER_OP_GE_STRING ] = "GE_STRING",
- [ FILTER_OP_LE_STRING ] = "LE_STRING",
-
- /* s64 binary comparators */
- [ FILTER_OP_EQ_S64 ] = "EQ_S64",
- [ FILTER_OP_NE_S64 ] = "NE_S64",
- [ FILTER_OP_GT_S64 ] = "GT_S64",
- [ FILTER_OP_LT_S64 ] = "LT_S64",
- [ FILTER_OP_GE_S64 ] = "GE_S64",
- [ FILTER_OP_LE_S64 ] = "LE_S64",
-
- /* double binary comparators */
- [ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
- [ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
- [ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
- [ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
- [ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
- [ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",
-
- /* Mixed S64-double binary comparators */
- [ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
- [ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
- [ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
- [ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
- [ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
- [ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
-
- [ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
- [ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
- [ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
- [ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
- [ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
- [ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
-
- /* unary */
- [ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
- [ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
- [ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
- [ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
- [ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
- [ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
- [ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
- [ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
- [ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
-
- /* logical */
- [ FILTER_OP_AND ] = "AND",
- [ FILTER_OP_OR ] = "OR",
-
- /* load field ref */
- [ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
- [ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
- [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
- [ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
- [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
-
- /* load from immediate operand */
- [ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
- [ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
- [ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
-
- /* cast */
- [ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
- [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
- [ FILTER_OP_CAST_NOP ] = "CAST_NOP",
-
- /* get context ref */
- [ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
- [ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
- [ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
- [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
-
- /* load userspace field ref */
- [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
- [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
-
- /*
- * load immediate star globbing pattern (literal string)
- * from immediate.
- */
- [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
-
- /* globbing pattern binary operator: apply to */
- [ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
- [ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- [ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
- [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
- [ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
-
- [ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
- [ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
- [ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
- [ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
-
- [ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
- [ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
- [ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
- [ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
- [ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
- [ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
- [ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
- [ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
- [ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
- [ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
- [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
- [ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
-
- [ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
-
- [ FILTER_OP_RETURN_S64 ] = "RETURN_S64",
-};
-
-const char *lttng_filter_print_op(enum filter_op op)
-{
- if (op >= NR_FILTER_OPS)
- return "UNKNOWN";
- else
- return opnames[op];
-}
-
-static
-int apply_field_reloc(const struct lttng_event_desc *event_desc,
- struct bytecode_runtime *runtime,
- uint32_t runtime_len,
- uint32_t reloc_offset,
- const char *field_name,
- enum filter_op filter_op)
-{
- const struct lttng_event_field *fields, *field = NULL;
- unsigned int nr_fields, i;
- struct load_op *op;
- uint32_t field_offset = 0;
-
- dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);
-
- /* Lookup event by name */
- if (!event_desc)
- return -EINVAL;
- fields = event_desc->fields;
- if (!fields)
- return -EINVAL;
- nr_fields = event_desc->nr_fields;
- for (i = 0; i < nr_fields; i++) {
- if (fields[i].nofilter)
- continue;
- if (!strcmp(fields[i].name, field_name)) {
- field = &fields[i];
- break;
- }
- /* compute field offset */
- switch (fields[i].type.atype) {
- case atype_integer:
- case atype_enum_nestable:
- field_offset += sizeof(int64_t);
- break;
- case atype_array_nestable:
- if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
- return -EINVAL;
- field_offset += sizeof(unsigned long);
- field_offset += sizeof(void *);
- break;
- case atype_sequence_nestable:
- if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
- return -EINVAL;
- field_offset += sizeof(unsigned long);
- field_offset += sizeof(void *);
- break;
- case atype_string:
- field_offset += sizeof(void *);
- break;
- case atype_struct_nestable: /* Unsupported. */
- case atype_variant_nestable: /* Unsupported. */
- default:
- return -EINVAL;
- }
- }
- if (!field)
- return -EINVAL;
-
- /* Check if field offset is too large for 16-bit offset */
- if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
- return -EINVAL;
-
- /* set type */
- op = (struct load_op *) &runtime->code[reloc_offset];
-
- switch (filter_op) {
- case FILTER_OP_LOAD_FIELD_REF:
- {
- struct field_ref *field_ref;
-
- field_ref = (struct field_ref *) op->data;
- switch (field->type.atype) {
- case atype_integer:
- case atype_enum_nestable:
- op->op = FILTER_OP_LOAD_FIELD_REF_S64;
- break;
- case atype_array_nestable:
- case atype_sequence_nestable:
- if (field->user)
- op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
- else
- op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
- break;
- case atype_string:
- if (field->user)
- op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
- else
- op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
- break;
- case atype_struct_nestable: /* Unsupported. */
- case atype_variant_nestable: /* Unsupported. */
- default:
- return -EINVAL;
- }
- /* set offset */
- field_ref->offset = (uint16_t) field_offset;
- break;
- }
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int apply_context_reloc(struct bytecode_runtime *runtime,
- uint32_t runtime_len,
- uint32_t reloc_offset,
- const char *context_name,
- enum filter_op filter_op)
-{
- struct load_op *op;
- struct lttng_ctx_field *ctx_field;
- int idx;
-
- dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);
-
- /* Get context index */
- idx = lttng_get_context_index(lttng_static_ctx, context_name);
- if (idx < 0)
- return -ENOENT;
-
- /* Check if idx is too large for 16-bit offset */
- if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
- return -EINVAL;
-
- /* Get context return type */
- ctx_field = <tng_static_ctx->fields[idx];
- op = (struct load_op *) &runtime->code[reloc_offset];
-
- switch (filter_op) {
- case FILTER_OP_GET_CONTEXT_REF:
- {
- struct field_ref *field_ref;
-
- field_ref = (struct field_ref *) op->data;
- switch (ctx_field->event_field.type.atype) {
- case atype_integer:
- case atype_enum_nestable:
- op->op = FILTER_OP_GET_CONTEXT_REF_S64;
- break;
- /* Sequence and array supported as string */
- case atype_string:
- BUG_ON(ctx_field->event_field.user);
- op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
- break;
- case atype_array_nestable:
- if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
- return -EINVAL;
- BUG_ON(ctx_field->event_field.user);
- op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
- break;
- case atype_sequence_nestable:
- if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
- return -EINVAL;
- BUG_ON(ctx_field->event_field.user);
- op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
- break;
- case atype_struct_nestable: /* Unsupported. */
- case atype_variant_nestable: /* Unsupported. */
- default:
- return -EINVAL;
- }
- /* set offset to context index within channel contexts */
- field_ref->offset = (uint16_t) idx;
- break;
- }
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int apply_reloc(const struct lttng_event_desc *event_desc,
- struct bytecode_runtime *runtime,
- uint32_t runtime_len,
- uint32_t reloc_offset,
- const char *name)
-{
- struct load_op *op;
-
- dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);
-
- /* Ensure that the reloc is within the code */
- if (runtime_len - reloc_offset < sizeof(uint16_t))
- return -EINVAL;
-
- op = (struct load_op *) &runtime->code[reloc_offset];
- switch (op->op) {
- case FILTER_OP_LOAD_FIELD_REF:
- return apply_field_reloc(event_desc, runtime, runtime_len,
- reloc_offset, name, op->op);
- case FILTER_OP_GET_CONTEXT_REF:
- return apply_context_reloc(runtime, runtime_len,
- reloc_offset, name, op->op);
- case FILTER_OP_GET_SYMBOL:
- case FILTER_OP_GET_SYMBOL_FIELD:
- /*
- * Will be handled by load specialize phase or
- * dynamically by interpreter.
- */
- return 0;
- default:
- printk(KERN_WARNING "LTTng: filter: Unknown reloc op type %u\n", op->op);
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int bytecode_is_linked(struct lttng_bytecode_node *bytecode,
- struct list_head *bytecode_runtime_head)
-{
- struct lttng_bytecode_runtime *bc_runtime;
-
- list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
- if (bc_runtime->bc == bytecode)
- return 1;
- }
- return 0;
-}
-
-/*
- * Take a bytecode with reloc table and link it to an event to create a
- * bytecode runtime.
- */
-static
-int _lttng_filter_link_bytecode(const struct lttng_event_desc *event_desc,
- struct lttng_ctx *ctx,
- struct lttng_bytecode_node *bytecode,
- struct list_head *insert_loc)
-{
- int ret, offset, next_offset;
- struct bytecode_runtime *runtime = NULL;
- size_t runtime_alloc_len;
-
- if (!bytecode)
- return 0;
- /* Bytecode already linked */
- if (bytecode_is_linked(bytecode, insert_loc))
- return 0;
-
- dbg_printk("Linking...\n");
-
- /* We don't need the reloc table in the runtime */
- runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
- runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
- if (!runtime) {
- ret = -ENOMEM;
- goto alloc_error;
- }
- runtime->p.bc = bytecode;
- runtime->p.ctx = ctx;
- runtime->len = bytecode->bc.reloc_offset;
- /* copy original bytecode */
- memcpy(runtime->code, bytecode->bc.data, runtime->len);
- /*
- * apply relocs. Those are a uint16_t (offset in bytecode)
- * followed by a string (field name).
- */
- for (offset = bytecode->bc.reloc_offset;
- offset < bytecode->bc.len;
- offset = next_offset) {
- uint16_t reloc_offset =
- *(uint16_t *) &bytecode->bc.data[offset];
- const char *name =
- (const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];
-
- ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
- if (ret) {
- goto link_error;
- }
- next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
- }
- /* Validate bytecode */
- ret = lttng_filter_validate_bytecode(runtime);
- if (ret) {
- goto link_error;
- }
- /* Specialize bytecode */
- ret = lttng_filter_specialize_bytecode(event_desc, runtime);
- if (ret) {
- goto link_error;
- }
- runtime->p.filter = lttng_filter_interpret_bytecode;
- runtime->p.link_failed = 0;
- list_add_rcu(&runtime->p.node, insert_loc);
- dbg_printk("Linking successful.\n");
- return 0;
-
-link_error:
- runtime->p.filter = lttng_filter_interpret_bytecode_false;
- runtime->p.link_failed = 1;
- list_add_rcu(&runtime->p.node, insert_loc);
-alloc_error:
- dbg_printk("Linking failed.\n");
- return ret;
-}
-
-void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
-{
- struct lttng_bytecode_node *bc = runtime->bc;
-
- if (!bc->enabler->enabled || runtime->link_failed)
- runtime->filter = lttng_filter_interpret_bytecode_false;
- else
- runtime->filter = lttng_filter_interpret_bytecode;
-}
-
-/*
- * Link bytecode for all enablers referenced by an event.
- */
-void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
- struct lttng_ctx *ctx,
- struct list_head *bytecode_runtime_head,
- struct lttng_enabler *enabler)
-{
- struct lttng_bytecode_node *bc;
- struct lttng_bytecode_runtime *runtime;
-
- /* Can only be called for events with desc attached */
- WARN_ON_ONCE(!event_desc);
-
- /* Link each bytecode. */
- list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
- int found = 0, ret;
- struct list_head *insert_loc;
-
- list_for_each_entry(runtime,
- bytecode_runtime_head, node) {
- if (runtime->bc == bc) {
- found = 1;
- break;
- }
- }
- /* Skip bytecode already linked */
- if (found)
- continue;
-
- /*
- * Insert at specified priority (seqnum) in increasing
- * order. If there already is a bytecode of the same priority,
- * insert the new bytecode right after it.
- */
- list_for_each_entry_reverse(runtime,
- bytecode_runtime_head, node) {
- if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
- /* insert here */
- insert_loc = &runtime->node;
- goto add_within;
- }
- }
- /* Add to head to list */
- insert_loc = bytecode_runtime_head;
- add_within:
- dbg_printk("linking bytecode\n");
- ret = _lttng_filter_link_bytecode(event_desc, ctx, bc,
- insert_loc);
- if (ret) {
- dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
- }
- }
-}
-
-/*
- * We own the filter_bytecode if we return success.
- */
-int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
- struct lttng_bytecode_node *filter_bytecode)
-{
- list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
- return 0;
-}
-
-void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
-{
- struct lttng_bytecode_node *filter_bytecode, *tmp;
-
- list_for_each_entry_safe(filter_bytecode, tmp,
- &enabler->filter_bytecode_head, node) {
- kfree(filter_bytecode);
- }
-}
-
-void lttng_free_event_filter_runtime(struct lttng_event *event)
-{
- struct bytecode_runtime *runtime, *tmp;
-
- list_for_each_entry_safe(runtime, tmp,
- &event->filter_bytecode_runtime_head, p.node) {
- kfree(runtime->data);
- kfree(runtime);
- }
-}