From: Francis Deslauriers
Date: Wed, 15 Apr 2020 15:03:53 +0000 (-0400)
Subject: Rename filter bytecode types and files
X-Git-Tag: v2.13.0-rc1~442
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=04aa13f8c2944839f6514e3841b93057b443a783;p=lttng-ust.git

Rename filter bytecode types and files

File renames:
- filter-bytecode.h -> bytecode.h
- lttng-filter-interpreter.c -> lttng-bytecode-interpreter.c
- lttng-filter-specialize.c -> lttng-bytecode-specialize.c
- lttng-filter-validator.c -> lttng-bytecode-validator.c
- lttng-filter.c -> lttng-bytecode.c
- lttng-filter.h -> lttng-bytecode.h

Function renames:
- `lttng_filter_interpret_bytecode_false()` -> `lttng_bytecode_filter_interpret_false()`
- `lttng_filter_interpret_bytecode()` -> `lttng_bytecode_filter_interpret()`
- `lttng_filter_specialize_bytecode()` -> `lttng_bytecode_specialize()`
- `lttng_filter_validate_bytecode()` -> `lttng_bytecode_validate()`

Type renames:
- `filter_opcode_t` -> `bytecode_opcode_t`

Enum renames:
- `enum filter_op` -> `enum bytecode_op`
- `FILTER_OP_*` -> `BYTECODE_OP_*`
- `enum lttng_filter_ret` -> `enum lttng_bytecode_interpreter_ret`
- `LTTNG_FILTER_DISCARD` -> `LTTNG_INTERPRETER_DISCARD`
- `LTTNG_FILTER_RECORD_FLAG` -> `LTTNG_INTERPRETER_RECORD_FLAG`

Define renames:
- `FILTER_STACK_EMPTY` -> `INTERPRETER_STACK_EMPTY`
- `FILTER_STACK_LEN` -> `INTERPRETER_STACK_LEN`
- `FILTER_MAX_DATA_LEN` -> `BYTECODE_MAX_DATA_LEN`

Signed-off-by: Francis Deslauriers
Signed-off-by: Mathieu Desnoyers
Change-Id: Iaab55116da5a3a8562b0f9e5b6033b556292f55b
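
The renamed return values keep the same semantics: the interpreter callback
returns a bit mask, and the caller records the event only when the
LTTNG_INTERPRETER_RECORD_FLAG bit is raised. The following minimal,
self-contained C sketch is not part of the patch and the `example_*` names are
hypothetical; it only mirrors the call sites updated in ust-tracepoint-event.h
and the always-discard stub `lttng_bytecode_filter_interpret_false()`:

	/* Illustration only: mirrors enum lttng_bytecode_interpreter_ret. */
	#include <stdint.h>
	#include <stdio.h>

	enum example_interpreter_ret {
		EXAMPLE_INTERPRETER_DISCARD	= 0,
		EXAMPLE_INTERPRETER_RECORD_FLAG	= (1ULL << 0),
	};

	/* Same shape as the renamed interpreter callback. */
	typedef uint64_t (*example_interpret_cb)(void *filter_data,
			const char *filter_stack_data);

	/* Stand-in for lttng_bytecode_filter_interpret_false(): always discard. */
	static uint64_t example_interpret_false(void *filter_data,
			const char *filter_stack_data)
	{
		(void) filter_data;
		(void) filter_stack_data;
		return EXAMPLE_INTERPRETER_DISCARD;
	}

	/* Caller side, as in the updated __event_probe__ macro: test the flag. */
	static int example_should_record(example_interpret_cb cb, void *data,
			const char *stack_data)
	{
		return !!(cb(data, stack_data) & EXAMPLE_INTERPRETER_RECORD_FLAG);
	}

	int main(void)
	{
		printf("record? %d\n",
			example_should_record(example_interpret_false, NULL, NULL));
		return 0;
	}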
---

diff --git a/include/lttng/ust-events.h b/include/lttng/ust-events.h
index 4c02ffa3..356fa3b0 100644
--- a/include/lttng/ust-events.h
+++ b/include/lttng/ust-events.h
@@ -426,11 +426,11 @@ struct ust_pending_probe;
 struct lttng_event;
 
 /*
- * Filter return value masks.
+ * Bytecode interpreter return value masks.
  */
-enum lttng_filter_ret {
-	LTTNG_FILTER_DISCARD = 0,
-	LTTNG_FILTER_RECORD_FLAG = (1ULL << 0),
+enum lttng_bytecode_interpreter_ret {
+	LTTNG_INTERPRETER_DISCARD = 0,
+	LTTNG_INTERPRETER_RECORD_FLAG = (1ULL << 0),
 	/* Other bits are kept for future use. */
 };
 
diff --git a/include/lttng/ust-tracepoint-event.h b/include/lttng/ust-tracepoint-event.h
index 5e9245f1..00e931a8 100644
--- a/include/lttng/ust-tracepoint-event.h
+++ b/include/lttng/ust-tracepoint-event.h
@@ -879,7 +879,7 @@ void __event_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args)) \
 		_TP_ARGS_DATA_VAR(_args));				\
 	tp_list_for_each_entry_rcu(__filter_bc_runtime, &__event->filter_bytecode_runtime_head, node) { \
 		if (caa_unlikely(__filter_bc_runtime->filter(__filter_bc_runtime, \
-				__stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
+				__stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
 			__filter_record = 1;				\
 			break;						\
 		}							\
@@ -961,7 +961,7 @@ void __event_notifier_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args))
 		_TP_ARGS_DATA_VAR(_args));				\
 	tp_list_for_each_entry_rcu(__filter_bc_runtime, &__event_notifier->filter_bytecode_runtime_head, node) { \
 		if (caa_unlikely(__filter_bc_runtime->filter(__filter_bc_runtime, \
-				__stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
+				__stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
 			__filter_record = 1;				\
 	}								\
 	if (caa_likely(!__filter_record))				\
diff --git a/liblttng-ust/Makefile.am b/liblttng-ust/Makefile.am
index f2cc835e..1f35535e 100644
--- a/liblttng-ust/Makefile.am
+++ b/liblttng-ust/Makefile.am
@@ -21,9 +21,15 @@ liblttng_ust_tracepoint_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIB
 liblttng_ust_tracepoint_la_CFLAGS = -DUST_COMPONENT="liblttng_ust_tracepoint" $(AM_CFLAGS)
 
 liblttng_ust_runtime_la_SOURCES = \
+	bytecode.h \
 	lttng-ust-comm.c \
 	lttng-ust-abi.c \
 	lttng-probes.c \
+	lttng-bytecode.c \
+	lttng-bytecode.h \
+	lttng-bytecode-validator.c \
+	lttng-bytecode-specialize.c \
+	lttng-bytecode-interpreter.c \
 	lttng-context-provider.c \
 	lttng-context-vtid.c \
 	lttng-context-vpid.c \
@@ -47,12 +53,6 @@ liblttng_ust_runtime_la_SOURCES = \
 	lttng-context-vsgid.c \
 	lttng-context.c \
 	lttng-events.c \
-	lttng-filter.c \
-	lttng-filter.h \
-	lttng-filter-validator.c \
-	lttng-filter-specialize.c \
-	lttng-filter-interpreter.c \
-	filter-bytecode.h \
 	lttng-hash-helper.h \
 	lttng-ust-elf.c \
 	lttng-ust-statedump.c \
diff --git a/liblttng-ust/bytecode.h b/liblttng-ust/bytecode.h
new file mode 100644
index 00000000..90ea9ad4
--- /dev/null
+++ b/liblttng-ust/bytecode.h
@@ -0,0 +1,251 @@
+#ifndef _BYTECODE_H
+#define _BYTECODE_H
+
+/*
+ * bytecode.h
+ *
+ * LTTng bytecode
+ *
+ * Copyright 2012-2016 - Mathieu Desnoyers
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#ifndef LTTNG_PACKED +#error "LTTNG_PACKED should be defined" +#endif + +/* + * offsets are absolute from start of bytecode. + */ + +struct field_ref { + /* Initially, symbol offset. After link, field offset. */ + uint16_t offset; +} __attribute__((packed)); + +struct get_symbol { + /* Symbol offset. */ + uint16_t offset; +} LTTNG_PACKED; + +struct get_index_u16 { + uint16_t index; +} LTTNG_PACKED; + +struct get_index_u64 { + uint64_t index; +} LTTNG_PACKED; + +struct literal_numeric { + int64_t v; +} __attribute__((packed)); + +struct literal_double { + double v; +} __attribute__((packed)); + +struct literal_string { + char string[0]; +} __attribute__((packed)); + +enum bytecode_op { + BYTECODE_OP_UNKNOWN = 0, + + BYTECODE_OP_RETURN = 1, + + /* binary */ + BYTECODE_OP_MUL = 2, + BYTECODE_OP_DIV = 3, + BYTECODE_OP_MOD = 4, + BYTECODE_OP_PLUS = 5, + BYTECODE_OP_MINUS = 6, + BYTECODE_OP_BIT_RSHIFT = 7, + BYTECODE_OP_BIT_LSHIFT = 8, + BYTECODE_OP_BIT_AND = 9, + BYTECODE_OP_BIT_OR = 10, + BYTECODE_OP_BIT_XOR = 11, + + /* binary comparators */ + BYTECODE_OP_EQ = 12, + BYTECODE_OP_NE = 13, + BYTECODE_OP_GT = 14, + BYTECODE_OP_LT = 15, + BYTECODE_OP_GE = 16, + BYTECODE_OP_LE = 17, + + /* string binary comparator: apply to */ + BYTECODE_OP_EQ_STRING = 18, + BYTECODE_OP_NE_STRING = 19, + BYTECODE_OP_GT_STRING = 20, + BYTECODE_OP_LT_STRING = 21, + BYTECODE_OP_GE_STRING = 22, + BYTECODE_OP_LE_STRING = 23, + + /* s64 binary comparator */ + BYTECODE_OP_EQ_S64 = 24, + BYTECODE_OP_NE_S64 = 25, + BYTECODE_OP_GT_S64 = 26, + BYTECODE_OP_LT_S64 = 27, + BYTECODE_OP_GE_S64 = 28, + BYTECODE_OP_LE_S64 = 29, + + /* double binary comparator */ + BYTECODE_OP_EQ_DOUBLE = 30, + BYTECODE_OP_NE_DOUBLE = 31, + BYTECODE_OP_GT_DOUBLE = 32, + BYTECODE_OP_LT_DOUBLE = 33, + BYTECODE_OP_GE_DOUBLE = 34, + BYTECODE_OP_LE_DOUBLE = 35, + + /* Mixed S64-double binary comparators */ + BYTECODE_OP_EQ_DOUBLE_S64 = 36, + BYTECODE_OP_NE_DOUBLE_S64 = 37, + BYTECODE_OP_GT_DOUBLE_S64 = 38, + BYTECODE_OP_LT_DOUBLE_S64 = 39, + BYTECODE_OP_GE_DOUBLE_S64 = 40, + BYTECODE_OP_LE_DOUBLE_S64 = 41, + + BYTECODE_OP_EQ_S64_DOUBLE = 42, + BYTECODE_OP_NE_S64_DOUBLE = 43, + BYTECODE_OP_GT_S64_DOUBLE = 44, + BYTECODE_OP_LT_S64_DOUBLE = 45, + BYTECODE_OP_GE_S64_DOUBLE = 46, + BYTECODE_OP_LE_S64_DOUBLE = 47, + + /* unary */ + BYTECODE_OP_UNARY_PLUS = 48, + BYTECODE_OP_UNARY_MINUS = 49, + BYTECODE_OP_UNARY_NOT = 50, + BYTECODE_OP_UNARY_PLUS_S64 = 51, + BYTECODE_OP_UNARY_MINUS_S64 = 52, + BYTECODE_OP_UNARY_NOT_S64 = 53, + BYTECODE_OP_UNARY_PLUS_DOUBLE = 54, + BYTECODE_OP_UNARY_MINUS_DOUBLE = 55, + BYTECODE_OP_UNARY_NOT_DOUBLE = 56, + + /* logical */ + BYTECODE_OP_AND = 57, + BYTECODE_OP_OR = 58, + + /* load field ref */ + BYTECODE_OP_LOAD_FIELD_REF = 59, + BYTECODE_OP_LOAD_FIELD_REF_STRING = 60, + BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE = 61, + BYTECODE_OP_LOAD_FIELD_REF_S64 = 62, + BYTECODE_OP_LOAD_FIELD_REF_DOUBLE = 63, + + /* load immediate from operand */ + BYTECODE_OP_LOAD_STRING = 64, + BYTECODE_OP_LOAD_S64 = 65, + BYTECODE_OP_LOAD_DOUBLE = 66, + + /* cast */ + BYTECODE_OP_CAST_TO_S64 = 67, + BYTECODE_OP_CAST_DOUBLE_TO_S64 = 68, + BYTECODE_OP_CAST_NOP = 69, + + /* get context ref */ + BYTECODE_OP_GET_CONTEXT_REF = 70, + BYTECODE_OP_GET_CONTEXT_REF_STRING = 
71, + BYTECODE_OP_GET_CONTEXT_REF_S64 = 72, + BYTECODE_OP_GET_CONTEXT_REF_DOUBLE = 73, + + /* load userspace field ref */ + BYTECODE_OP_LOAD_FIELD_REF_USER_STRING = 74, + BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75, + + /* + * load immediate star globbing pattern (literal string) + * from immediate + */ + BYTECODE_OP_LOAD_STAR_GLOB_STRING = 76, + + /* globbing pattern binary operator: apply to */ + BYTECODE_OP_EQ_STAR_GLOB_STRING = 77, + BYTECODE_OP_NE_STAR_GLOB_STRING = 78, + + /* + * Instructions for recursive traversal through composed types. + */ + BYTECODE_OP_GET_CONTEXT_ROOT = 79, + BYTECODE_OP_GET_APP_CONTEXT_ROOT = 80, + BYTECODE_OP_GET_PAYLOAD_ROOT = 81, + + BYTECODE_OP_GET_SYMBOL = 82, + BYTECODE_OP_GET_SYMBOL_FIELD = 83, + BYTECODE_OP_GET_INDEX_U16 = 84, + BYTECODE_OP_GET_INDEX_U64 = 85, + + BYTECODE_OP_LOAD_FIELD = 86, + BYTECODE_OP_LOAD_FIELD_S8 = 87, + BYTECODE_OP_LOAD_FIELD_S16 = 88, + BYTECODE_OP_LOAD_FIELD_S32 = 89, + BYTECODE_OP_LOAD_FIELD_S64 = 90, + BYTECODE_OP_LOAD_FIELD_U8 = 91, + BYTECODE_OP_LOAD_FIELD_U16 = 92, + BYTECODE_OP_LOAD_FIELD_U32 = 93, + BYTECODE_OP_LOAD_FIELD_U64 = 94, + BYTECODE_OP_LOAD_FIELD_STRING = 95, + BYTECODE_OP_LOAD_FIELD_SEQUENCE = 96, + BYTECODE_OP_LOAD_FIELD_DOUBLE = 97, + + BYTECODE_OP_UNARY_BIT_NOT = 98, + + BYTECODE_OP_RETURN_S64 = 99, + + NR_BYTECODE_OPS, +}; + +typedef uint8_t bytecode_opcode_t; + +struct load_op { + bytecode_opcode_t op; + /* + * data to load. Size known by enum bytecode_opcode and null-term char. + */ + char data[0]; +} __attribute__((packed)); + +struct binary_op { + bytecode_opcode_t op; +} __attribute__((packed)); + +struct unary_op { + bytecode_opcode_t op; +} __attribute__((packed)); + +/* skip_offset is absolute from start of bytecode */ +struct logical_op { + bytecode_opcode_t op; + uint16_t skip_offset; /* bytecode insn, if skip second test */ +} __attribute__((packed)); + +struct cast_op { + bytecode_opcode_t op; +} __attribute__((packed)); + +struct return_op { + bytecode_opcode_t op; +} __attribute__((packed)); + +#endif /* _BYTECODE_H */ diff --git a/liblttng-ust/filter-bytecode.h b/liblttng-ust/filter-bytecode.h deleted file mode 100644 index 59e84555..00000000 --- a/liblttng-ust/filter-bytecode.h +++ /dev/null @@ -1,249 +0,0 @@ -#ifndef _FILTER_BYTECODE_H -#define _FILTER_BYTECODE_H - -/* - * filter-bytecode.h - * - * LTTng filter bytecode - * - * Copyright 2012-2016 - Mathieu Desnoyers - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include -#include - -#ifndef LTTNG_PACKED -#error "LTTNG_PACKED should be defined" -#endif - -/* - * offsets are absolute from start of bytecode. - */ - -struct field_ref { - /* Initially, symbol offset. After link, field offset. */ - uint16_t offset; -} __attribute__((packed)); - -struct get_symbol { - /* Symbol offset. */ - uint16_t offset; -} LTTNG_PACKED; - -struct get_index_u16 { - uint16_t index; -} LTTNG_PACKED; - -struct get_index_u64 { - uint64_t index; -} LTTNG_PACKED; - -struct literal_numeric { - int64_t v; -} __attribute__((packed)); - -struct literal_double { - double v; -} __attribute__((packed)); - -struct literal_string { - char string[0]; -} __attribute__((packed)); - -enum filter_op { - FILTER_OP_UNKNOWN = 0, - - FILTER_OP_RETURN = 1, - - /* binary */ - FILTER_OP_MUL = 2, - FILTER_OP_DIV = 3, - FILTER_OP_MOD = 4, - FILTER_OP_PLUS = 5, - FILTER_OP_MINUS = 6, - FILTER_OP_BIT_RSHIFT = 7, - FILTER_OP_BIT_LSHIFT = 8, - FILTER_OP_BIT_AND = 9, - FILTER_OP_BIT_OR = 10, - FILTER_OP_BIT_XOR = 11, - - /* binary comparators */ - FILTER_OP_EQ = 12, - FILTER_OP_NE = 13, - FILTER_OP_GT = 14, - FILTER_OP_LT = 15, - FILTER_OP_GE = 16, - FILTER_OP_LE = 17, - - /* string binary comparator: apply to */ - FILTER_OP_EQ_STRING = 18, - FILTER_OP_NE_STRING = 19, - FILTER_OP_GT_STRING = 20, - FILTER_OP_LT_STRING = 21, - FILTER_OP_GE_STRING = 22, - FILTER_OP_LE_STRING = 23, - - /* s64 binary comparator */ - FILTER_OP_EQ_S64 = 24, - FILTER_OP_NE_S64 = 25, - FILTER_OP_GT_S64 = 26, - FILTER_OP_LT_S64 = 27, - FILTER_OP_GE_S64 = 28, - FILTER_OP_LE_S64 = 29, - - /* double binary comparator */ - FILTER_OP_EQ_DOUBLE = 30, - FILTER_OP_NE_DOUBLE = 31, - FILTER_OP_GT_DOUBLE = 32, - FILTER_OP_LT_DOUBLE = 33, - FILTER_OP_GE_DOUBLE = 34, - FILTER_OP_LE_DOUBLE = 35, - - /* Mixed S64-double binary comparators */ - FILTER_OP_EQ_DOUBLE_S64 = 36, - FILTER_OP_NE_DOUBLE_S64 = 37, - FILTER_OP_GT_DOUBLE_S64 = 38, - FILTER_OP_LT_DOUBLE_S64 = 39, - FILTER_OP_GE_DOUBLE_S64 = 40, - FILTER_OP_LE_DOUBLE_S64 = 41, - - FILTER_OP_EQ_S64_DOUBLE = 42, - FILTER_OP_NE_S64_DOUBLE = 43, - FILTER_OP_GT_S64_DOUBLE = 44, - FILTER_OP_LT_S64_DOUBLE = 45, - FILTER_OP_GE_S64_DOUBLE = 46, - FILTER_OP_LE_S64_DOUBLE = 47, - - /* unary */ - FILTER_OP_UNARY_PLUS = 48, - FILTER_OP_UNARY_MINUS = 49, - FILTER_OP_UNARY_NOT = 50, - FILTER_OP_UNARY_PLUS_S64 = 51, - FILTER_OP_UNARY_MINUS_S64 = 52, - FILTER_OP_UNARY_NOT_S64 = 53, - FILTER_OP_UNARY_PLUS_DOUBLE = 54, - FILTER_OP_UNARY_MINUS_DOUBLE = 55, - FILTER_OP_UNARY_NOT_DOUBLE = 56, - - /* logical */ - FILTER_OP_AND = 57, - FILTER_OP_OR = 58, - - /* load field ref */ - FILTER_OP_LOAD_FIELD_REF = 59, - FILTER_OP_LOAD_FIELD_REF_STRING = 60, - FILTER_OP_LOAD_FIELD_REF_SEQUENCE = 61, - FILTER_OP_LOAD_FIELD_REF_S64 = 62, - FILTER_OP_LOAD_FIELD_REF_DOUBLE = 63, - - /* load immediate from operand */ - FILTER_OP_LOAD_STRING = 64, - FILTER_OP_LOAD_S64 = 65, - FILTER_OP_LOAD_DOUBLE = 66, - - /* cast */ - FILTER_OP_CAST_TO_S64 = 67, - FILTER_OP_CAST_DOUBLE_TO_S64 = 68, - FILTER_OP_CAST_NOP = 69, - - /* get context ref */ - FILTER_OP_GET_CONTEXT_REF = 70, - FILTER_OP_GET_CONTEXT_REF_STRING = 71, - FILTER_OP_GET_CONTEXT_REF_S64 = 72, - FILTER_OP_GET_CONTEXT_REF_DOUBLE = 73, - - /* load userspace field ref */ - FILTER_OP_LOAD_FIELD_REF_USER_STRING = 74, - FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75, - - /* - * load immediate star globbing pattern (literal string) - * from immediate - */ - FILTER_OP_LOAD_STAR_GLOB_STRING = 76, - - /* globbing pattern binary operator: apply to */ - 
FILTER_OP_EQ_STAR_GLOB_STRING = 77, - FILTER_OP_NE_STAR_GLOB_STRING = 78, - - /* - * Instructions for recursive traversal through composed types. - */ - FILTER_OP_GET_CONTEXT_ROOT = 79, - FILTER_OP_GET_APP_CONTEXT_ROOT = 80, - FILTER_OP_GET_PAYLOAD_ROOT = 81, - - FILTER_OP_GET_SYMBOL = 82, - FILTER_OP_GET_SYMBOL_FIELD = 83, - FILTER_OP_GET_INDEX_U16 = 84, - FILTER_OP_GET_INDEX_U64 = 85, - - FILTER_OP_LOAD_FIELD = 86, - FILTER_OP_LOAD_FIELD_S8 = 87, - FILTER_OP_LOAD_FIELD_S16 = 88, - FILTER_OP_LOAD_FIELD_S32 = 89, - FILTER_OP_LOAD_FIELD_S64 = 90, - FILTER_OP_LOAD_FIELD_U8 = 91, - FILTER_OP_LOAD_FIELD_U16 = 92, - FILTER_OP_LOAD_FIELD_U32 = 93, - FILTER_OP_LOAD_FIELD_U64 = 94, - FILTER_OP_LOAD_FIELD_STRING = 95, - FILTER_OP_LOAD_FIELD_SEQUENCE = 96, - FILTER_OP_LOAD_FIELD_DOUBLE = 97, - - FILTER_OP_UNARY_BIT_NOT = 98, - - FILTER_OP_RETURN_S64 = 99, - - NR_FILTER_OPS, -}; - -typedef uint8_t filter_opcode_t; - -struct load_op { - filter_opcode_t op; - char data[0]; - /* data to load. Size known by enum filter_opcode and null-term char. */ -} __attribute__((packed)); - -struct binary_op { - filter_opcode_t op; -} __attribute__((packed)); - -struct unary_op { - filter_opcode_t op; -} __attribute__((packed)); - -/* skip_offset is absolute from start of bytecode */ -struct logical_op { - filter_opcode_t op; - uint16_t skip_offset; /* bytecode insn, if skip second test */ -} __attribute__((packed)); - -struct cast_op { - filter_opcode_t op; -} __attribute__((packed)); - -struct return_op { - filter_opcode_t op; -} __attribute__((packed)); - -#endif /* _FILTER_BYTECODE_H */ diff --git a/liblttng-ust/lttng-bytecode-interpreter.c b/liblttng-ust/lttng-bytecode-interpreter.c new file mode 100644 index 00000000..9d7258f7 --- /dev/null +++ b/liblttng-ust/lttng-bytecode-interpreter.c @@ -0,0 +1,2499 @@ +/* + * lttng-bytecode-interpreter.c + * + * LTTng UST bytecode interpreter. + * + * Copyright (C) 2010-2016 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define _LGPL_SOURCE +#include +#include +#include +#include + +#include +#include + +#include "lttng-bytecode.h" +#include "string-utils.h" + + +/* + * -1: wildcard found. + * -2: unknown escape char. + * 0: normal char. 
+ */ + +static +int parse_char(const char **p) +{ + switch (**p) { + case '\\': + (*p)++; + switch (**p) { + case '\\': + case '*': + return 0; + default: + return -2; + } + case '*': + return -1; + default: + return 0; + } +} + +/* + * Returns SIZE_MAX if the string is null-terminated, or the number of + * characters if not. + */ +static +size_t get_str_or_seq_len(const struct estack_entry *entry) +{ + return entry->u.s.seq_len; +} + +static +int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type) +{ + const char *pattern; + const char *candidate; + size_t pattern_len; + size_t candidate_len; + + /* Find out which side is the pattern vs. the candidate. */ + if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) { + pattern = estack_ax(stack, top)->u.s.str; + pattern_len = get_str_or_seq_len(estack_ax(stack, top)); + candidate = estack_bx(stack, top)->u.s.str; + candidate_len = get_str_or_seq_len(estack_bx(stack, top)); + } else { + pattern = estack_bx(stack, top)->u.s.str; + pattern_len = get_str_or_seq_len(estack_bx(stack, top)); + candidate = estack_ax(stack, top)->u.s.str; + candidate_len = get_str_or_seq_len(estack_ax(stack, top)); + } + + /* Perform the match. Returns 0 when the result is true. */ + return !strutils_star_glob_match(pattern, pattern_len, candidate, + candidate_len); +} + +static +int stack_strcmp(struct estack *stack, int top, const char *cmp_type) +{ + const char *p = estack_bx(stack, top)->u.s.str, *q = estack_ax(stack, top)->u.s.str; + int ret; + int diff; + + for (;;) { + int escaped_r0 = 0; + + if (unlikely(p - estack_bx(stack, top)->u.s.str >= estack_bx(stack, top)->u.s.seq_len || *p == '\0')) { + if (q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0') { + return 0; + } else { + if (estack_ax(stack, top)->u.s.literal_type == + ESTACK_STRING_LITERAL_TYPE_PLAIN) { + ret = parse_char(&q); + if (ret == -1) + return 0; + } + return -1; + } + } + if (unlikely(q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0')) { + if (estack_bx(stack, top)->u.s.literal_type == + ESTACK_STRING_LITERAL_TYPE_PLAIN) { + ret = parse_char(&p); + if (ret == -1) + return 0; + } + return 1; + } + if (estack_bx(stack, top)->u.s.literal_type == + ESTACK_STRING_LITERAL_TYPE_PLAIN) { + ret = parse_char(&p); + if (ret == -1) { + return 0; + } else if (ret == -2) { + escaped_r0 = 1; + } + /* else compare both char */ + } + if (estack_ax(stack, top)->u.s.literal_type == + ESTACK_STRING_LITERAL_TYPE_PLAIN) { + ret = parse_char(&q); + if (ret == -1) { + return 0; + } else if (ret == -2) { + if (!escaped_r0) + return -1; + } else { + if (escaped_r0) + return 1; + } + } else { + if (escaped_r0) + return 1; + } + diff = *p - *q; + if (diff != 0) + break; + p++; + q++; + } + return diff; +} + +uint64_t lttng_bytecode_filter_interpret_false(void *filter_data, + const char *filter_stack_data) +{ + return LTTNG_INTERPRETER_DISCARD; +} + +#ifdef INTERPRETER_USE_SWITCH + +/* + * Fallback for compilers that do not support taking address of labels. 
+ */ + +#define START_OP \ + start_pc = &bytecode->data[0]; \ + for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \ + pc = next_pc) { \ + dbg_printf("Executing op %s (%u)\n", \ + print_op((unsigned int) *(bytecode_opcode_t *) pc), \ + (unsigned int) *(bytecode_opcode_t *) pc); \ + switch (*(bytecode_opcode_t *) pc) { + +#define OP(name) jump_target_##name: __attribute__((unused)); \ + case name + +#define PO break + +#define END_OP } \ + } + +#define JUMP_TO(name) \ + goto jump_target_##name + +#else + +/* + * Dispatch-table based interpreter. + */ + +#define START_OP \ + start_pc = &bytecode->code[0]; \ + pc = next_pc = start_pc; \ + if (unlikely(pc - start_pc >= bytecode->len)) \ + goto end; \ + goto *dispatch[*(bytecode_opcode_t *) pc]; + +#define OP(name) \ +LABEL_##name + +#define PO \ + pc = next_pc; \ + goto *dispatch[*(bytecode_opcode_t *) pc]; + +#define END_OP + +#define JUMP_TO(name) \ + goto LABEL_##name + +#endif + +#define IS_INTEGER_REGISTER(reg_type) \ + (reg_type == REG_U64 || reg_type == REG_S64) + +static int context_get_index(struct lttng_ctx *ctx, + struct load_ptr *ptr, + uint32_t idx) +{ + + struct lttng_ctx_field *ctx_field; + struct lttng_event_field *field; + struct lttng_ctx_value v; + + ctx_field = &ctx->fields[idx]; + field = &ctx_field->event_field; + ptr->type = LOAD_OBJECT; + ptr->field = field; + + switch (field->type.atype) { + case atype_integer: + ctx_field->get_value(ctx_field, &v); + if (field->type.u.integer.signedness) { + ptr->object_type = OBJECT_TYPE_S64; + ptr->u.s64 = v.u.s64; + ptr->ptr = &ptr->u.s64; + } else { + ptr->object_type = OBJECT_TYPE_U64; + ptr->u.u64 = v.u.s64; /* Cast. */ + ptr->ptr = &ptr->u.u64; + } + break; + case atype_enum: /* Fall-through */ + case atype_enum_nestable: + { + const struct lttng_integer_type *itype; + + if (field->type.atype == atype_enum) { + itype = &field->type.u.legacy.basic.enumeration.container_type; + } else { + itype = &field->type.u.enum_nestable.container_type->u.integer; + } + ctx_field->get_value(ctx_field, &v); + if (itype->signedness) { + ptr->object_type = OBJECT_TYPE_S64; + ptr->u.s64 = v.u.s64; + ptr->ptr = &ptr->u.s64; + } else { + ptr->object_type = OBJECT_TYPE_U64; + ptr->u.u64 = v.u.s64; /* Cast. 
*/ + ptr->ptr = &ptr->u.u64; + } + break; + } + case atype_array: + if (field->type.u.legacy.array.elem_type.atype != atype_integer) { + ERR("Array nesting only supports integer types."); + return -EINVAL; + } + if (field->type.u.legacy.array.elem_type.u.basic.integer.encoding == lttng_encode_none) { + ERR("Only string arrays are supported for contexts."); + return -EINVAL; + } + ptr->object_type = OBJECT_TYPE_STRING; + ctx_field->get_value(ctx_field, &v); + ptr->ptr = v.u.str; + break; + case atype_array_nestable: + if (field->type.u.array_nestable.elem_type->atype != atype_integer) { + ERR("Array nesting only supports integer types."); + return -EINVAL; + } + if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) { + ERR("Only string arrays are supported for contexts."); + return -EINVAL; + } + ptr->object_type = OBJECT_TYPE_STRING; + ctx_field->get_value(ctx_field, &v); + ptr->ptr = v.u.str; + break; + case atype_sequence: + if (field->type.u.legacy.sequence.elem_type.atype != atype_integer) { + ERR("Sequence nesting only supports integer types."); + return -EINVAL; + } + if (field->type.u.legacy.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) { + ERR("Only string sequences are supported for contexts."); + return -EINVAL; + } + ptr->object_type = OBJECT_TYPE_STRING; + ctx_field->get_value(ctx_field, &v); + ptr->ptr = v.u.str; + break; + case atype_sequence_nestable: + if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) { + ERR("Sequence nesting only supports integer types."); + return -EINVAL; + } + if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) { + ERR("Only string sequences are supported for contexts."); + return -EINVAL; + } + ptr->object_type = OBJECT_TYPE_STRING; + ctx_field->get_value(ctx_field, &v); + ptr->ptr = v.u.str; + break; + case atype_string: + ptr->object_type = OBJECT_TYPE_STRING; + ctx_field->get_value(ctx_field, &v); + ptr->ptr = v.u.str; + break; + case atype_float: + ptr->object_type = OBJECT_TYPE_DOUBLE; + ctx_field->get_value(ctx_field, &v); + ptr->u.d = v.u.d; + ptr->ptr = &ptr->u.d; + break; + case atype_dynamic: + ctx_field->get_value(ctx_field, &v); + switch (v.sel) { + case LTTNG_UST_DYNAMIC_TYPE_NONE: + return -EINVAL; + case LTTNG_UST_DYNAMIC_TYPE_S64: + ptr->object_type = OBJECT_TYPE_S64; + ptr->u.s64 = v.u.s64; + ptr->ptr = &ptr->u.s64; + dbg_printf("context get index dynamic s64 %" PRIi64 "\n", ptr->u.s64); + break; + case LTTNG_UST_DYNAMIC_TYPE_DOUBLE: + ptr->object_type = OBJECT_TYPE_DOUBLE; + ptr->u.d = v.u.d; + ptr->ptr = &ptr->u.d; + dbg_printf("context get index dynamic double %g\n", ptr->u.d); + break; + case LTTNG_UST_DYNAMIC_TYPE_STRING: + ptr->object_type = OBJECT_TYPE_STRING; + ptr->ptr = v.u.str; + dbg_printf("context get index dynamic string %s\n", (const char *) ptr->ptr); + break; + default: + dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel); + return -EINVAL; + } + break; + case atype_struct: + ERR("Structure type cannot be loaded."); + return -EINVAL; + default: + ERR("Unknown type: %d", (int) field->type.atype); + return -EINVAL; + } + return 0; +} + +static int dynamic_get_index(struct lttng_ctx *ctx, + struct bytecode_runtime *runtime, + uint64_t index, struct estack_entry *stack_top) +{ + int ret; + const struct bytecode_get_index_data *gid; + + gid = (const struct bytecode_get_index_data *) &runtime->data[index]; + switch (stack_top->u.ptr.type) { + case LOAD_OBJECT: + switch 
(stack_top->u.ptr.object_type) { + case OBJECT_TYPE_ARRAY: + { + const char *ptr; + + assert(gid->offset < gid->array_len); + /* Skip count (unsigned long) */ + ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long)); + ptr = ptr + gid->offset; + stack_top->u.ptr.ptr = ptr; + stack_top->u.ptr.object_type = gid->elem.type; + stack_top->u.ptr.rev_bo = gid->elem.rev_bo; + assert(stack_top->u.ptr.field->type.atype == atype_array || + stack_top->u.ptr.field->type.atype == atype_array_nestable); + stack_top->u.ptr.field = NULL; + break; + } + case OBJECT_TYPE_SEQUENCE: + { + const char *ptr; + size_t ptr_seq_len; + + ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long)); + ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr; + if (gid->offset >= gid->elem.len * ptr_seq_len) { + ret = -EINVAL; + goto end; + } + ptr = ptr + gid->offset; + stack_top->u.ptr.ptr = ptr; + stack_top->u.ptr.object_type = gid->elem.type; + stack_top->u.ptr.rev_bo = gid->elem.rev_bo; + assert(stack_top->u.ptr.field->type.atype == atype_sequence || + stack_top->u.ptr.field->type.atype == atype_sequence_nestable); + stack_top->u.ptr.field = NULL; + break; + } + case OBJECT_TYPE_STRUCT: + ERR("Nested structures are not supported yet."); + ret = -EINVAL; + goto end; + case OBJECT_TYPE_VARIANT: + default: + ERR("Unexpected get index type %d", + (int) stack_top->u.ptr.object_type); + ret = -EINVAL; + goto end; + } + break; + case LOAD_ROOT_CONTEXT: + case LOAD_ROOT_APP_CONTEXT: /* Fall-through */ + { + ret = context_get_index(ctx, + &stack_top->u.ptr, + gid->ctx_index); + if (ret) { + goto end; + } + break; + } + case LOAD_ROOT_PAYLOAD: + stack_top->u.ptr.ptr += gid->offset; + if (gid->elem.type == OBJECT_TYPE_STRING) + stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr; + stack_top->u.ptr.object_type = gid->elem.type; + stack_top->u.ptr.type = LOAD_OBJECT; + stack_top->u.ptr.field = gid->field; + stack_top->u.ptr.rev_bo = gid->elem.rev_bo; + break; + } + + stack_top->type = REG_PTR; + + return 0; + +end: + return ret; +} + +static int dynamic_load_field(struct estack_entry *stack_top) +{ + int ret; + + switch (stack_top->u.ptr.type) { + case LOAD_OBJECT: + break; + case LOAD_ROOT_CONTEXT: + case LOAD_ROOT_APP_CONTEXT: + case LOAD_ROOT_PAYLOAD: + default: + dbg_printf("Interpreter warning: cannot load root, missing field name.\n"); + ret = -EINVAL; + goto end; + } + switch (stack_top->u.ptr.object_type) { + case OBJECT_TYPE_S8: + dbg_printf("op load field s8\n"); + stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr; + stack_top->type = REG_S64; + break; + case OBJECT_TYPE_S16: + { + int16_t tmp; + + dbg_printf("op load field s16\n"); + tmp = *(int16_t *) stack_top->u.ptr.ptr; + if (stack_top->u.ptr.rev_bo) + tmp = bswap_16(tmp); + stack_top->u.v = tmp; + stack_top->type = REG_S64; + break; + } + case OBJECT_TYPE_S32: + { + int32_t tmp; + + dbg_printf("op load field s32\n"); + tmp = *(int32_t *) stack_top->u.ptr.ptr; + if (stack_top->u.ptr.rev_bo) + tmp = bswap_32(tmp); + stack_top->u.v = tmp; + stack_top->type = REG_S64; + break; + } + case OBJECT_TYPE_S64: + { + int64_t tmp; + + dbg_printf("op load field s64\n"); + tmp = *(int64_t *) stack_top->u.ptr.ptr; + if (stack_top->u.ptr.rev_bo) + tmp = bswap_64(tmp); + stack_top->u.v = tmp; + stack_top->type = REG_S64; + break; + } + case OBJECT_TYPE_U8: + dbg_printf("op load field u8\n"); + stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr; + stack_top->type = REG_U64; + break; + case OBJECT_TYPE_U16: + { + uint16_t tmp; + + 
dbg_printf("op load field u16\n"); + tmp = *(uint16_t *) stack_top->u.ptr.ptr; + if (stack_top->u.ptr.rev_bo) + tmp = bswap_16(tmp); + stack_top->u.v = tmp; + stack_top->type = REG_U64; + break; + } + case OBJECT_TYPE_U32: + { + uint32_t tmp; + + dbg_printf("op load field u32\n"); + tmp = *(uint32_t *) stack_top->u.ptr.ptr; + if (stack_top->u.ptr.rev_bo) + tmp = bswap_32(tmp); + stack_top->u.v = tmp; + stack_top->type = REG_U64; + break; + } + case OBJECT_TYPE_U64: + { + uint64_t tmp; + + dbg_printf("op load field u64\n"); + tmp = *(uint64_t *) stack_top->u.ptr.ptr; + if (stack_top->u.ptr.rev_bo) + tmp = bswap_64(tmp); + stack_top->u.v = tmp; + stack_top->type = REG_U64; + break; + } + case OBJECT_TYPE_DOUBLE: + memcpy(&stack_top->u.d, + stack_top->u.ptr.ptr, + sizeof(struct literal_double)); + stack_top->type = REG_DOUBLE; + break; + case OBJECT_TYPE_STRING: + { + const char *str; + + dbg_printf("op load field string\n"); + str = (const char *) stack_top->u.ptr.ptr; + stack_top->u.s.str = str; + if (unlikely(!stack_top->u.s.str)) { + dbg_printf("Interpreter warning: loading a NULL string.\n"); + ret = -EINVAL; + goto end; + } + stack_top->u.s.seq_len = SIZE_MAX; + stack_top->u.s.literal_type = + ESTACK_STRING_LITERAL_TYPE_NONE; + stack_top->type = REG_STRING; + break; + } + case OBJECT_TYPE_STRING_SEQUENCE: + { + const char *ptr; + + dbg_printf("op load field string sequence\n"); + ptr = stack_top->u.ptr.ptr; + stack_top->u.s.seq_len = *(unsigned long *) ptr; + stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long)); + stack_top->type = REG_STRING; + if (unlikely(!stack_top->u.s.str)) { + dbg_printf("Interpreter warning: loading a NULL sequence.\n"); + ret = -EINVAL; + goto end; + } + stack_top->u.s.literal_type = + ESTACK_STRING_LITERAL_TYPE_NONE; + break; + } + case OBJECT_TYPE_DYNAMIC: + /* + * Dynamic types in context are looked up + * by context get index. + */ + ret = -EINVAL; + goto end; + case OBJECT_TYPE_SEQUENCE: + case OBJECT_TYPE_ARRAY: + case OBJECT_TYPE_STRUCT: + case OBJECT_TYPE_VARIANT: + ERR("Sequences, arrays, struct and variant cannot be loaded (nested types)."); + ret = -EINVAL; + goto end; + } + return 0; + +end: + return ret; +} + +static +int lttng_bytecode_interpret_format_output(struct estack_entry *ax, + struct lttng_interpreter_output *output) +{ + int ret; + +again: + switch (ax->type) { + case REG_S64: + output->type = LTTNG_INTERPRETER_TYPE_S64; + output->u.s = ax->u.v; + break; + case REG_U64: + output->type = LTTNG_INTERPRETER_TYPE_U64; + output->u.u = (uint64_t) ax->u.v; + break; + case REG_DOUBLE: + output->type = LTTNG_INTERPRETER_TYPE_DOUBLE; + output->u.d = ax->u.d; + break; + case REG_STRING: + output->type = LTTNG_INTERPRETER_TYPE_STRING; + output->u.str.str = ax->u.s.str; + output->u.str.len = ax->u.s.seq_len; + break; + case REG_PTR: + switch (ax->u.ptr.object_type) { + case OBJECT_TYPE_S8: + case OBJECT_TYPE_S16: + case OBJECT_TYPE_S32: + case OBJECT_TYPE_S64: + case OBJECT_TYPE_U8: + case OBJECT_TYPE_U16: + case OBJECT_TYPE_U32: + case OBJECT_TYPE_U64: + case OBJECT_TYPE_DOUBLE: + case OBJECT_TYPE_STRING: + case OBJECT_TYPE_STRING_SEQUENCE: + ret = dynamic_load_field(ax); + if (ret) + return ret; + /* Retry after loading ptr into stack top. 
*/ + goto again; + case OBJECT_TYPE_SEQUENCE: + output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE; + output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long)); + output->u.sequence.nr_elem = *(unsigned long *) ax->u.ptr.ptr; + output->u.sequence.nested_type = ax->u.ptr.field->type.u.sequence_nestable.elem_type; + break; + case OBJECT_TYPE_ARRAY: + /* Skip count (unsigned long) */ + output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE; + output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long)); + output->u.sequence.nr_elem = ax->u.ptr.field->type.u.array_nestable.length; + output->u.sequence.nested_type = ax->u.ptr.field->type.u.array_nestable.elem_type; + break; + case OBJECT_TYPE_STRUCT: + case OBJECT_TYPE_VARIANT: + default: + return -EINVAL; + } + + break; + case REG_STAR_GLOB_STRING: + case REG_UNKNOWN: + default: + return -EINVAL; + } + + return LTTNG_INTERPRETER_RECORD_FLAG; +} + +/* + * Return 0 (discard), or raise the 0x1 flag (log event). + * Currently, other flags are kept for future extensions and have no + * effect. + */ +static +uint64_t bytecode_interpret(void *interpreter_data, + const char *interpreter_stack_data, + struct lttng_interpreter_output *output) +{ + struct bytecode_runtime *bytecode = interpreter_data; + struct lttng_ctx *ctx = rcu_dereference(*bytecode->p.pctx); + void *pc, *next_pc, *start_pc; + int ret = -EINVAL; + uint64_t retval = 0; + struct estack _stack; + struct estack *stack = &_stack; + register int64_t ax = 0, bx = 0; + register enum entry_type ax_t = REG_UNKNOWN, bx_t = REG_UNKNOWN; + register int top = INTERPRETER_STACK_EMPTY; +#ifndef INTERPRETER_USE_SWITCH + static void *dispatch[NR_BYTECODE_OPS] = { + [ BYTECODE_OP_UNKNOWN ] = &&LABEL_BYTECODE_OP_UNKNOWN, + + [ BYTECODE_OP_RETURN ] = &&LABEL_BYTECODE_OP_RETURN, + + /* binary */ + [ BYTECODE_OP_MUL ] = &&LABEL_BYTECODE_OP_MUL, + [ BYTECODE_OP_DIV ] = &&LABEL_BYTECODE_OP_DIV, + [ BYTECODE_OP_MOD ] = &&LABEL_BYTECODE_OP_MOD, + [ BYTECODE_OP_PLUS ] = &&LABEL_BYTECODE_OP_PLUS, + [ BYTECODE_OP_MINUS ] = &&LABEL_BYTECODE_OP_MINUS, + [ BYTECODE_OP_BIT_RSHIFT ] = &&LABEL_BYTECODE_OP_BIT_RSHIFT, + [ BYTECODE_OP_BIT_LSHIFT ] = &&LABEL_BYTECODE_OP_BIT_LSHIFT, + [ BYTECODE_OP_BIT_AND ] = &&LABEL_BYTECODE_OP_BIT_AND, + [ BYTECODE_OP_BIT_OR ] = &&LABEL_BYTECODE_OP_BIT_OR, + [ BYTECODE_OP_BIT_XOR ] = &&LABEL_BYTECODE_OP_BIT_XOR, + + /* binary comparators */ + [ BYTECODE_OP_EQ ] = &&LABEL_BYTECODE_OP_EQ, + [ BYTECODE_OP_NE ] = &&LABEL_BYTECODE_OP_NE, + [ BYTECODE_OP_GT ] = &&LABEL_BYTECODE_OP_GT, + [ BYTECODE_OP_LT ] = &&LABEL_BYTECODE_OP_LT, + [ BYTECODE_OP_GE ] = &&LABEL_BYTECODE_OP_GE, + [ BYTECODE_OP_LE ] = &&LABEL_BYTECODE_OP_LE, + + /* string binary comparator */ + [ BYTECODE_OP_EQ_STRING ] = &&LABEL_BYTECODE_OP_EQ_STRING, + [ BYTECODE_OP_NE_STRING ] = &&LABEL_BYTECODE_OP_NE_STRING, + [ BYTECODE_OP_GT_STRING ] = &&LABEL_BYTECODE_OP_GT_STRING, + [ BYTECODE_OP_LT_STRING ] = &&LABEL_BYTECODE_OP_LT_STRING, + [ BYTECODE_OP_GE_STRING ] = &&LABEL_BYTECODE_OP_GE_STRING, + [ BYTECODE_OP_LE_STRING ] = &&LABEL_BYTECODE_OP_LE_STRING, + + /* globbing pattern binary comparator */ + [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_EQ_STAR_GLOB_STRING, + [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_NE_STAR_GLOB_STRING, + + /* s64 binary comparator */ + [ BYTECODE_OP_EQ_S64 ] = &&LABEL_BYTECODE_OP_EQ_S64, + [ BYTECODE_OP_NE_S64 ] = &&LABEL_BYTECODE_OP_NE_S64, + [ BYTECODE_OP_GT_S64 ] = &&LABEL_BYTECODE_OP_GT_S64, + [ BYTECODE_OP_LT_S64 ] = 
&&LABEL_BYTECODE_OP_LT_S64, + [ BYTECODE_OP_GE_S64 ] = &&LABEL_BYTECODE_OP_GE_S64, + [ BYTECODE_OP_LE_S64 ] = &&LABEL_BYTECODE_OP_LE_S64, + + /* double binary comparator */ + [ BYTECODE_OP_EQ_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE, + [ BYTECODE_OP_NE_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_DOUBLE, + [ BYTECODE_OP_GT_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_DOUBLE, + [ BYTECODE_OP_LT_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_DOUBLE, + [ BYTECODE_OP_GE_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_DOUBLE, + [ BYTECODE_OP_LE_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_DOUBLE, + + /* Mixed S64-double binary comparators */ + [ BYTECODE_OP_EQ_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE_S64, + [ BYTECODE_OP_NE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_NE_DOUBLE_S64, + [ BYTECODE_OP_GT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GT_DOUBLE_S64, + [ BYTECODE_OP_LT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LT_DOUBLE_S64, + [ BYTECODE_OP_GE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GE_DOUBLE_S64, + [ BYTECODE_OP_LE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LE_DOUBLE_S64, + + [ BYTECODE_OP_EQ_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_S64_DOUBLE, + [ BYTECODE_OP_NE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_S64_DOUBLE, + [ BYTECODE_OP_GT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_S64_DOUBLE, + [ BYTECODE_OP_LT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_S64_DOUBLE, + [ BYTECODE_OP_GE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_S64_DOUBLE, + [ BYTECODE_OP_LE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_S64_DOUBLE, + + /* unary */ + [ BYTECODE_OP_UNARY_PLUS ] = &&LABEL_BYTECODE_OP_UNARY_PLUS, + [ BYTECODE_OP_UNARY_MINUS ] = &&LABEL_BYTECODE_OP_UNARY_MINUS, + [ BYTECODE_OP_UNARY_NOT ] = &&LABEL_BYTECODE_OP_UNARY_NOT, + [ BYTECODE_OP_UNARY_PLUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_S64, + [ BYTECODE_OP_UNARY_MINUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_S64, + [ BYTECODE_OP_UNARY_NOT_S64 ] = &&LABEL_BYTECODE_OP_UNARY_NOT_S64, + [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_DOUBLE, + [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_DOUBLE, + [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_NOT_DOUBLE, + + /* logical */ + [ BYTECODE_OP_AND ] = &&LABEL_BYTECODE_OP_AND, + [ BYTECODE_OP_OR ] = &&LABEL_BYTECODE_OP_OR, + + /* load field ref */ + [ BYTECODE_OP_LOAD_FIELD_REF ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF, + [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_STRING, + [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE, + [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_S64, + [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_DOUBLE, + + /* load from immediate operand */ + [ BYTECODE_OP_LOAD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STRING, + [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STAR_GLOB_STRING, + [ BYTECODE_OP_LOAD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_S64, + [ BYTECODE_OP_LOAD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_DOUBLE, + + /* cast */ + [ BYTECODE_OP_CAST_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_TO_S64, + [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_DOUBLE_TO_S64, + [ BYTECODE_OP_CAST_NOP ] = &&LABEL_BYTECODE_OP_CAST_NOP, + + /* get context ref */ + [ BYTECODE_OP_GET_CONTEXT_REF ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF, + [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_STRING, + [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_S64, + [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_DOUBLE, + + /* Instructions for recursive traversal through 
composed types. */ + [ BYTECODE_OP_GET_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_ROOT, + [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_APP_CONTEXT_ROOT, + [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = &&LABEL_BYTECODE_OP_GET_PAYLOAD_ROOT, + + [ BYTECODE_OP_GET_SYMBOL ] = &&LABEL_BYTECODE_OP_GET_SYMBOL, + [ BYTECODE_OP_GET_SYMBOL_FIELD ] = &&LABEL_BYTECODE_OP_GET_SYMBOL_FIELD, + [ BYTECODE_OP_GET_INDEX_U16 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U16, + [ BYTECODE_OP_GET_INDEX_U64 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U64, + + [ BYTECODE_OP_LOAD_FIELD ] = &&LABEL_BYTECODE_OP_LOAD_FIELD, + [ BYTECODE_OP_LOAD_FIELD_S8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S8, + [ BYTECODE_OP_LOAD_FIELD_S16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S16, + [ BYTECODE_OP_LOAD_FIELD_S32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S32, + [ BYTECODE_OP_LOAD_FIELD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S64, + [ BYTECODE_OP_LOAD_FIELD_U8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U8, + [ BYTECODE_OP_LOAD_FIELD_U16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U16, + [ BYTECODE_OP_LOAD_FIELD_U32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U32, + [ BYTECODE_OP_LOAD_FIELD_U64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U64, + [ BYTECODE_OP_LOAD_FIELD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_STRING, + [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_SEQUENCE, + [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_DOUBLE, + + [ BYTECODE_OP_UNARY_BIT_NOT ] = &&LABEL_BYTECODE_OP_UNARY_BIT_NOT, + + [ BYTECODE_OP_RETURN_S64 ] = &&LABEL_BYTECODE_OP_RETURN_S64, + }; +#endif /* #ifndef INTERPRETER_USE_SWITCH */ + + START_OP + + OP(BYTECODE_OP_UNKNOWN): + OP(BYTECODE_OP_LOAD_FIELD_REF): +#ifdef INTERPRETER_USE_SWITCH + default: +#endif /* INTERPRETER_USE_SWITCH */ + ERR("unknown bytecode op %u", + (unsigned int) *(bytecode_opcode_t *) pc); + ret = -EINVAL; + goto end; + + OP(BYTECODE_OP_RETURN): + /* LTTNG_INTERPRETER_DISCARD or LTTNG_INTERPRETER_RECORD_FLAG */ + /* Handle dynamic typing. */ + switch (estack_ax_t) { + case REG_S64: + case REG_U64: + retval = !!estack_ax_v; + break; + case REG_DOUBLE: + case REG_STRING: + case REG_PTR: + if (!output) { + ret = -EINVAL; + goto end; + } + retval = 0; + break; + case REG_STAR_GLOB_STRING: + case REG_UNKNOWN: + default: + ret = -EINVAL; + goto end; + } + ret = 0; + goto end; + + OP(BYTECODE_OP_RETURN_S64): + /* LTTNG_INTERPRETER_DISCARD or LTTNG_INTERPRETER_RECORD_FLAG */ + retval = !!estack_ax_v; + ret = 0; + goto end; + + /* binary */ + OP(BYTECODE_OP_MUL): + OP(BYTECODE_OP_DIV): + OP(BYTECODE_OP_MOD): + OP(BYTECODE_OP_PLUS): + OP(BYTECODE_OP_MINUS): + ERR("unsupported bytecode op %u", + (unsigned int) *(bytecode_opcode_t *) pc); + ret = -EINVAL; + goto end; + + OP(BYTECODE_OP_EQ): + { + /* Dynamic typing. 
*/ + switch (estack_ax_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_EQ_S64); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_EQ_DOUBLE_S64); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_DOUBLE: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_EQ_S64_DOUBLE); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_EQ_DOUBLE); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_STRING: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: /* Fall-through */ + case REG_DOUBLE: + ret = -EINVAL; + goto end; + case REG_STRING: + JUMP_TO(BYTECODE_OP_EQ_STRING); + case REG_STAR_GLOB_STRING: + JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING); + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_STAR_GLOB_STRING: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: /* Fall-through */ + case REG_DOUBLE: + ret = -EINVAL; + goto end; + case REG_STRING: + JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING); + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_ax_t); + ret = -EINVAL; + goto end; + } + } + OP(BYTECODE_OP_NE): + { + /* Dynamic typing. 
*/ + switch (estack_ax_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_NE_S64); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_NE_DOUBLE_S64); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_DOUBLE: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_NE_S64_DOUBLE); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_NE_DOUBLE); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_STRING: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + case REG_DOUBLE: + ret = -EINVAL; + goto end; + case REG_STRING: + JUMP_TO(BYTECODE_OP_NE_STRING); + case REG_STAR_GLOB_STRING: + JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING); + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_STAR_GLOB_STRING: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + case REG_DOUBLE: + ret = -EINVAL; + goto end; + case REG_STRING: + JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING); + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_ax_t); + ret = -EINVAL; + goto end; + } + } + OP(BYTECODE_OP_GT): + { + /* Dynamic typing. */ + switch (estack_ax_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_GT_S64); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_GT_DOUBLE_S64); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_DOUBLE: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_GT_S64_DOUBLE); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_GT_DOUBLE); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_STRING: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: /* Fall-through */ + case REG_DOUBLE: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + case REG_STRING: + JUMP_TO(BYTECODE_OP_GT_STRING); + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_ax_t); + ret = -EINVAL; + goto end; + } + } + OP(BYTECODE_OP_LT): + { + /* Dynamic typing. 
*/ + switch (estack_ax_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_LT_S64); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_LT_DOUBLE_S64); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_DOUBLE: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_LT_S64_DOUBLE); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_LT_DOUBLE); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_STRING: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: /* Fall-through */ + case REG_DOUBLE: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + case REG_STRING: + JUMP_TO(BYTECODE_OP_LT_STRING); + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_ax_t); + ret = -EINVAL; + goto end; + } + } + OP(BYTECODE_OP_GE): + { + /* Dynamic typing. */ + switch (estack_ax_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_GE_S64); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_GE_DOUBLE_S64); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_DOUBLE: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_GE_S64_DOUBLE); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_GE_DOUBLE); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_STRING: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: /* Fall-through */ + case REG_DOUBLE: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + case REG_STRING: + JUMP_TO(BYTECODE_OP_GE_STRING); + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_ax_t); + ret = -EINVAL; + goto end; + } + } + OP(BYTECODE_OP_LE): + { + /* Dynamic typing. 
*/ + switch (estack_ax_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_LE_S64); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_LE_DOUBLE_S64); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_DOUBLE: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_LE_S64_DOUBLE); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_LE_DOUBLE); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + case REG_STRING: + switch (estack_bx_t) { + case REG_S64: /* Fall-through */ + case REG_U64: /* Fall-through */ + case REG_DOUBLE: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + case REG_STRING: + JUMP_TO(BYTECODE_OP_LE_STRING); + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_bx_t); + ret = -EINVAL; + goto end; + } + break; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_ax_t); + ret = -EINVAL; + goto end; + } + } + + OP(BYTECODE_OP_EQ_STRING): + { + int res; + + res = (stack_strcmp(stack, top, "==") == 0); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_NE_STRING): + { + int res; + + res = (stack_strcmp(stack, top, "!=") != 0); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_GT_STRING): + { + int res; + + res = (stack_strcmp(stack, top, ">") > 0); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_LT_STRING): + { + int res; + + res = (stack_strcmp(stack, top, "<") < 0); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_GE_STRING): + { + int res; + + res = (stack_strcmp(stack, top, ">=") >= 0); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_LE_STRING): + { + int res; + + res = (stack_strcmp(stack, top, "<=") <= 0); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + + OP(BYTECODE_OP_EQ_STAR_GLOB_STRING): + { + int res; + + res = (stack_star_glob_match(stack, top, "==") == 0); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_NE_STAR_GLOB_STRING): + { + int res; + + res = (stack_star_glob_match(stack, top, "!=") != 0); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + + OP(BYTECODE_OP_EQ_S64): + { + int res; + + res = (estack_bx_v == estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_NE_S64): + { + int res; + + res 
= (estack_bx_v != estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_GT_S64): + { + int res; + + res = (estack_bx_v > estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_LT_S64): + { + int res; + + res = (estack_bx_v < estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_GE_S64): + { + int res; + + res = (estack_bx_v >= estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_LE_S64): + { + int res; + + res = (estack_bx_v <= estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + + OP(BYTECODE_OP_EQ_DOUBLE): + { + int res; + + res = (estack_bx(stack, top)->u.d == estack_ax(stack, top)->u.d); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_NE_DOUBLE): + { + int res; + + res = (estack_bx(stack, top)->u.d != estack_ax(stack, top)->u.d); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_GT_DOUBLE): + { + int res; + + res = (estack_bx(stack, top)->u.d > estack_ax(stack, top)->u.d); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_LT_DOUBLE): + { + int res; + + res = (estack_bx(stack, top)->u.d < estack_ax(stack, top)->u.d); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_GE_DOUBLE): + { + int res; + + res = (estack_bx(stack, top)->u.d >= estack_ax(stack, top)->u.d); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_LE_DOUBLE): + { + int res; + + res = (estack_bx(stack, top)->u.d <= estack_ax(stack, top)->u.d); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + + /* Mixed S64-double binary comparators */ + OP(BYTECODE_OP_EQ_DOUBLE_S64): + { + int res; + + res = (estack_bx(stack, top)->u.d == estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_NE_DOUBLE_S64): + { + int res; + + res = (estack_bx(stack, top)->u.d != estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_GT_DOUBLE_S64): + { + int res; + + res = (estack_bx(stack, top)->u.d > estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_LT_DOUBLE_S64): + { + int res; + + res = (estack_bx(stack, top)->u.d < estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = 
REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_GE_DOUBLE_S64): + { + int res; + + res = (estack_bx(stack, top)->u.d >= estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_LE_DOUBLE_S64): + { + int res; + + res = (estack_bx(stack, top)->u.d <= estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + + OP(BYTECODE_OP_EQ_S64_DOUBLE): + { + int res; + + res = (estack_bx_v == estack_ax(stack, top)->u.d); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_NE_S64_DOUBLE): + { + int res; + + res = (estack_bx_v != estack_ax(stack, top)->u.d); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_GT_S64_DOUBLE): + { + int res; + + res = (estack_bx_v > estack_ax(stack, top)->u.d); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_LT_S64_DOUBLE): + { + int res; + + res = (estack_bx_v < estack_ax(stack, top)->u.d); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_GE_S64_DOUBLE): + { + int res; + + res = (estack_bx_v >= estack_ax(stack, top)->u.d); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_LE_S64_DOUBLE): + { + int res; + + res = (estack_bx_v <= estack_ax(stack, top)->u.d); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_S64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_BIT_RSHIFT): + { + int64_t res; + + if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { + ret = -EINVAL; + goto end; + } + + /* Catch undefined behavior. */ + if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) { + ret = -EINVAL; + goto end; + } + res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_U64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_BIT_LSHIFT): + { + int64_t res; + + if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { + ret = -EINVAL; + goto end; + } + + /* Catch undefined behavior. 
*/ + if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) { + ret = -EINVAL; + goto end; + } + res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_U64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_BIT_AND): + { + int64_t res; + + if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { + ret = -EINVAL; + goto end; + } + + res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_U64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_BIT_OR): + { + int64_t res; + + if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { + ret = -EINVAL; + goto end; + } + + res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_U64; + next_pc += sizeof(struct binary_op); + PO; + } + OP(BYTECODE_OP_BIT_XOR): + { + int64_t res; + + if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { + ret = -EINVAL; + goto end; + } + + res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v); + estack_pop(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = res; + estack_ax_t = REG_U64; + next_pc += sizeof(struct binary_op); + PO; + } + + /* unary */ + OP(BYTECODE_OP_UNARY_PLUS): + { + /* Dynamic typing. */ + switch (estack_ax_t) { + case REG_S64: /* Fall-through. */ + case REG_U64: + JUMP_TO(BYTECODE_OP_UNARY_PLUS_S64); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_UNARY_PLUS_DOUBLE); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_ax_t); + ret = -EINVAL; + goto end; + } + } + OP(BYTECODE_OP_UNARY_MINUS): + { + /* Dynamic typing. */ + switch (estack_ax_t) { + case REG_S64: /* Fall-through. */ + case REG_U64: + JUMP_TO(BYTECODE_OP_UNARY_MINUS_S64); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_UNARY_MINUS_DOUBLE); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_ax_t); + ret = -EINVAL; + goto end; + } + } + OP(BYTECODE_OP_UNARY_NOT): + { + /* Dynamic typing. */ + switch (estack_ax_t) { + case REG_S64: /* Fall-through. */ + case REG_U64: + JUMP_TO(BYTECODE_OP_UNARY_NOT_S64); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_UNARY_NOT_DOUBLE); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_ax_t); + ret = -EINVAL; + goto end; + } + next_pc += sizeof(struct unary_op); + PO; + } + + OP(BYTECODE_OP_UNARY_BIT_NOT): + { + /* Dynamic typing. 
*/ + if (!IS_INTEGER_REGISTER(estack_ax_t)) { + ret = -EINVAL; + goto end; + } + + estack_ax_v = ~(uint64_t) estack_ax_v; + estack_ax_t = REG_U64; + next_pc += sizeof(struct unary_op); + PO; + } + + OP(BYTECODE_OP_UNARY_PLUS_S64): + OP(BYTECODE_OP_UNARY_PLUS_DOUBLE): + { + next_pc += sizeof(struct unary_op); + PO; + } + OP(BYTECODE_OP_UNARY_MINUS_S64): + { + estack_ax_v = -estack_ax_v; + next_pc += sizeof(struct unary_op); + PO; + } + OP(BYTECODE_OP_UNARY_MINUS_DOUBLE): + { + estack_ax(stack, top)->u.d = -estack_ax(stack, top)->u.d; + next_pc += sizeof(struct unary_op); + PO; + } + OP(BYTECODE_OP_UNARY_NOT_S64): + { + estack_ax_v = !estack_ax_v; + estack_ax_t = REG_S64; + next_pc += sizeof(struct unary_op); + PO; + } + OP(BYTECODE_OP_UNARY_NOT_DOUBLE): + { + estack_ax_v = !estack_ax(stack, top)->u.d; + estack_ax_t = REG_S64; + next_pc += sizeof(struct unary_op); + PO; + } + + /* logical */ + OP(BYTECODE_OP_AND): + { + struct logical_op *insn = (struct logical_op *) pc; + + if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) { + ret = -EINVAL; + goto end; + } + /* If AX is 0, skip and evaluate to 0 */ + if (unlikely(estack_ax_v == 0)) { + dbg_printf("Jumping to bytecode offset %u\n", + (unsigned int) insn->skip_offset); + next_pc = start_pc + insn->skip_offset; + } else { + /* Pop 1 when jump not taken */ + estack_pop(stack, top, ax, bx, ax_t, bx_t); + next_pc += sizeof(struct logical_op); + } + PO; + } + OP(BYTECODE_OP_OR): + { + struct logical_op *insn = (struct logical_op *) pc; + + if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) { + ret = -EINVAL; + goto end; + } + /* If AX is nonzero, skip and evaluate to 1 */ + if (unlikely(estack_ax_v != 0)) { + estack_ax_v = 1; + dbg_printf("Jumping to bytecode offset %u\n", + (unsigned int) insn->skip_offset); + next_pc = start_pc + insn->skip_offset; + } else { + /* Pop 1 when jump not taken */ + estack_pop(stack, top, ax, bx, ax_t, bx_t); + next_pc += sizeof(struct logical_op); + } + PO; + } + + + /* load field ref */ + OP(BYTECODE_OP_LOAD_FIELD_REF_STRING): + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + + dbg_printf("load field ref offset %u type string\n", + ref->offset); + estack_push(stack, top, ax, bx, ax_t, bx_t); + estack_ax(stack, top)->u.s.str = + *(const char * const *) &interpreter_stack_data[ref->offset]; + if (unlikely(!estack_ax(stack, top)->u.s.str)) { + dbg_printf("Interpreter warning: loading a NULL string.\n"); + ret = -EINVAL; + goto end; + } + estack_ax(stack, top)->u.s.seq_len = SIZE_MAX; + estack_ax(stack, top)->u.s.literal_type = + ESTACK_STRING_LITERAL_TYPE_NONE; + estack_ax_t = REG_STRING; + dbg_printf("ref load string %s\n", estack_ax(stack, top)->u.s.str); + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + PO; + } + + OP(BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE): + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + + dbg_printf("load field ref offset %u type sequence\n", + ref->offset); + estack_push(stack, top, ax, bx, ax_t, bx_t); + estack_ax(stack, top)->u.s.seq_len = + *(unsigned long *) &interpreter_stack_data[ref->offset]; + estack_ax(stack, top)->u.s.str = + *(const char **) (&interpreter_stack_data[ref->offset + + sizeof(unsigned long)]); + estack_ax_t = REG_STRING; + if (unlikely(!estack_ax(stack, top)->u.s.str)) { + dbg_printf("Interpreter warning: loading a NULL sequence.\n"); + ret = -EINVAL; + goto end; + } + estack_ax(stack, top)->u.s.literal_type = + 
ESTACK_STRING_LITERAL_TYPE_NONE; + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + PO; + } + + OP(BYTECODE_OP_LOAD_FIELD_REF_S64): + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + + dbg_printf("load field ref offset %u type s64\n", + ref->offset); + estack_push(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = + ((struct literal_numeric *) &interpreter_stack_data[ref->offset])->v; + estack_ax_t = REG_S64; + dbg_printf("ref load s64 %" PRIi64 "\n", estack_ax_v); + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + PO; + } + + OP(BYTECODE_OP_LOAD_FIELD_REF_DOUBLE): + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + + dbg_printf("load field ref offset %u type double\n", + ref->offset); + estack_push(stack, top, ax, bx, ax_t, bx_t); + memcpy(&estack_ax(stack, top)->u.d, &interpreter_stack_data[ref->offset], + sizeof(struct literal_double)); + estack_ax_t = REG_DOUBLE; + dbg_printf("ref load double %g\n", estack_ax(stack, top)->u.d); + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + PO; + } + + /* load from immediate operand */ + OP(BYTECODE_OP_LOAD_STRING): + { + struct load_op *insn = (struct load_op *) pc; + + dbg_printf("load string %s\n", insn->data); + estack_push(stack, top, ax, bx, ax_t, bx_t); + estack_ax(stack, top)->u.s.str = insn->data; + estack_ax(stack, top)->u.s.seq_len = SIZE_MAX; + estack_ax(stack, top)->u.s.literal_type = + ESTACK_STRING_LITERAL_TYPE_PLAIN; + estack_ax_t = REG_STRING; + next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; + PO; + } + + OP(BYTECODE_OP_LOAD_STAR_GLOB_STRING): + { + struct load_op *insn = (struct load_op *) pc; + + dbg_printf("load globbing pattern %s\n", insn->data); + estack_push(stack, top, ax, bx, ax_t, bx_t); + estack_ax(stack, top)->u.s.str = insn->data; + estack_ax(stack, top)->u.s.seq_len = SIZE_MAX; + estack_ax(stack, top)->u.s.literal_type = + ESTACK_STRING_LITERAL_TYPE_STAR_GLOB; + estack_ax_t = REG_STAR_GLOB_STRING; + next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; + PO; + } + + OP(BYTECODE_OP_LOAD_S64): + { + struct load_op *insn = (struct load_op *) pc; + + estack_push(stack, top, ax, bx, ax_t, bx_t); + estack_ax_v = ((struct literal_numeric *) insn->data)->v; + estack_ax_t = REG_S64; + dbg_printf("load s64 %" PRIi64 "\n", estack_ax_v); + next_pc += sizeof(struct load_op) + + sizeof(struct literal_numeric); + PO; + } + + OP(BYTECODE_OP_LOAD_DOUBLE): + { + struct load_op *insn = (struct load_op *) pc; + + estack_push(stack, top, ax, bx, ax_t, bx_t); + memcpy(&estack_ax(stack, top)->u.d, insn->data, + sizeof(struct literal_double)); + estack_ax_t = REG_DOUBLE; + dbg_printf("load double %g\n", estack_ax(stack, top)->u.d); + next_pc += sizeof(struct load_op) + + sizeof(struct literal_double); + PO; + } + + /* cast */ + OP(BYTECODE_OP_CAST_TO_S64): + { + /* Dynamic typing. 
*/ + switch (estack_ax_t) { + case REG_S64: + JUMP_TO(BYTECODE_OP_CAST_NOP); + case REG_DOUBLE: + JUMP_TO(BYTECODE_OP_CAST_DOUBLE_TO_S64); + case REG_U64: + estack_ax_t = REG_S64; + next_pc += sizeof(struct cast_op); + case REG_STRING: /* Fall-through */ + case REG_STAR_GLOB_STRING: + ret = -EINVAL; + goto end; + default: + ERR("Unknown interpreter register type (%d)", + (int) estack_ax_t); + ret = -EINVAL; + goto end; + } + } + + OP(BYTECODE_OP_CAST_DOUBLE_TO_S64): + { + estack_ax_v = (int64_t) estack_ax(stack, top)->u.d; + estack_ax_t = REG_S64; + next_pc += sizeof(struct cast_op); + PO; + } + + OP(BYTECODE_OP_CAST_NOP): + { + next_pc += sizeof(struct cast_op); + PO; + } + + /* get context ref */ + OP(BYTECODE_OP_GET_CONTEXT_REF): + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + struct lttng_ctx_field *ctx_field; + struct lttng_ctx_value v; + + dbg_printf("get context ref offset %u type dynamic\n", + ref->offset); + ctx_field = &ctx->fields[ref->offset]; + ctx_field->get_value(ctx_field, &v); + estack_push(stack, top, ax, bx, ax_t, bx_t); + switch (v.sel) { + case LTTNG_UST_DYNAMIC_TYPE_NONE: + ret = -EINVAL; + goto end; + case LTTNG_UST_DYNAMIC_TYPE_S64: + estack_ax_v = v.u.s64; + estack_ax_t = REG_S64; + dbg_printf("ref get context dynamic s64 %" PRIi64 "\n", estack_ax_v); + break; + case LTTNG_UST_DYNAMIC_TYPE_DOUBLE: + estack_ax(stack, top)->u.d = v.u.d; + estack_ax_t = REG_DOUBLE; + dbg_printf("ref get context dynamic double %g\n", estack_ax(stack, top)->u.d); + break; + case LTTNG_UST_DYNAMIC_TYPE_STRING: + estack_ax(stack, top)->u.s.str = v.u.str; + if (unlikely(!estack_ax(stack, top)->u.s.str)) { + dbg_printf("Interpreter warning: loading a NULL string.\n"); + ret = -EINVAL; + goto end; + } + estack_ax(stack, top)->u.s.seq_len = SIZE_MAX; + estack_ax(stack, top)->u.s.literal_type = + ESTACK_STRING_LITERAL_TYPE_NONE; + dbg_printf("ref get context dynamic string %s\n", estack_ax(stack, top)->u.s.str); + estack_ax_t = REG_STRING; + break; + default: + dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel); + ret = -EINVAL; + goto end; + } + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + PO; + } + + OP(BYTECODE_OP_GET_CONTEXT_REF_STRING): + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + struct lttng_ctx_field *ctx_field; + struct lttng_ctx_value v; + + dbg_printf("get context ref offset %u type string\n", + ref->offset); + ctx_field = &ctx->fields[ref->offset]; + ctx_field->get_value(ctx_field, &v); + estack_push(stack, top, ax, bx, ax_t, bx_t); + estack_ax(stack, top)->u.s.str = v.u.str; + if (unlikely(!estack_ax(stack, top)->u.s.str)) { + dbg_printf("Interpreter warning: loading a NULL string.\n"); + ret = -EINVAL; + goto end; + } + estack_ax(stack, top)->u.s.seq_len = SIZE_MAX; + estack_ax(stack, top)->u.s.literal_type = + ESTACK_STRING_LITERAL_TYPE_NONE; + estack_ax_t = REG_STRING; + dbg_printf("ref get context string %s\n", estack_ax(stack, top)->u.s.str); + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + PO; + } + + OP(BYTECODE_OP_GET_CONTEXT_REF_S64): + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + struct lttng_ctx_field *ctx_field; + struct lttng_ctx_value v; + + dbg_printf("get context ref offset %u type s64\n", + ref->offset); + ctx_field = &ctx->fields[ref->offset]; + ctx_field->get_value(ctx_field, &v); + estack_push(stack, top, 
ax, bx, ax_t, bx_t); + estack_ax_v = v.u.s64; + estack_ax_t = REG_S64; + dbg_printf("ref get context s64 %" PRIi64 "\n", estack_ax_v); + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + PO; + } + + OP(BYTECODE_OP_GET_CONTEXT_REF_DOUBLE): + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + struct lttng_ctx_field *ctx_field; + struct lttng_ctx_value v; + + dbg_printf("get context ref offset %u type double\n", + ref->offset); + ctx_field = &ctx->fields[ref->offset]; + ctx_field->get_value(ctx_field, &v); + estack_push(stack, top, ax, bx, ax_t, bx_t); + memcpy(&estack_ax(stack, top)->u.d, &v.u.d, sizeof(struct literal_double)); + estack_ax_t = REG_DOUBLE; + dbg_printf("ref get context double %g\n", estack_ax(stack, top)->u.d); + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + PO; + } + + OP(BYTECODE_OP_GET_CONTEXT_ROOT): + { + dbg_printf("op get context root\n"); + estack_push(stack, top, ax, bx, ax_t, bx_t); + estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT; + /* "field" only needed for variants. */ + estack_ax(stack, top)->u.ptr.field = NULL; + estack_ax_t = REG_PTR; + next_pc += sizeof(struct load_op); + PO; + } + + OP(BYTECODE_OP_GET_APP_CONTEXT_ROOT): + { + dbg_printf("op get app context root\n"); + estack_push(stack, top, ax, bx, ax_t, bx_t); + estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_APP_CONTEXT; + /* "field" only needed for variants. */ + estack_ax(stack, top)->u.ptr.field = NULL; + estack_ax_t = REG_PTR; + next_pc += sizeof(struct load_op); + PO; + } + + OP(BYTECODE_OP_GET_PAYLOAD_ROOT): + { + dbg_printf("op get app payload root\n"); + estack_push(stack, top, ax, bx, ax_t, bx_t); + estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD; + estack_ax(stack, top)->u.ptr.ptr = interpreter_stack_data; + /* "field" only needed for variants. */ + estack_ax(stack, top)->u.ptr.field = NULL; + estack_ax_t = REG_PTR; + next_pc += sizeof(struct load_op); + PO; + } + + OP(BYTECODE_OP_GET_SYMBOL): + { + dbg_printf("op get symbol\n"); + switch (estack_ax(stack, top)->u.ptr.type) { + case LOAD_OBJECT: + ERR("Nested fields not implemented yet."); + ret = -EINVAL; + goto end; + case LOAD_ROOT_CONTEXT: + case LOAD_ROOT_APP_CONTEXT: + case LOAD_ROOT_PAYLOAD: + /* + * symbol lookup is performed by + * specialization. + */ + ret = -EINVAL; + goto end; + } + next_pc += sizeof(struct load_op) + sizeof(struct get_symbol); + PO; + } + + OP(BYTECODE_OP_GET_SYMBOL_FIELD): + { + /* + * Used for first variant encountered in a + * traversal. Variants are not implemented yet. 
+ */ + ret = -EINVAL; + goto end; + } + + OP(BYTECODE_OP_GET_INDEX_U16): + { + struct load_op *insn = (struct load_op *) pc; + struct get_index_u16 *index = (struct get_index_u16 *) insn->data; + + dbg_printf("op get index u16\n"); + ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top)); + if (ret) + goto end; + estack_ax_v = estack_ax(stack, top)->u.v; + estack_ax_t = estack_ax(stack, top)->type; + next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16); + PO; + } + + OP(BYTECODE_OP_GET_INDEX_U64): + { + struct load_op *insn = (struct load_op *) pc; + struct get_index_u64 *index = (struct get_index_u64 *) insn->data; + + dbg_printf("op get index u64\n"); + ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top)); + if (ret) + goto end; + estack_ax_v = estack_ax(stack, top)->u.v; + estack_ax_t = estack_ax(stack, top)->type; + next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64); + PO; + } + + OP(BYTECODE_OP_LOAD_FIELD): + { + dbg_printf("op load field\n"); + ret = dynamic_load_field(estack_ax(stack, top)); + if (ret) + goto end; + estack_ax_v = estack_ax(stack, top)->u.v; + estack_ax_t = estack_ax(stack, top)->type; + next_pc += sizeof(struct load_op); + PO; + } + + OP(BYTECODE_OP_LOAD_FIELD_S8): + { + dbg_printf("op load field s8\n"); + + estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr; + estack_ax_t = REG_S64; + next_pc += sizeof(struct load_op); + PO; + } + OP(BYTECODE_OP_LOAD_FIELD_S16): + { + dbg_printf("op load field s16\n"); + + estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr; + estack_ax_t = REG_S64; + next_pc += sizeof(struct load_op); + PO; + } + OP(BYTECODE_OP_LOAD_FIELD_S32): + { + dbg_printf("op load field s32\n"); + + estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr; + estack_ax_t = REG_S64; + next_pc += sizeof(struct load_op); + PO; + } + OP(BYTECODE_OP_LOAD_FIELD_S64): + { + dbg_printf("op load field s64\n"); + + estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr; + estack_ax_t = REG_S64; + next_pc += sizeof(struct load_op); + PO; + } + OP(BYTECODE_OP_LOAD_FIELD_U8): + { + dbg_printf("op load field u8\n"); + + estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr; + estack_ax_t = REG_U64; + next_pc += sizeof(struct load_op); + PO; + } + OP(BYTECODE_OP_LOAD_FIELD_U16): + { + dbg_printf("op load field u16\n"); + + estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr; + estack_ax_t = REG_U64; + next_pc += sizeof(struct load_op); + PO; + } + OP(BYTECODE_OP_LOAD_FIELD_U32): + { + dbg_printf("op load field u32\n"); + + estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr; + estack_ax_t = REG_U64; + next_pc += sizeof(struct load_op); + PO; + } + OP(BYTECODE_OP_LOAD_FIELD_U64): + { + dbg_printf("op load field u64\n"); + + estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr; + estack_ax_t = REG_U64; + next_pc += sizeof(struct load_op); + PO; + } + OP(BYTECODE_OP_LOAD_FIELD_DOUBLE): + { + dbg_printf("op load field double\n"); + + memcpy(&estack_ax(stack, top)->u.d, + estack_ax(stack, top)->u.ptr.ptr, + sizeof(struct literal_double)); + estack_ax(stack, top)->type = REG_DOUBLE; + next_pc += sizeof(struct load_op); + PO; + } + + OP(BYTECODE_OP_LOAD_FIELD_STRING): + { + const char *str; + + dbg_printf("op load field string\n"); + str = (const char *) estack_ax(stack, top)->u.ptr.ptr; + estack_ax(stack, top)->u.s.str = str; + if (unlikely(!estack_ax(stack, top)->u.s.str)) { + dbg_printf("Interpreter warning: loading a NULL string.\n"); + ret = -EINVAL; + 
goto end; + } + estack_ax(stack, top)->u.s.seq_len = SIZE_MAX; + estack_ax(stack, top)->u.s.literal_type = + ESTACK_STRING_LITERAL_TYPE_NONE; + estack_ax(stack, top)->type = REG_STRING; + next_pc += sizeof(struct load_op); + PO; + } + + OP(BYTECODE_OP_LOAD_FIELD_SEQUENCE): + { + const char *ptr; + + dbg_printf("op load field string sequence\n"); + ptr = estack_ax(stack, top)->u.ptr.ptr; + estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr; + estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long)); + estack_ax(stack, top)->type = REG_STRING; + if (unlikely(!estack_ax(stack, top)->u.s.str)) { + dbg_printf("Interpreter warning: loading a NULL sequence.\n"); + ret = -EINVAL; + goto end; + } + estack_ax(stack, top)->u.s.literal_type = + ESTACK_STRING_LITERAL_TYPE_NONE; + next_pc += sizeof(struct load_op); + PO; + } + + END_OP +end: + /* Return _DISCARD on error. */ + if (ret) + return LTTNG_INTERPRETER_DISCARD; + + if (output) { + return lttng_bytecode_interpret_format_output(estack_ax(stack, top), + output); + } + + return retval; +} + +uint64_t lttng_bytecode_filter_interpret(void *filter_data, + const char *filter_stack_data) +{ + return bytecode_interpret(filter_data, filter_stack_data, NULL); +} + +#undef START_OP +#undef OP +#undef PO +#undef END_OP diff --git a/liblttng-ust/lttng-bytecode-specialize.c b/liblttng-ust/lttng-bytecode-specialize.c new file mode 100644 index 00000000..55b2ebf2 --- /dev/null +++ b/liblttng-ust/lttng-bytecode-specialize.c @@ -0,0 +1,1528 @@ +/* + * lttng-bytecode-specialize.c + * + * LTTng UST bytecode specializer. + * + * Copyright (C) 2010-2016 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#define _LGPL_SOURCE +#include +#include + +#include "lttng-bytecode.h" +#include +#include "ust-events-internal.h" + +static int lttng_fls(int val) +{ + int r = 32; + unsigned int x = (unsigned int) val; + + if (!x) + return 0; + if (!(x & 0xFFFF0000U)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xFF000000U)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xF0000000U)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xC0000000U)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000U)) { + r -= 1; + } + return r; +} + +static int get_count_order(unsigned int count) +{ + int order; + + order = lttng_fls(count) - 1; + if (count & (count - 1)) + order++; + return order; +} + +static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime, + size_t align, size_t len) +{ + ssize_t ret; + size_t padding = lttng_ust_offset_align(runtime->data_len, align); + size_t new_len = runtime->data_len + padding + len; + size_t new_alloc_len = new_len; + size_t old_alloc_len = runtime->data_alloc_len; + + if (new_len > BYTECODE_MAX_DATA_LEN) + return -EINVAL; + + if (new_alloc_len > old_alloc_len) { + char *newptr; + + new_alloc_len = + max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1); + newptr = realloc(runtime->data, new_alloc_len); + if (!newptr) + return -ENOMEM; + runtime->data = newptr; + /* We zero directly the memory from start of allocation. */ + memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len); + runtime->data_alloc_len = new_alloc_len; + } + runtime->data_len += padding; + ret = runtime->data_len; + runtime->data_len += len; + return ret; +} + +static ssize_t bytecode_push_data(struct bytecode_runtime *runtime, + const void *p, size_t align, size_t len) +{ + ssize_t offset; + + offset = bytecode_reserve_data(runtime, align, len); + if (offset < 0) + return -ENOMEM; + memcpy(&runtime->data[offset], p, len); + return offset; +} + +static int specialize_load_field(struct vstack_entry *stack_top, + struct load_op *insn) +{ + int ret; + + switch (stack_top->load.type) { + case LOAD_OBJECT: + break; + case LOAD_ROOT_CONTEXT: + case LOAD_ROOT_APP_CONTEXT: + case LOAD_ROOT_PAYLOAD: + default: + dbg_printf("Bytecode warning: cannot load root, missing field name.\n"); + ret = -EINVAL; + goto end; + } + switch (stack_top->load.object_type) { + case OBJECT_TYPE_S8: + dbg_printf("op load field s8\n"); + stack_top->type = REG_S64; + if (!stack_top->load.rev_bo) + insn->op = BYTECODE_OP_LOAD_FIELD_S8; + break; + case OBJECT_TYPE_S16: + dbg_printf("op load field s16\n"); + stack_top->type = REG_S64; + if (!stack_top->load.rev_bo) + insn->op = BYTECODE_OP_LOAD_FIELD_S16; + break; + case OBJECT_TYPE_S32: + dbg_printf("op load field s32\n"); + stack_top->type = REG_S64; + if (!stack_top->load.rev_bo) + insn->op = BYTECODE_OP_LOAD_FIELD_S32; + break; + case OBJECT_TYPE_S64: + dbg_printf("op load field s64\n"); + stack_top->type = REG_S64; + if (!stack_top->load.rev_bo) + insn->op = BYTECODE_OP_LOAD_FIELD_S64; + break; + case OBJECT_TYPE_U8: + dbg_printf("op load field u8\n"); + stack_top->type = REG_U64; + insn->op = BYTECODE_OP_LOAD_FIELD_U8; + break; + case OBJECT_TYPE_U16: + dbg_printf("op load field u16\n"); + stack_top->type = REG_U64; + if (!stack_top->load.rev_bo) + insn->op = BYTECODE_OP_LOAD_FIELD_U16; + break; + case OBJECT_TYPE_U32: + dbg_printf("op load field u32\n"); + stack_top->type = REG_U64; + if (!stack_top->load.rev_bo) + insn->op = BYTECODE_OP_LOAD_FIELD_U32; + break; + case OBJECT_TYPE_U64: + dbg_printf("op load field u64\n"); + stack_top->type = REG_U64; + 
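/*
 * Annotation (added for clarity, not part of the original patch): as with
 * the other fixed-size integer cases in this switch, the load is only
 * specialized into a type-specific opcode when the field is in native byte
 * order; a reverse-byte-order field keeps the generic
 * BYTECODE_OP_LOAD_FIELD opcode, presumably so the interpreter's
 * dynamic_load_field() path can apply the byte swap at run time.
 */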
if (!stack_top->load.rev_bo) + insn->op = BYTECODE_OP_LOAD_FIELD_U64; + break; + case OBJECT_TYPE_DOUBLE: + stack_top->type = REG_DOUBLE; + insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE; + break; + case OBJECT_TYPE_STRING: + dbg_printf("op load field string\n"); + stack_top->type = REG_STRING; + insn->op = BYTECODE_OP_LOAD_FIELD_STRING; + break; + case OBJECT_TYPE_STRING_SEQUENCE: + dbg_printf("op load field string sequence\n"); + stack_top->type = REG_STRING; + insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE; + break; + case OBJECT_TYPE_DYNAMIC: + dbg_printf("op load field dynamic\n"); + stack_top->type = REG_UNKNOWN; + /* Don't specialize load op. */ + break; + case OBJECT_TYPE_SEQUENCE: + case OBJECT_TYPE_ARRAY: + case OBJECT_TYPE_STRUCT: + case OBJECT_TYPE_VARIANT: + ERR("Sequences, arrays, struct and variant cannot be loaded (nested types)."); + ret = -EINVAL; + goto end; + } + return 0; + +end: + return ret; +} + +static int specialize_get_index_object_type(enum object_type *otype, + int signedness, uint32_t elem_len) +{ + switch (elem_len) { + case 8: + if (signedness) + *otype = OBJECT_TYPE_S8; + else + *otype = OBJECT_TYPE_U8; + break; + case 16: + if (signedness) + *otype = OBJECT_TYPE_S16; + else + *otype = OBJECT_TYPE_U16; + break; + case 32: + if (signedness) + *otype = OBJECT_TYPE_S32; + else + *otype = OBJECT_TYPE_U32; + break; + case 64: + if (signedness) + *otype = OBJECT_TYPE_S64; + else + *otype = OBJECT_TYPE_U64; + break; + default: + return -EINVAL; + } + return 0; +} + +static int specialize_get_index(struct bytecode_runtime *runtime, + struct load_op *insn, uint64_t index, + struct vstack_entry *stack_top, + int idx_len) +{ + int ret; + struct bytecode_get_index_data gid; + ssize_t data_offset; + + memset(&gid, 0, sizeof(gid)); + switch (stack_top->load.type) { + case LOAD_OBJECT: + switch (stack_top->load.object_type) { + case OBJECT_TYPE_ARRAY: + { + const struct lttng_integer_type *integer_type; + const struct lttng_event_field *field; + uint32_t elem_len, num_elems; + int signedness; + + field = stack_top->load.field; + switch (field->type.atype) { + case atype_array: + integer_type = &field->type.u.legacy.array.elem_type.u.basic.integer; + num_elems = field->type.u.legacy.array.length; + break; + case atype_array_nestable: + if (field->type.u.array_nestable.elem_type->atype != atype_integer) { + ret = -EINVAL; + goto end; + } + integer_type = &field->type.u.array_nestable.elem_type->u.integer; + num_elems = field->type.u.array_nestable.length; + break; + default: + ret = -EINVAL; + goto end; + } + elem_len = integer_type->size; + signedness = integer_type->signedness; + if (index >= num_elems) { + ret = -EINVAL; + goto end; + } + ret = specialize_get_index_object_type(&stack_top->load.object_type, + signedness, elem_len); + if (ret) + goto end; + gid.offset = index * (elem_len / CHAR_BIT); + gid.array_len = num_elems * (elem_len / CHAR_BIT); + gid.elem.type = stack_top->load.object_type; + gid.elem.len = elem_len; + if (integer_type->reverse_byte_order) + gid.elem.rev_bo = true; + stack_top->load.rev_bo = gid.elem.rev_bo; + break; + } + case OBJECT_TYPE_SEQUENCE: + { + const struct lttng_integer_type *integer_type; + const struct lttng_event_field *field; + uint32_t elem_len; + int signedness; + + field = stack_top->load.field; + switch (field->type.atype) { + case atype_sequence: + integer_type = &field->type.u.legacy.sequence.elem_type.u.basic.integer; + break; + case atype_sequence_nestable: + if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) { + 
ret = -EINVAL; + goto end; + } + integer_type = &field->type.u.sequence_nestable.elem_type->u.integer; + break; + default: + ret = -EINVAL; + goto end; + } + elem_len = integer_type->size; + signedness = integer_type->signedness; + ret = specialize_get_index_object_type(&stack_top->load.object_type, + signedness, elem_len); + if (ret) + goto end; + gid.offset = index * (elem_len / CHAR_BIT); + gid.elem.type = stack_top->load.object_type; + gid.elem.len = elem_len; + if (integer_type->reverse_byte_order) + gid.elem.rev_bo = true; + stack_top->load.rev_bo = gid.elem.rev_bo; + break; + } + case OBJECT_TYPE_STRUCT: + /* Only generated by the specialize phase. */ + case OBJECT_TYPE_VARIANT: /* Fall-through */ + default: + ERR("Unexpected get index type %d", + (int) stack_top->load.object_type); + ret = -EINVAL; + goto end; + } + break; + case LOAD_ROOT_CONTEXT: + case LOAD_ROOT_APP_CONTEXT: + case LOAD_ROOT_PAYLOAD: + ERR("Index lookup for root field not implemented yet."); + ret = -EINVAL; + goto end; + } + data_offset = bytecode_push_data(runtime, &gid, + __alignof__(gid), sizeof(gid)); + if (data_offset < 0) { + ret = -EINVAL; + goto end; + } + switch (idx_len) { + case 2: + ((struct get_index_u16 *) insn->data)->index = data_offset; + break; + case 8: + ((struct get_index_u64 *) insn->data)->index = data_offset; + break; + default: + ret = -EINVAL; + goto end; + } + + return 0; + +end: + return ret; +} + +static int specialize_context_lookup_name(struct lttng_ctx *ctx, + struct bytecode_runtime *bytecode, + struct load_op *insn) +{ + uint16_t offset; + const char *name; + + offset = ((struct get_symbol *) insn->data)->offset; + name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset; + return lttng_get_context_index(ctx, name); +} + +static int specialize_load_object(const struct lttng_event_field *field, + struct vstack_load *load, bool is_context) +{ + load->type = LOAD_OBJECT; + + switch (field->type.atype) { + case atype_integer: + if (field->type.u.integer.signedness) + load->object_type = OBJECT_TYPE_S64; + else + load->object_type = OBJECT_TYPE_U64; + load->rev_bo = false; + break; + case atype_enum: + case atype_enum_nestable: + { + const struct lttng_integer_type *itype; + + if (field->type.atype == atype_enum) { + itype = &field->type.u.legacy.basic.enumeration.container_type; + } else { + itype = &field->type.u.enum_nestable.container_type->u.integer; + } + if (itype->signedness) + load->object_type = OBJECT_TYPE_S64; + else + load->object_type = OBJECT_TYPE_U64; + load->rev_bo = false; + break; + } + case atype_array: + if (field->type.u.legacy.array.elem_type.atype != atype_integer) { + ERR("Array nesting only supports integer types."); + return -EINVAL; + } + if (is_context) { + load->object_type = OBJECT_TYPE_STRING; + } else { + if (field->type.u.legacy.array.elem_type.u.basic.integer.encoding == lttng_encode_none) { + load->object_type = OBJECT_TYPE_ARRAY; + load->field = field; + } else { + load->object_type = OBJECT_TYPE_STRING_SEQUENCE; + } + } + break; + case atype_array_nestable: + if (field->type.u.array_nestable.elem_type->atype != atype_integer) { + ERR("Array nesting only supports integer types."); + return -EINVAL; + } + if (is_context) { + load->object_type = OBJECT_TYPE_STRING; + } else { + if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) { + load->object_type = OBJECT_TYPE_ARRAY; + load->field = field; + } else { + load->object_type = OBJECT_TYPE_STRING_SEQUENCE; + } + } + break; + case atype_sequence: + if 
(field->type.u.legacy.sequence.elem_type.atype != atype_integer) { + ERR("Sequence nesting only supports integer types."); + return -EINVAL; + } + if (is_context) { + load->object_type = OBJECT_TYPE_STRING; + } else { + if (field->type.u.legacy.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) { + load->object_type = OBJECT_TYPE_SEQUENCE; + load->field = field; + } else { + load->object_type = OBJECT_TYPE_STRING_SEQUENCE; + } + } + break; + case atype_sequence_nestable: + if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) { + ERR("Sequence nesting only supports integer types."); + return -EINVAL; + } + if (is_context) { + load->object_type = OBJECT_TYPE_STRING; + } else { + if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) { + load->object_type = OBJECT_TYPE_SEQUENCE; + load->field = field; + } else { + load->object_type = OBJECT_TYPE_STRING_SEQUENCE; + } + } + break; + + case atype_string: + load->object_type = OBJECT_TYPE_STRING; + break; + case atype_float: + load->object_type = OBJECT_TYPE_DOUBLE; + break; + case atype_dynamic: + load->object_type = OBJECT_TYPE_DYNAMIC; + break; + case atype_struct: + ERR("Structure type cannot be loaded."); + return -EINVAL; + default: + ERR("Unknown type: %d", (int) field->type.atype); + return -EINVAL; + } + return 0; +} + +static int specialize_context_lookup(struct lttng_ctx *ctx, + struct bytecode_runtime *runtime, + struct load_op *insn, + struct vstack_load *load) +{ + int idx, ret; + struct lttng_ctx_field *ctx_field; + struct lttng_event_field *field; + struct bytecode_get_index_data gid; + ssize_t data_offset; + + idx = specialize_context_lookup_name(ctx, runtime, insn); + if (idx < 0) { + return -ENOENT; + } + ctx_field = &ctx->fields[idx]; + field = &ctx_field->event_field; + ret = specialize_load_object(field, load, true); + if (ret) + return ret; + /* Specialize each get_symbol into a get_index. */ + insn->op = BYTECODE_OP_GET_INDEX_U16; + memset(&gid, 0, sizeof(gid)); + gid.ctx_index = idx; + gid.elem.type = load->object_type; + gid.elem.rev_bo = load->rev_bo; + gid.field = field; + data_offset = bytecode_push_data(runtime, &gid, + __alignof__(gid), sizeof(gid)); + if (data_offset < 0) { + return -EINVAL; + } + ((struct get_index_u16 *) insn->data)->index = data_offset; + return 0; +} + +static int specialize_app_context_lookup(struct lttng_ctx **pctx, + struct bytecode_runtime *runtime, + struct load_op *insn, + struct vstack_load *load) +{ + uint16_t offset; + const char *orig_name; + char *name = NULL; + int idx, ret; + struct lttng_ctx_field *ctx_field; + struct lttng_event_field *field; + struct bytecode_get_index_data gid; + ssize_t data_offset; + + offset = ((struct get_symbol *) insn->data)->offset; + orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset; + name = zmalloc(strlen(orig_name) + strlen("$app.") + 1); + if (!name) { + ret = -ENOMEM; + goto end; + } + strcpy(name, "$app."); + strcat(name, orig_name); + idx = lttng_get_context_index(*pctx, name); + if (idx < 0) { + assert(lttng_context_is_app(name)); + ret = lttng_ust_add_app_context_to_ctx_rcu(name, + pctx); + if (ret) + return ret; + idx = lttng_get_context_index(*pctx, name); + if (idx < 0) + return -ENOENT; + } + ctx_field = &(*pctx)->fields[idx]; + field = &ctx_field->event_field; + ret = specialize_load_object(field, load, true); + if (ret) + goto end; + /* Specialize each get_symbol into a get_index. 
*/ + insn->op = BYTECODE_OP_GET_INDEX_U16; + memset(&gid, 0, sizeof(gid)); + gid.ctx_index = idx; + gid.elem.type = load->object_type; + gid.elem.rev_bo = load->rev_bo; + gid.field = field; + data_offset = bytecode_push_data(runtime, &gid, + __alignof__(gid), sizeof(gid)); + if (data_offset < 0) { + ret = -EINVAL; + goto end; + } + ((struct get_index_u16 *) insn->data)->index = data_offset; + ret = 0; +end: + free(name); + return ret; +} + +static int specialize_payload_lookup(const struct lttng_event_desc *event_desc, + struct bytecode_runtime *runtime, + struct load_op *insn, + struct vstack_load *load) +{ + const char *name; + uint16_t offset; + unsigned int i, nr_fields; + bool found = false; + uint32_t field_offset = 0; + const struct lttng_event_field *field; + int ret; + struct bytecode_get_index_data gid; + ssize_t data_offset; + + nr_fields = event_desc->nr_fields; + offset = ((struct get_symbol *) insn->data)->offset; + name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset; + for (i = 0; i < nr_fields; i++) { + field = &event_desc->fields[i]; + if (field->u.ext.nofilter) { + continue; + } + if (!strcmp(field->name, name)) { + found = true; + break; + } + /* compute field offset on stack */ + switch (field->type.atype) { + case atype_integer: + case atype_enum: + case atype_enum_nestable: + field_offset += sizeof(int64_t); + break; + case atype_array: + case atype_array_nestable: + case atype_sequence: + case atype_sequence_nestable: + field_offset += sizeof(unsigned long); + field_offset += sizeof(void *); + break; + case atype_string: + field_offset += sizeof(void *); + break; + case atype_float: + field_offset += sizeof(double); + break; + default: + ret = -EINVAL; + goto end; + } + } + if (!found) { + ret = -EINVAL; + goto end; + } + + ret = specialize_load_object(field, load, false); + if (ret) + goto end; + + /* Specialize each get_symbol into a get_index. 
*/ + insn->op = BYTECODE_OP_GET_INDEX_U16; + memset(&gid, 0, sizeof(gid)); + gid.offset = field_offset; + gid.elem.type = load->object_type; + gid.elem.rev_bo = load->rev_bo; + gid.field = field; + data_offset = bytecode_push_data(runtime, &gid, + __alignof__(gid), sizeof(gid)); + if (data_offset < 0) { + ret = -EINVAL; + goto end; + } + ((struct get_index_u16 *) insn->data)->index = data_offset; + ret = 0; +end: + return ret; +} + +int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc, + struct bytecode_runtime *bytecode) +{ + void *pc, *next_pc, *start_pc; + int ret = -EINVAL; + struct vstack _stack; + struct vstack *stack = &_stack; + struct lttng_ctx **pctx = bytecode->p.pctx; + + vstack_init(stack); + + start_pc = &bytecode->code[0]; + for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; + pc = next_pc) { + switch (*(bytecode_opcode_t *) pc) { + case BYTECODE_OP_UNKNOWN: + default: + ERR("unknown bytecode op %u\n", + (unsigned int) *(bytecode_opcode_t *) pc); + ret = -EINVAL; + goto end; + + case BYTECODE_OP_RETURN: + if (vstack_ax(stack)->type == REG_S64 || + vstack_ax(stack)->type == REG_U64) + *(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64; + ret = 0; + goto end; + + case BYTECODE_OP_RETURN_S64: + if (vstack_ax(stack)->type != REG_S64 && + vstack_ax(stack)->type != REG_U64) { + ERR("Unexpected register type\n"); + ret = -EINVAL; + goto end; + } + ret = 0; + goto end; + + /* binary */ + case BYTECODE_OP_MUL: + case BYTECODE_OP_DIV: + case BYTECODE_OP_MOD: + case BYTECODE_OP_PLUS: + case BYTECODE_OP_MINUS: + ERR("unsupported bytecode op %u\n", + (unsigned int) *(bytecode_opcode_t *) pc); + ret = -EINVAL; + goto end; + + case BYTECODE_OP_EQ: + { + struct binary_op *insn = (struct binary_op *) pc; + + switch(vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_STRING: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING) + insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING; + else + insn->op = BYTECODE_OP_EQ_STRING; + break; + case REG_STAR_GLOB_STRING: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING; + break; + case REG_S64: + case REG_U64: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_EQ_S64; + else + insn->op = BYTECODE_OP_EQ_DOUBLE_S64; + break; + case REG_DOUBLE: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_EQ_S64_DOUBLE; + else + insn->op = BYTECODE_OP_EQ_DOUBLE; + break; + case REG_UNKNOWN: + break; /* Dynamic typing. 
*/ + } + /* Pop 2, push 1 */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct binary_op); + break; + } + + case BYTECODE_OP_NE: + { + struct binary_op *insn = (struct binary_op *) pc; + + switch(vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_STRING: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING) + insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING; + else + insn->op = BYTECODE_OP_NE_STRING; + break; + case REG_STAR_GLOB_STRING: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING; + break; + case REG_S64: + case REG_U64: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_NE_S64; + else + insn->op = BYTECODE_OP_NE_DOUBLE_S64; + break; + case REG_DOUBLE: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_NE_S64_DOUBLE; + else + insn->op = BYTECODE_OP_NE_DOUBLE; + break; + case REG_UNKNOWN: + break; /* Dynamic typing. */ + } + /* Pop 2, push 1 */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct binary_op); + break; + } + + case BYTECODE_OP_GT: + { + struct binary_op *insn = (struct binary_op *) pc; + + switch(vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_STAR_GLOB_STRING: + ERR("invalid register type for > binary operator\n"); + ret = -EINVAL; + goto end; + case REG_STRING: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + insn->op = BYTECODE_OP_GT_STRING; + break; + case REG_S64: + case REG_U64: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_GT_S64; + else + insn->op = BYTECODE_OP_GT_DOUBLE_S64; + break; + case REG_DOUBLE: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_GT_S64_DOUBLE; + else + insn->op = BYTECODE_OP_GT_DOUBLE; + break; + case REG_UNKNOWN: + break; /* Dynamic typing. 
*/ + } + /* Pop 2, push 1 */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct binary_op); + break; + } + + case BYTECODE_OP_LT: + { + struct binary_op *insn = (struct binary_op *) pc; + + switch(vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_STAR_GLOB_STRING: + ERR("invalid register type for < binary operator\n"); + ret = -EINVAL; + goto end; + case REG_STRING: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + insn->op = BYTECODE_OP_LT_STRING; + break; + case REG_S64: + case REG_U64: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_LT_S64; + else + insn->op = BYTECODE_OP_LT_DOUBLE_S64; + break; + case REG_DOUBLE: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_LT_S64_DOUBLE; + else + insn->op = BYTECODE_OP_LT_DOUBLE; + break; + case REG_UNKNOWN: + break; /* Dynamic typing. */ + } + /* Pop 2, push 1 */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct binary_op); + break; + } + + case BYTECODE_OP_GE: + { + struct binary_op *insn = (struct binary_op *) pc; + + switch(vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_STAR_GLOB_STRING: + ERR("invalid register type for >= binary operator\n"); + ret = -EINVAL; + goto end; + case REG_STRING: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + insn->op = BYTECODE_OP_GE_STRING; + break; + case REG_S64: + case REG_U64: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_GE_S64; + else + insn->op = BYTECODE_OP_GE_DOUBLE_S64; + break; + case REG_DOUBLE: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_GE_S64_DOUBLE; + else + insn->op = BYTECODE_OP_GE_DOUBLE; + break; + case REG_UNKNOWN: + break; /* Dynamic typing. */ + } + /* Pop 2, push 1 */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_U64; + next_pc += sizeof(struct binary_op); + break; + } + case BYTECODE_OP_LE: + { + struct binary_op *insn = (struct binary_op *) pc; + + switch(vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_STAR_GLOB_STRING: + ERR("invalid register type for <= binary operator\n"); + ret = -EINVAL; + goto end; + case REG_STRING: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + insn->op = BYTECODE_OP_LE_STRING; + break; + case REG_S64: + case REG_U64: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_LE_S64; + else + insn->op = BYTECODE_OP_LE_DOUBLE_S64; + break; + case REG_DOUBLE: + if (vstack_bx(stack)->type == REG_UNKNOWN) + break; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_LE_S64_DOUBLE; + else + insn->op = BYTECODE_OP_LE_DOUBLE; + break; + case REG_UNKNOWN: + break; /* Dynamic typing. 
*/ + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct binary_op); + break; + } + + case BYTECODE_OP_EQ_STRING: + case BYTECODE_OP_NE_STRING: + case BYTECODE_OP_GT_STRING: + case BYTECODE_OP_LT_STRING: + case BYTECODE_OP_GE_STRING: + case BYTECODE_OP_LE_STRING: + case BYTECODE_OP_EQ_STAR_GLOB_STRING: + case BYTECODE_OP_NE_STAR_GLOB_STRING: + case BYTECODE_OP_EQ_S64: + case BYTECODE_OP_NE_S64: + case BYTECODE_OP_GT_S64: + case BYTECODE_OP_LT_S64: + case BYTECODE_OP_GE_S64: + case BYTECODE_OP_LE_S64: + case BYTECODE_OP_EQ_DOUBLE: + case BYTECODE_OP_NE_DOUBLE: + case BYTECODE_OP_GT_DOUBLE: + case BYTECODE_OP_LT_DOUBLE: + case BYTECODE_OP_GE_DOUBLE: + case BYTECODE_OP_LE_DOUBLE: + case BYTECODE_OP_EQ_DOUBLE_S64: + case BYTECODE_OP_NE_DOUBLE_S64: + case BYTECODE_OP_GT_DOUBLE_S64: + case BYTECODE_OP_LT_DOUBLE_S64: + case BYTECODE_OP_GE_DOUBLE_S64: + case BYTECODE_OP_LE_DOUBLE_S64: + case BYTECODE_OP_EQ_S64_DOUBLE: + case BYTECODE_OP_NE_S64_DOUBLE: + case BYTECODE_OP_GT_S64_DOUBLE: + case BYTECODE_OP_LT_S64_DOUBLE: + case BYTECODE_OP_GE_S64_DOUBLE: + case BYTECODE_OP_LE_S64_DOUBLE: + { + /* Pop 2, push 1 */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct binary_op); + break; + } + + case BYTECODE_OP_BIT_RSHIFT: + case BYTECODE_OP_BIT_LSHIFT: + case BYTECODE_OP_BIT_AND: + case BYTECODE_OP_BIT_OR: + case BYTECODE_OP_BIT_XOR: + { + /* Pop 2, push 1 */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct binary_op); + break; + } + + /* unary */ + case BYTECODE_OP_UNARY_PLUS: + { + struct unary_op *insn = (struct unary_op *) pc; + + switch(vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_S64: + case REG_U64: + insn->op = BYTECODE_OP_UNARY_PLUS_S64; + break; + case REG_DOUBLE: + insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE; + break; + case REG_UNKNOWN: /* Dynamic typing. */ + break; + } + /* Pop 1, push 1 */ + next_pc += sizeof(struct unary_op); + break; + } + + case BYTECODE_OP_UNARY_MINUS: + { + struct unary_op *insn = (struct unary_op *) pc; + + switch(vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_S64: + case REG_U64: + insn->op = BYTECODE_OP_UNARY_MINUS_S64; + break; + case REG_DOUBLE: + insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE; + break; + case REG_UNKNOWN: /* Dynamic typing. */ + break; + } + /* Pop 1, push 1 */ + next_pc += sizeof(struct unary_op); + break; + } + + case BYTECODE_OP_UNARY_NOT: + { + struct unary_op *insn = (struct unary_op *) pc; + + switch(vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_S64: + case REG_U64: + insn->op = BYTECODE_OP_UNARY_NOT_S64; + break; + case REG_DOUBLE: + insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE; + break; + case REG_UNKNOWN: /* Dynamic typing. 
*/ + break; + } + /* Pop 1, push 1 */ + next_pc += sizeof(struct unary_op); + break; + } + + case BYTECODE_OP_UNARY_BIT_NOT: + { + /* Pop 1, push 1 */ + next_pc += sizeof(struct unary_op); + break; + } + + case BYTECODE_OP_UNARY_PLUS_S64: + case BYTECODE_OP_UNARY_MINUS_S64: + case BYTECODE_OP_UNARY_NOT_S64: + case BYTECODE_OP_UNARY_PLUS_DOUBLE: + case BYTECODE_OP_UNARY_MINUS_DOUBLE: + case BYTECODE_OP_UNARY_NOT_DOUBLE: + { + /* Pop 1, push 1 */ + next_pc += sizeof(struct unary_op); + break; + } + + /* logical */ + case BYTECODE_OP_AND: + case BYTECODE_OP_OR: + { + /* Continue to next instruction */ + /* Pop 1 when jump not taken */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + next_pc += sizeof(struct logical_op); + break; + } + + /* load field ref */ + case BYTECODE_OP_LOAD_FIELD_REF: + { + ERR("Unknown field ref type\n"); + ret = -EINVAL; + goto end; + } + /* get context ref */ + case BYTECODE_OP_GET_CONTEXT_REF: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_UNKNOWN; + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + break; + } + case BYTECODE_OP_LOAD_FIELD_REF_STRING: + case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE: + case BYTECODE_OP_GET_CONTEXT_REF_STRING: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_STRING; + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + break; + } + case BYTECODE_OP_LOAD_FIELD_REF_S64: + case BYTECODE_OP_GET_CONTEXT_REF_S64: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + break; + } + case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE: + case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_DOUBLE; + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + break; + } + + /* load from immediate operand */ + case BYTECODE_OP_LOAD_STRING: + { + struct load_op *insn = (struct load_op *) pc; + + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_STRING; + next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; + break; + } + + case BYTECODE_OP_LOAD_STAR_GLOB_STRING: + { + struct load_op *insn = (struct load_op *) pc; + + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_STAR_GLOB_STRING; + next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; + break; + } + + case BYTECODE_OP_LOAD_S64: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct load_op) + + sizeof(struct literal_numeric); + break; + } + + case BYTECODE_OP_LOAD_DOUBLE: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_DOUBLE; + next_pc += sizeof(struct load_op) + + sizeof(struct literal_double); + break; + } + + /* cast */ + case BYTECODE_OP_CAST_TO_S64: + { + struct cast_op *insn = (struct cast_op *) pc; + + switch (vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_STRING: + case REG_STAR_GLOB_STRING: + ERR("Cast op can only be applied to numeric or floating point registers\n"); + ret = -EINVAL; + goto end; + case REG_S64: + insn->op = BYTECODE_OP_CAST_NOP; + break; + case REG_DOUBLE: + insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64; + break; + case REG_UNKNOWN: + case REG_U64: + break; + } + /* Pop 1, push 1 */ 
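/*
 * Annotation (added for clarity, not part of the original patch): the
 * switch above rewrites the generic cast according to the virtual-stack
 * type: BYTECODE_OP_CAST_NOP for REG_S64, BYTECODE_OP_CAST_DOUBLE_TO_S64
 * for REG_DOUBLE, and no rewrite for REG_U64 or REG_UNKNOWN, which are
 * left to the interpreter's dynamic-typing dispatch. For the double case
 * the interpreter then executes, roughly:
 *
 *     estack_ax_v = (int64_t) estack_ax(stack, top)->u.d;
 *     estack_ax_t = REG_S64;
 *
 * Whatever the input type, the result register is typed S64, which is
 * what the assignment below records on the virtual stack.
 */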
+ vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct cast_op); + break; + } + case BYTECODE_OP_CAST_DOUBLE_TO_S64: + { + /* Pop 1, push 1 */ + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct cast_op); + break; + } + case BYTECODE_OP_CAST_NOP: + { + next_pc += sizeof(struct cast_op); + break; + } + + /* + * Instructions for recursive traversal through composed types. + */ + case BYTECODE_OP_GET_CONTEXT_ROOT: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_PTR; + vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT; + next_pc += sizeof(struct load_op); + break; + } + case BYTECODE_OP_GET_APP_CONTEXT_ROOT: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_PTR; + vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT; + next_pc += sizeof(struct load_op); + break; + } + case BYTECODE_OP_GET_PAYLOAD_ROOT: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_PTR; + vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_LOAD_FIELD: + { + struct load_op *insn = (struct load_op *) pc; + + assert(vstack_ax(stack)->type == REG_PTR); + /* Pop 1, push 1 */ + ret = specialize_load_field(vstack_ax(stack), insn); + if (ret) + goto end; + + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_LOAD_FIELD_S8: + case BYTECODE_OP_LOAD_FIELD_S16: + case BYTECODE_OP_LOAD_FIELD_S32: + case BYTECODE_OP_LOAD_FIELD_S64: + { + /* Pop 1, push 1 */ + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_LOAD_FIELD_U8: + case BYTECODE_OP_LOAD_FIELD_U16: + case BYTECODE_OP_LOAD_FIELD_U32: + case BYTECODE_OP_LOAD_FIELD_U64: + { + /* Pop 1, push 1 */ + vstack_ax(stack)->type = REG_U64; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_LOAD_FIELD_STRING: + case BYTECODE_OP_LOAD_FIELD_SEQUENCE: + { + /* Pop 1, push 1 */ + vstack_ax(stack)->type = REG_STRING; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_LOAD_FIELD_DOUBLE: + { + /* Pop 1, push 1 */ + vstack_ax(stack)->type = REG_DOUBLE; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_GET_SYMBOL: + { + struct load_op *insn = (struct load_op *) pc; + + dbg_printf("op get symbol\n"); + switch (vstack_ax(stack)->load.type) { + case LOAD_OBJECT: + ERR("Nested fields not implemented yet."); + ret = -EINVAL; + goto end; + case LOAD_ROOT_CONTEXT: + /* Lookup context field. */ + ret = specialize_context_lookup(*pctx, + bytecode, insn, + &vstack_ax(stack)->load); + if (ret) + goto end; + break; + case LOAD_ROOT_APP_CONTEXT: + /* Lookup app context field. */ + ret = specialize_app_context_lookup(pctx, + bytecode, insn, + &vstack_ax(stack)->load); + if (ret) + goto end; + break; + case LOAD_ROOT_PAYLOAD: + /* Lookup event payload field. */ + ret = specialize_payload_lookup(event_desc, + bytecode, insn, + &vstack_ax(stack)->load); + if (ret) + goto end; + break; + } + next_pc += sizeof(struct load_op) + sizeof(struct get_symbol); + break; + } + + case BYTECODE_OP_GET_SYMBOL_FIELD: + { + /* Always generated by specialize phase. 
*/ + ret = -EINVAL; + goto end; + } + + case BYTECODE_OP_GET_INDEX_U16: + { + struct load_op *insn = (struct load_op *) pc; + struct get_index_u16 *index = (struct get_index_u16 *) insn->data; + + dbg_printf("op get index u16\n"); + /* Pop 1, push 1 */ + ret = specialize_get_index(bytecode, insn, index->index, + vstack_ax(stack), sizeof(*index)); + if (ret) + goto end; + next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16); + break; + } + + case BYTECODE_OP_GET_INDEX_U64: + { + struct load_op *insn = (struct load_op *) pc; + struct get_index_u64 *index = (struct get_index_u64 *) insn->data; + + dbg_printf("op get index u64\n"); + /* Pop 1, push 1 */ + ret = specialize_get_index(bytecode, insn, index->index, + vstack_ax(stack), sizeof(*index)); + if (ret) + goto end; + next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64); + break; + } + + } + } +end: + return ret; +} diff --git a/liblttng-ust/lttng-bytecode-validator.c b/liblttng-ust/lttng-bytecode-validator.c new file mode 100644 index 00000000..f60c9367 --- /dev/null +++ b/liblttng-ust/lttng-bytecode-validator.c @@ -0,0 +1,2024 @@ +/* + * lttng-bytecode-validator.c + * + * LTTng UST bytecode validator. + * + * Copyright (C) 2010-2016 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define _LGPL_SOURCE +#include +#include +#include + +#include +#include + +#include "lttng-bytecode.h" +#include "lttng-hash-helper.h" +#include "string-utils.h" +#include "ust-events-internal.h" + +/* + * Number of merge points for hash table size. Hash table initialized to + * that size, and we do not resize, because we do not want to trigger + * RCU worker thread execution: fall-back on linear traversal if number + * of merge points exceeds this value. 
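+ * A merge point is the target of a logical AND/OR short-circuit jump,
+ * i.e. an instruction reachable through more than one execution path;
+ * the virtual stack state recorded for each incoming path must agree.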
+ */ +#define DEFAULT_NR_MERGE_POINTS 128 +#define MIN_NR_BUCKETS 128 +#define MAX_NR_BUCKETS 128 + +/* merge point table node */ +struct lfht_mp_node { + struct cds_lfht_node node; + + /* Context at merge point */ + struct vstack stack; + unsigned long target_pc; +}; + +static unsigned long lttng_hash_seed; +static unsigned int lttng_hash_seed_ready; + +static +int lttng_hash_match(struct cds_lfht_node *node, const void *key) +{ + struct lfht_mp_node *mp_node = + caa_container_of(node, struct lfht_mp_node, node); + unsigned long key_pc = (unsigned long) key; + + if (mp_node->target_pc == key_pc) + return 1; + else + return 0; +} + +static +int merge_points_compare(const struct vstack *stacka, + const struct vstack *stackb) +{ + int i, len; + + if (stacka->top != stackb->top) + return 1; + len = stacka->top + 1; + assert(len >= 0); + for (i = 0; i < len; i++) { + if (stacka->e[i].type != REG_UNKNOWN + && stackb->e[i].type != REG_UNKNOWN + && stacka->e[i].type != stackb->e[i].type) + return 1; + } + return 0; +} + +static +int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc, + const struct vstack *stack) +{ + struct lfht_mp_node *node; + unsigned long hash = lttng_hash_mix((const char *) target_pc, + sizeof(target_pc), + lttng_hash_seed); + struct cds_lfht_node *ret; + + dbg_printf("Bytecode: adding merge point at offset %lu, hash %lu\n", + target_pc, hash); + node = zmalloc(sizeof(struct lfht_mp_node)); + if (!node) + return -ENOMEM; + node->target_pc = target_pc; + memcpy(&node->stack, stack, sizeof(node->stack)); + ret = cds_lfht_add_unique(ht, hash, lttng_hash_match, + (const char *) target_pc, &node->node); + if (ret != &node->node) { + struct lfht_mp_node *ret_mp = + caa_container_of(ret, struct lfht_mp_node, node); + + /* Key already present */ + dbg_printf("Bytecode: compare merge points for offset %lu, hash %lu\n", + target_pc, hash); + free(node); + if (merge_points_compare(stack, &ret_mp->stack)) { + ERR("Merge points differ for offset %lu\n", + target_pc); + return -EINVAL; + } + } + return 0; +} + +/* + * Binary comparators use top of stack and top of stack -1. + * Return 0 if typing is known to match, 1 if typing is dynamic + * (unknown), negative error value on error. 
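+ * Operands flagged as unknown are accepted here and left for the
+ * interpreter to resolve at run time.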
+ */ +static +int bin_op_compare_check(struct vstack *stack, bytecode_opcode_t opcode, + const char *str) +{ + if (unlikely(!vstack_ax(stack) || !vstack_bx(stack))) + goto error_empty; + + switch (vstack_ax(stack)->type) { + default: + goto error_type; + + case REG_UNKNOWN: + goto unknown; + case REG_STRING: + switch (vstack_bx(stack)->type) { + default: + goto error_type; + + case REG_UNKNOWN: + goto unknown; + case REG_STRING: + break; + case REG_STAR_GLOB_STRING: + if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) { + goto error_mismatch; + } + break; + case REG_S64: + case REG_U64: + case REG_DOUBLE: + goto error_mismatch; + } + break; + case REG_STAR_GLOB_STRING: + switch (vstack_bx(stack)->type) { + default: + goto error_type; + + case REG_UNKNOWN: + goto unknown; + case REG_STRING: + if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) { + goto error_mismatch; + } + break; + case REG_STAR_GLOB_STRING: + case REG_S64: + case REG_U64: + case REG_DOUBLE: + goto error_mismatch; + } + break; + case REG_S64: + case REG_U64: + case REG_DOUBLE: + switch (vstack_bx(stack)->type) { + default: + goto error_type; + + case REG_UNKNOWN: + goto unknown; + case REG_STRING: + case REG_STAR_GLOB_STRING: + goto error_mismatch; + case REG_S64: + case REG_U64: + case REG_DOUBLE: + break; + } + break; + } + return 0; + +unknown: + return 1; + +error_mismatch: + ERR("type mismatch for '%s' binary operator\n", str); + return -EINVAL; + +error_empty: + ERR("empty stack for '%s' binary operator\n", str); + return -EINVAL; + +error_type: + ERR("unknown type for '%s' binary operator\n", str); + return -EINVAL; +} + +/* + * Binary bitwise operators use top of stack and top of stack -1. + * Return 0 if typing is known to match, 1 if typing is dynamic + * (unknown), negative error value on error. + */ +static +int bin_op_bitwise_check(struct vstack *stack, bytecode_opcode_t opcode, + const char *str) +{ + if (unlikely(!vstack_ax(stack) || !vstack_bx(stack))) + goto error_empty; + + switch (vstack_ax(stack)->type) { + default: + goto error_type; + + case REG_UNKNOWN: + goto unknown; + case REG_S64: + case REG_U64: + switch (vstack_bx(stack)->type) { + default: + goto error_type; + + case REG_UNKNOWN: + goto unknown; + case REG_S64: + case REG_U64: + break; + } + break; + } + return 0; + +unknown: + return 1; + +error_empty: + ERR("empty stack for '%s' binary operator\n", str); + return -EINVAL; + +error_type: + ERR("unknown type for '%s' binary operator\n", str); + return -EINVAL; +} + +static +int validate_get_symbol(struct bytecode_runtime *bytecode, + const struct get_symbol *sym) +{ + const char *str, *str_limit; + size_t len_limit; + + if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset) + return -EINVAL; + + str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset; + str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len; + len_limit = str_limit - str; + if (strnlen(str, len_limit) == len_limit) + return -EINVAL; + return 0; +} + +/* + * Validate bytecode range overflow within the validation pass. + * Called for each instruction encountered. 
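+ * The instruction, including any immediate operands, must fit entirely
+ * within the bytecode buffer.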
+ */ +static +int bytecode_validate_overflow(struct bytecode_runtime *bytecode, + char *start_pc, char *pc) +{ + int ret = 0; + + switch (*(bytecode_opcode_t *) pc) { + case BYTECODE_OP_UNKNOWN: + default: + { + ERR("unknown bytecode op %u\n", + (unsigned int) *(bytecode_opcode_t *) pc); + ret = -EINVAL; + break; + } + + case BYTECODE_OP_RETURN: + case BYTECODE_OP_RETURN_S64: + { + if (unlikely(pc + sizeof(struct return_op) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + } + + /* binary */ + case BYTECODE_OP_MUL: + case BYTECODE_OP_DIV: + case BYTECODE_OP_MOD: + case BYTECODE_OP_PLUS: + case BYTECODE_OP_MINUS: + { + ERR("unsupported bytecode op %u\n", + (unsigned int) *(bytecode_opcode_t *) pc); + ret = -EINVAL; + break; + } + + case BYTECODE_OP_EQ: + case BYTECODE_OP_NE: + case BYTECODE_OP_GT: + case BYTECODE_OP_LT: + case BYTECODE_OP_GE: + case BYTECODE_OP_LE: + case BYTECODE_OP_EQ_STRING: + case BYTECODE_OP_NE_STRING: + case BYTECODE_OP_GT_STRING: + case BYTECODE_OP_LT_STRING: + case BYTECODE_OP_GE_STRING: + case BYTECODE_OP_LE_STRING: + case BYTECODE_OP_EQ_STAR_GLOB_STRING: + case BYTECODE_OP_NE_STAR_GLOB_STRING: + case BYTECODE_OP_EQ_S64: + case BYTECODE_OP_NE_S64: + case BYTECODE_OP_GT_S64: + case BYTECODE_OP_LT_S64: + case BYTECODE_OP_GE_S64: + case BYTECODE_OP_LE_S64: + case BYTECODE_OP_EQ_DOUBLE: + case BYTECODE_OP_NE_DOUBLE: + case BYTECODE_OP_GT_DOUBLE: + case BYTECODE_OP_LT_DOUBLE: + case BYTECODE_OP_GE_DOUBLE: + case BYTECODE_OP_LE_DOUBLE: + case BYTECODE_OP_EQ_DOUBLE_S64: + case BYTECODE_OP_NE_DOUBLE_S64: + case BYTECODE_OP_GT_DOUBLE_S64: + case BYTECODE_OP_LT_DOUBLE_S64: + case BYTECODE_OP_GE_DOUBLE_S64: + case BYTECODE_OP_LE_DOUBLE_S64: + case BYTECODE_OP_EQ_S64_DOUBLE: + case BYTECODE_OP_NE_S64_DOUBLE: + case BYTECODE_OP_GT_S64_DOUBLE: + case BYTECODE_OP_LT_S64_DOUBLE: + case BYTECODE_OP_GE_S64_DOUBLE: + case BYTECODE_OP_LE_S64_DOUBLE: + case BYTECODE_OP_BIT_RSHIFT: + case BYTECODE_OP_BIT_LSHIFT: + case BYTECODE_OP_BIT_AND: + case BYTECODE_OP_BIT_OR: + case BYTECODE_OP_BIT_XOR: + { + if (unlikely(pc + sizeof(struct binary_op) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + } + + /* unary */ + case BYTECODE_OP_UNARY_PLUS: + case BYTECODE_OP_UNARY_MINUS: + case BYTECODE_OP_UNARY_NOT: + case BYTECODE_OP_UNARY_PLUS_S64: + case BYTECODE_OP_UNARY_MINUS_S64: + case BYTECODE_OP_UNARY_NOT_S64: + case BYTECODE_OP_UNARY_PLUS_DOUBLE: + case BYTECODE_OP_UNARY_MINUS_DOUBLE: + case BYTECODE_OP_UNARY_NOT_DOUBLE: + case BYTECODE_OP_UNARY_BIT_NOT: + { + if (unlikely(pc + sizeof(struct unary_op) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + } + + /* logical */ + case BYTECODE_OP_AND: + case BYTECODE_OP_OR: + { + if (unlikely(pc + sizeof(struct logical_op) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + } + + /* load field ref */ + case BYTECODE_OP_LOAD_FIELD_REF: + { + ERR("Unknown field ref type\n"); + ret = -EINVAL; + break; + } + + /* get context ref */ + case BYTECODE_OP_GET_CONTEXT_REF: + case BYTECODE_OP_LOAD_FIELD_REF_STRING: + case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE: + case BYTECODE_OP_LOAD_FIELD_REF_S64: + case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE: + case BYTECODE_OP_GET_CONTEXT_REF_STRING: + case BYTECODE_OP_GET_CONTEXT_REF_S64: + case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE: + { + if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + } + + /* load from immediate operand */ + case BYTECODE_OP_LOAD_STRING: + case 
BYTECODE_OP_LOAD_STAR_GLOB_STRING: + { + struct load_op *insn = (struct load_op *) pc; + uint32_t str_len, maxlen; + + if (unlikely(pc + sizeof(struct load_op) + > start_pc + bytecode->len)) { + ret = -ERANGE; + break; + } + + maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op); + str_len = strnlen(insn->data, maxlen); + if (unlikely(str_len >= maxlen)) { + /* Final '\0' not found within range */ + ret = -ERANGE; + } + break; + } + + case BYTECODE_OP_LOAD_S64: + { + if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + } + + case BYTECODE_OP_LOAD_DOUBLE: + { + if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + } + + case BYTECODE_OP_CAST_TO_S64: + case BYTECODE_OP_CAST_DOUBLE_TO_S64: + case BYTECODE_OP_CAST_NOP: + { + if (unlikely(pc + sizeof(struct cast_op) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + } + + /* + * Instructions for recursive traversal through composed types. + */ + case BYTECODE_OP_GET_CONTEXT_ROOT: + case BYTECODE_OP_GET_APP_CONTEXT_ROOT: + case BYTECODE_OP_GET_PAYLOAD_ROOT: + case BYTECODE_OP_LOAD_FIELD: + case BYTECODE_OP_LOAD_FIELD_S8: + case BYTECODE_OP_LOAD_FIELD_S16: + case BYTECODE_OP_LOAD_FIELD_S32: + case BYTECODE_OP_LOAD_FIELD_S64: + case BYTECODE_OP_LOAD_FIELD_U8: + case BYTECODE_OP_LOAD_FIELD_U16: + case BYTECODE_OP_LOAD_FIELD_U32: + case BYTECODE_OP_LOAD_FIELD_U64: + case BYTECODE_OP_LOAD_FIELD_STRING: + case BYTECODE_OP_LOAD_FIELD_SEQUENCE: + case BYTECODE_OP_LOAD_FIELD_DOUBLE: + if (unlikely(pc + sizeof(struct load_op) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + + case BYTECODE_OP_GET_SYMBOL: + { + struct load_op *insn = (struct load_op *) pc; + struct get_symbol *sym = (struct get_symbol *) insn->data; + + if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol) + > start_pc + bytecode->len)) { + ret = -ERANGE; + break; + } + ret = validate_get_symbol(bytecode, sym); + break; + } + + case BYTECODE_OP_GET_SYMBOL_FIELD: + ERR("Unexpected get symbol field"); + ret = -EINVAL; + break; + + case BYTECODE_OP_GET_INDEX_U16: + if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + + case BYTECODE_OP_GET_INDEX_U64: + if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + } + + return ret; +} + +static +unsigned long delete_all_nodes(struct cds_lfht *ht) +{ + struct cds_lfht_iter iter; + struct lfht_mp_node *node; + unsigned long nr_nodes = 0; + + cds_lfht_for_each_entry(ht, &iter, node, node) { + int ret; + + ret = cds_lfht_del(ht, cds_lfht_iter_get_node(&iter)); + assert(!ret); + /* note: this hash table is never used concurrently */ + free(node); + nr_nodes++; + } + return nr_nodes; +} + +/* + * Return value: + * >=0: success + * <0: error + */ +static +int validate_instruction_context(struct bytecode_runtime *bytecode, + struct vstack *stack, + char *start_pc, + char *pc) +{ + int ret = 0; + const bytecode_opcode_t opcode = *(bytecode_opcode_t *) pc; + + switch (opcode) { + case BYTECODE_OP_UNKNOWN: + default: + { + ERR("unknown bytecode op %u\n", + (unsigned int) *(bytecode_opcode_t *) pc); + ret = -EINVAL; + goto end; + } + + case BYTECODE_OP_RETURN: + case BYTECODE_OP_RETURN_S64: + { + goto end; + } + + /* binary */ + case BYTECODE_OP_MUL: + case BYTECODE_OP_DIV: + 
case BYTECODE_OP_MOD: + case BYTECODE_OP_PLUS: + case BYTECODE_OP_MINUS: + { + ERR("unsupported bytecode op %u\n", + (unsigned int) opcode); + ret = -EINVAL; + goto end; + } + + case BYTECODE_OP_EQ: + { + ret = bin_op_compare_check(stack, opcode, "=="); + if (ret < 0) + goto end; + break; + } + case BYTECODE_OP_NE: + { + ret = bin_op_compare_check(stack, opcode, "!="); + if (ret < 0) + goto end; + break; + } + case BYTECODE_OP_GT: + { + ret = bin_op_compare_check(stack, opcode, ">"); + if (ret < 0) + goto end; + break; + } + case BYTECODE_OP_LT: + { + ret = bin_op_compare_check(stack, opcode, "<"); + if (ret < 0) + goto end; + break; + } + case BYTECODE_OP_GE: + { + ret = bin_op_compare_check(stack, opcode, ">="); + if (ret < 0) + goto end; + break; + } + case BYTECODE_OP_LE: + { + ret = bin_op_compare_check(stack, opcode, "<="); + if (ret < 0) + goto end; + break; + } + + case BYTECODE_OP_EQ_STRING: + case BYTECODE_OP_NE_STRING: + case BYTECODE_OP_GT_STRING: + case BYTECODE_OP_LT_STRING: + case BYTECODE_OP_GE_STRING: + case BYTECODE_OP_LE_STRING: + { + if (!vstack_ax(stack) || !vstack_bx(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_STRING + || vstack_bx(stack)->type != REG_STRING) { + ERR("Unexpected register type for string comparator\n"); + ret = -EINVAL; + goto end; + } + break; + } + + case BYTECODE_OP_EQ_STAR_GLOB_STRING: + case BYTECODE_OP_NE_STAR_GLOB_STRING: + { + if (!vstack_ax(stack) || !vstack_bx(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING + && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) { + ERR("Unexpected register type for globbing pattern comparator\n"); + ret = -EINVAL; + goto end; + } + break; + } + + case BYTECODE_OP_EQ_S64: + case BYTECODE_OP_NE_S64: + case BYTECODE_OP_GT_S64: + case BYTECODE_OP_LT_S64: + case BYTECODE_OP_GE_S64: + case BYTECODE_OP_LE_S64: + { + if (!vstack_ax(stack) || !vstack_bx(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + ERR("Unexpected register type for s64 comparator\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_bx(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + ERR("Unexpected register type for s64 comparator\n"); + ret = -EINVAL; + goto end; + } + break; + } + + case BYTECODE_OP_EQ_DOUBLE: + case BYTECODE_OP_NE_DOUBLE: + case BYTECODE_OP_GT_DOUBLE: + case BYTECODE_OP_LT_DOUBLE: + case BYTECODE_OP_GE_DOUBLE: + case BYTECODE_OP_LE_DOUBLE: + { + if (!vstack_ax(stack) || !vstack_bx(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_DOUBLE) { + ERR("Double operator should have two double registers\n"); + ret = -EINVAL; + goto end; + } + break; + } + + case BYTECODE_OP_EQ_DOUBLE_S64: + case BYTECODE_OP_NE_DOUBLE_S64: + case BYTECODE_OP_GT_DOUBLE_S64: + case BYTECODE_OP_LT_DOUBLE_S64: + case BYTECODE_OP_GE_DOUBLE_S64: + case BYTECODE_OP_LE_DOUBLE_S64: + { + if (!vstack_ax(stack) || !vstack_bx(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + ERR("Double-S64 operator has unexpected register types\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_bx(stack)->type) { + case REG_DOUBLE: + break; + default: + ERR("Double-S64 operator has unexpected register types\n"); + ret = 
-EINVAL; + goto end; + } + break; + } + + case BYTECODE_OP_EQ_S64_DOUBLE: + case BYTECODE_OP_NE_S64_DOUBLE: + case BYTECODE_OP_GT_S64_DOUBLE: + case BYTECODE_OP_LT_S64_DOUBLE: + case BYTECODE_OP_GE_S64_DOUBLE: + case BYTECODE_OP_LE_S64_DOUBLE: + { + if (!vstack_ax(stack) || !vstack_bx(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_DOUBLE: + break; + default: + ERR("S64-Double operator has unexpected register types\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_bx(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + ERR("S64-Double operator has unexpected register types\n"); + ret = -EINVAL; + goto end; + } + break; + } + + case BYTECODE_OP_BIT_RSHIFT: + ret = bin_op_bitwise_check(stack, opcode, ">>"); + if (ret < 0) + goto end; + break; + case BYTECODE_OP_BIT_LSHIFT: + ret = bin_op_bitwise_check(stack, opcode, "<<"); + if (ret < 0) + goto end; + break; + case BYTECODE_OP_BIT_AND: + ret = bin_op_bitwise_check(stack, opcode, "&"); + if (ret < 0) + goto end; + break; + case BYTECODE_OP_BIT_OR: + ret = bin_op_bitwise_check(stack, opcode, "|"); + if (ret < 0) + goto end; + break; + case BYTECODE_OP_BIT_XOR: + ret = bin_op_bitwise_check(stack, opcode, "^"); + if (ret < 0) + goto end; + break; + + /* unary */ + case BYTECODE_OP_UNARY_PLUS: + case BYTECODE_OP_UNARY_MINUS: + case BYTECODE_OP_UNARY_NOT: + { + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_STRING: + case REG_STAR_GLOB_STRING: + ERR("Unary op can only be applied to numeric or floating point registers\n"); + ret = -EINVAL; + goto end; + case REG_S64: + break; + case REG_U64: + break; + case REG_DOUBLE: + break; + case REG_UNKNOWN: + break; + } + break; + } + case BYTECODE_OP_UNARY_BIT_NOT: + { + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_STRING: + case REG_STAR_GLOB_STRING: + case REG_DOUBLE: + ERR("Unary bitwise op can only be applied to numeric registers\n"); + ret = -EINVAL; + goto end; + case REG_S64: + break; + case REG_U64: + break; + case REG_UNKNOWN: + break; + } + break; + } + + case BYTECODE_OP_UNARY_PLUS_S64: + case BYTECODE_OP_UNARY_MINUS_S64: + case BYTECODE_OP_UNARY_NOT_S64: + { + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_S64 && + vstack_ax(stack)->type != REG_U64) { + ERR("Invalid register type\n"); + ret = -EINVAL; + goto end; + } + break; + } + + case BYTECODE_OP_UNARY_PLUS_DOUBLE: + case BYTECODE_OP_UNARY_MINUS_DOUBLE: + case BYTECODE_OP_UNARY_NOT_DOUBLE: + { + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_DOUBLE) { + ERR("Invalid register type\n"); + ret = -EINVAL; + goto end; + } + break; + } + + /* logical */ + case BYTECODE_OP_AND: + case BYTECODE_OP_OR: + { + struct logical_op *insn = (struct logical_op *) pc; + + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_S64 + && vstack_ax(stack)->type != REG_U64 + && vstack_ax(stack)->type != REG_UNKNOWN) { + ERR("Logical comparator expects S64, U64 or dynamic register\n"); + ret = -EINVAL; + goto end; + } + + dbg_printf("Validate jumping to bytecode 
offset %u\n", + (unsigned int) insn->skip_offset); + if (unlikely(start_pc + insn->skip_offset <= pc)) { + ERR("Loops are not allowed in bytecode\n"); + ret = -EINVAL; + goto end; + } + break; + } + + /* load field ref */ + case BYTECODE_OP_LOAD_FIELD_REF: + { + ERR("Unknown field ref type\n"); + ret = -EINVAL; + goto end; + } + case BYTECODE_OP_LOAD_FIELD_REF_STRING: + case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE: + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + + dbg_printf("Validate load field ref offset %u type string\n", + ref->offset); + break; + } + case BYTECODE_OP_LOAD_FIELD_REF_S64: + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + + dbg_printf("Validate load field ref offset %u type s64\n", + ref->offset); + break; + } + case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE: + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + + dbg_printf("Validate load field ref offset %u type double\n", + ref->offset); + break; + } + + /* load from immediate operand */ + case BYTECODE_OP_LOAD_STRING: + case BYTECODE_OP_LOAD_STAR_GLOB_STRING: + { + break; + } + + case BYTECODE_OP_LOAD_S64: + { + break; + } + + case BYTECODE_OP_LOAD_DOUBLE: + { + break; + } + + case BYTECODE_OP_CAST_TO_S64: + case BYTECODE_OP_CAST_DOUBLE_TO_S64: + { + struct cast_op *insn = (struct cast_op *) pc; + + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_STRING: + case REG_STAR_GLOB_STRING: + ERR("Cast op can only be applied to numeric or floating point registers\n"); + ret = -EINVAL; + goto end; + case REG_S64: + break; + case REG_U64: + break; + case REG_DOUBLE: + break; + case REG_UNKNOWN: + break; + } + if (insn->op == BYTECODE_OP_CAST_DOUBLE_TO_S64) { + if (vstack_ax(stack)->type != REG_DOUBLE) { + ERR("Cast expects double\n"); + ret = -EINVAL; + goto end; + } + } + break; + } + case BYTECODE_OP_CAST_NOP: + { + break; + } + + /* get context ref */ + case BYTECODE_OP_GET_CONTEXT_REF: + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + + dbg_printf("Validate get context ref offset %u type dynamic\n", + ref->offset); + break; + } + case BYTECODE_OP_GET_CONTEXT_REF_STRING: + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + + dbg_printf("Validate get context ref offset %u type string\n", + ref->offset); + break; + } + case BYTECODE_OP_GET_CONTEXT_REF_S64: + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + + dbg_printf("Validate get context ref offset %u type s64\n", + ref->offset); + break; + } + case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE: + { + struct load_op *insn = (struct load_op *) pc; + struct field_ref *ref = (struct field_ref *) insn->data; + + dbg_printf("Validate get context ref offset %u type double\n", + ref->offset); + break; + } + + /* + * Instructions for recursive traversal through composed types. 
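+ * Such a load is a GET_*_ROOT op selecting the namespace, followed by
+ * GET_SYMBOL / GET_INDEX ops walking the nested type, and finally a
+ * LOAD_FIELD* op fetching the value.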
+ */ + case BYTECODE_OP_GET_CONTEXT_ROOT: + { + dbg_printf("Validate get context root\n"); + break; + } + case BYTECODE_OP_GET_APP_CONTEXT_ROOT: + { + dbg_printf("Validate get app context root\n"); + break; + } + case BYTECODE_OP_GET_PAYLOAD_ROOT: + { + dbg_printf("Validate get payload root\n"); + break; + } + case BYTECODE_OP_LOAD_FIELD: + { + /* + * We tolerate that field type is unknown at validation, + * because we are performing the load specialization in + * a phase after validation. + */ + dbg_printf("Validate load field\n"); + break; + } + case BYTECODE_OP_LOAD_FIELD_S8: + { + dbg_printf("Validate load field s8\n"); + break; + } + case BYTECODE_OP_LOAD_FIELD_S16: + { + dbg_printf("Validate load field s16\n"); + break; + } + case BYTECODE_OP_LOAD_FIELD_S32: + { + dbg_printf("Validate load field s32\n"); + break; + } + case BYTECODE_OP_LOAD_FIELD_S64: + { + dbg_printf("Validate load field s64\n"); + break; + } + case BYTECODE_OP_LOAD_FIELD_U8: + { + dbg_printf("Validate load field u8\n"); + break; + } + case BYTECODE_OP_LOAD_FIELD_U16: + { + dbg_printf("Validate load field u16\n"); + break; + } + case BYTECODE_OP_LOAD_FIELD_U32: + { + dbg_printf("Validate load field u32\n"); + break; + } + case BYTECODE_OP_LOAD_FIELD_U64: + { + dbg_printf("Validate load field u64\n"); + break; + } + case BYTECODE_OP_LOAD_FIELD_STRING: + { + dbg_printf("Validate load field string\n"); + break; + } + case BYTECODE_OP_LOAD_FIELD_SEQUENCE: + { + dbg_printf("Validate load field sequence\n"); + break; + } + case BYTECODE_OP_LOAD_FIELD_DOUBLE: + { + dbg_printf("Validate load field double\n"); + break; + } + + case BYTECODE_OP_GET_SYMBOL: + { + struct load_op *insn = (struct load_op *) pc; + struct get_symbol *sym = (struct get_symbol *) insn->data; + + dbg_printf("Validate get symbol offset %u\n", sym->offset); + break; + } + + case BYTECODE_OP_GET_SYMBOL_FIELD: + { + struct load_op *insn = (struct load_op *) pc; + struct get_symbol *sym = (struct get_symbol *) insn->data; + + dbg_printf("Validate get symbol field offset %u\n", sym->offset); + break; + } + + case BYTECODE_OP_GET_INDEX_U16: + { + struct load_op *insn = (struct load_op *) pc; + struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data; + + dbg_printf("Validate get index u16 index %u\n", get_index->index); + break; + } + + case BYTECODE_OP_GET_INDEX_U64: + { + struct load_op *insn = (struct load_op *) pc; + struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data; + + dbg_printf("Validate get index u64 index %" PRIu64 "\n", get_index->index); + break; + } + } +end: + return ret; +} + +/* + * Return value: + * 0: success + * <0: error + */ +static +int validate_instruction_all_contexts(struct bytecode_runtime *bytecode, + struct cds_lfht *merge_points, + struct vstack *stack, + char *start_pc, + char *pc) +{ + int ret; + unsigned long target_pc = pc - start_pc; + struct cds_lfht_iter iter; + struct cds_lfht_node *node; + struct lfht_mp_node *mp_node; + unsigned long hash; + + /* Validate the context resulting from the previous instruction */ + ret = validate_instruction_context(bytecode, stack, start_pc, pc); + if (ret < 0) + return ret; + + /* Validate merge points */ + hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc), + lttng_hash_seed); + cds_lfht_lookup(merge_points, hash, lttng_hash_match, + (const char *) target_pc, &iter); + node = cds_lfht_iter_get_node(&iter); + if (node) { + mp_node = caa_container_of(node, struct lfht_mp_node, node); + + dbg_printf("Bytecode: validate merge point at offset 
%lu\n", + target_pc); + if (merge_points_compare(stack, &mp_node->stack)) { + ERR("Merge points differ for offset %lu\n", + target_pc); + return -EINVAL; + } + /* Once validated, we can remove the merge point */ + dbg_printf("Bytecode: remove merge point at offset %lu\n", + target_pc); + ret = cds_lfht_del(merge_points, node); + assert(!ret); + } + return 0; +} + +/* + * Return value: + * >0: going to next insn. + * 0: success, stop iteration. + * <0: error + */ +static +int exec_insn(struct bytecode_runtime *bytecode, + struct cds_lfht *merge_points, + struct vstack *stack, + char **_next_pc, + char *pc) +{ + int ret = 1; + char *next_pc = *_next_pc; + + switch (*(bytecode_opcode_t *) pc) { + case BYTECODE_OP_UNKNOWN: + default: + { + ERR("unknown bytecode op %u\n", + (unsigned int) *(bytecode_opcode_t *) pc); + ret = -EINVAL; + goto end; + } + + case BYTECODE_OP_RETURN: + { + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + case REG_DOUBLE: + case REG_STRING: + case REG_PTR: + case REG_UNKNOWN: + break; + default: + ERR("Unexpected register type %d at end of bytecode\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + ret = 0; + goto end; + } + case BYTECODE_OP_RETURN_S64: + { + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + case REG_UNKNOWN: + ERR("Unexpected register type %d at end of bytecode\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + ret = 0; + goto end; + } + + /* binary */ + case BYTECODE_OP_MUL: + case BYTECODE_OP_DIV: + case BYTECODE_OP_MOD: + case BYTECODE_OP_PLUS: + case BYTECODE_OP_MINUS: + { + ERR("unsupported bytecode op %u\n", + (unsigned int) *(bytecode_opcode_t *) pc); + ret = -EINVAL; + goto end; + } + + case BYTECODE_OP_EQ: + case BYTECODE_OP_NE: + case BYTECODE_OP_GT: + case BYTECODE_OP_LT: + case BYTECODE_OP_GE: + case BYTECODE_OP_LE: + case BYTECODE_OP_EQ_STRING: + case BYTECODE_OP_NE_STRING: + case BYTECODE_OP_GT_STRING: + case BYTECODE_OP_LT_STRING: + case BYTECODE_OP_GE_STRING: + case BYTECODE_OP_LE_STRING: + case BYTECODE_OP_EQ_STAR_GLOB_STRING: + case BYTECODE_OP_NE_STAR_GLOB_STRING: + case BYTECODE_OP_EQ_S64: + case BYTECODE_OP_NE_S64: + case BYTECODE_OP_GT_S64: + case BYTECODE_OP_LT_S64: + case BYTECODE_OP_GE_S64: + case BYTECODE_OP_LE_S64: + case BYTECODE_OP_EQ_DOUBLE: + case BYTECODE_OP_NE_DOUBLE: + case BYTECODE_OP_GT_DOUBLE: + case BYTECODE_OP_LT_DOUBLE: + case BYTECODE_OP_GE_DOUBLE: + case BYTECODE_OP_LE_DOUBLE: + case BYTECODE_OP_EQ_DOUBLE_S64: + case BYTECODE_OP_NE_DOUBLE_S64: + case BYTECODE_OP_GT_DOUBLE_S64: + case BYTECODE_OP_LT_DOUBLE_S64: + case BYTECODE_OP_GE_DOUBLE_S64: + case BYTECODE_OP_LE_DOUBLE_S64: + case BYTECODE_OP_EQ_S64_DOUBLE: + case BYTECODE_OP_NE_S64_DOUBLE: + case BYTECODE_OP_GT_S64_DOUBLE: + case BYTECODE_OP_LT_S64_DOUBLE: + case BYTECODE_OP_GE_S64_DOUBLE: + case BYTECODE_OP_LE_S64_DOUBLE: + { + /* Pop 2, push 1 */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + case REG_DOUBLE: + case REG_STRING: + case REG_STAR_GLOB_STRING: + case REG_UNKNOWN: + break; + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + 
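/* Comparators push their boolean result as a signed 64-bit register. */ +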
vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct binary_op); + break; + } + + case BYTECODE_OP_BIT_RSHIFT: + case BYTECODE_OP_BIT_LSHIFT: + case BYTECODE_OP_BIT_AND: + case BYTECODE_OP_BIT_OR: + case BYTECODE_OP_BIT_XOR: + { + /* Pop 2, push 1 */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + case REG_DOUBLE: + case REG_STRING: + case REG_STAR_GLOB_STRING: + case REG_UNKNOWN: + break; + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + vstack_ax(stack)->type = REG_U64; + next_pc += sizeof(struct binary_op); + break; + } + + /* unary */ + case BYTECODE_OP_UNARY_PLUS: + case BYTECODE_OP_UNARY_MINUS: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_UNKNOWN: + case REG_DOUBLE: + case REG_S64: + case REG_U64: + break; + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_UNKNOWN; + next_pc += sizeof(struct unary_op); + break; + } + + case BYTECODE_OP_UNARY_PLUS_S64: + case BYTECODE_OP_UNARY_MINUS_S64: + case BYTECODE_OP_UNARY_NOT_S64: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + next_pc += sizeof(struct unary_op); + break; + } + + case BYTECODE_OP_UNARY_NOT: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_UNKNOWN: + case REG_DOUBLE: + case REG_S64: + case REG_U64: + break; + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + next_pc += sizeof(struct unary_op); + break; + } + + case BYTECODE_OP_UNARY_BIT_NOT: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_UNKNOWN: + case REG_S64: + case REG_U64: + break; + case REG_DOUBLE: + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + vstack_ax(stack)->type = REG_U64; + next_pc += sizeof(struct unary_op); + break; + } + + case BYTECODE_OP_UNARY_NOT_DOUBLE: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_DOUBLE: + break; + default: + ERR("Incorrect register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct unary_op); + break; + } + + case BYTECODE_OP_UNARY_PLUS_DOUBLE: + case BYTECODE_OP_UNARY_MINUS_DOUBLE: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_DOUBLE: + break; + default: + ERR("Incorrect register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + vstack_ax(stack)->type = 
REG_DOUBLE; + next_pc += sizeof(struct unary_op); + break; + } + + /* logical */ + case BYTECODE_OP_AND: + case BYTECODE_OP_OR: + { + struct logical_op *insn = (struct logical_op *) pc; + int merge_ret; + + /* Add merge point to table */ + merge_ret = merge_point_add_check(merge_points, + insn->skip_offset, stack); + if (merge_ret) { + ret = merge_ret; + goto end; + } + + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + /* There is always a cast-to-s64 operation before a or/and op. */ + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + ERR("Incorrect register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + /* Continue to next instruction */ + /* Pop 1 when jump not taken */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + next_pc += sizeof(struct logical_op); + break; + } + + /* load field ref */ + case BYTECODE_OP_LOAD_FIELD_REF: + { + ERR("Unknown field ref type\n"); + ret = -EINVAL; + goto end; + } + /* get context ref */ + case BYTECODE_OP_GET_CONTEXT_REF: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_UNKNOWN; + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + break; + } + case BYTECODE_OP_LOAD_FIELD_REF_STRING: + case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE: + case BYTECODE_OP_GET_CONTEXT_REF_STRING: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_STRING; + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + break; + } + case BYTECODE_OP_LOAD_FIELD_REF_S64: + case BYTECODE_OP_GET_CONTEXT_REF_S64: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + break; + } + case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE: + case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_DOUBLE; + next_pc += sizeof(struct load_op) + sizeof(struct field_ref); + break; + } + + /* load from immediate operand */ + case BYTECODE_OP_LOAD_STRING: + { + struct load_op *insn = (struct load_op *) pc; + + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_STRING; + next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; + break; + } + + case BYTECODE_OP_LOAD_STAR_GLOB_STRING: + { + struct load_op *insn = (struct load_op *) pc; + + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_STAR_GLOB_STRING; + next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; + break; + } + + case BYTECODE_OP_LOAD_S64: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct load_op) + + sizeof(struct literal_numeric); + break; + } + + case BYTECODE_OP_LOAD_DOUBLE: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_DOUBLE; + next_pc += sizeof(struct load_op) + + sizeof(struct literal_double); + break; + } + + case BYTECODE_OP_CAST_TO_S64: + case BYTECODE_OP_CAST_DOUBLE_TO_S64: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + case REG_DOUBLE: + case REG_UNKNOWN: + break; + default: + ERR("Incorrect register type %d for cast\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + 
goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct cast_op); + break; + } + case BYTECODE_OP_CAST_NOP: + { + next_pc += sizeof(struct cast_op); + break; + } + + /* + * Instructions for recursive traversal through composed types. + */ + case BYTECODE_OP_GET_CONTEXT_ROOT: + case BYTECODE_OP_GET_APP_CONTEXT_ROOT: + case BYTECODE_OP_GET_PAYLOAD_ROOT: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_PTR; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_LOAD_FIELD: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_UNKNOWN; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_LOAD_FIELD_S8: + case BYTECODE_OP_LOAD_FIELD_S16: + case BYTECODE_OP_LOAD_FIELD_S32: + case BYTECODE_OP_LOAD_FIELD_S64: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_LOAD_FIELD_U8: + case BYTECODE_OP_LOAD_FIELD_U16: + case BYTECODE_OP_LOAD_FIELD_U32: + case BYTECODE_OP_LOAD_FIELD_U64: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_U64; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_LOAD_FIELD_STRING: + case BYTECODE_OP_LOAD_FIELD_SEQUENCE: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_STRING; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_LOAD_FIELD_DOUBLE: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_DOUBLE; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_GET_SYMBOL: + case BYTECODE_OP_GET_SYMBOL_FIELD: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + next_pc += sizeof(struct load_op) + sizeof(struct get_symbol); + break; + } + + case BYTECODE_OP_GET_INDEX_U16: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16); + break; + } + + case BYTECODE_OP_GET_INDEX_U64: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; 
+ } + next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64); + break; + } + + } +end: + *_next_pc = next_pc; + return ret; +} + +/* + * Never called concurrently (hash seed is shared). + */ +int lttng_bytecode_validate(struct bytecode_runtime *bytecode) +{ + struct cds_lfht *merge_points; + char *pc, *next_pc, *start_pc; + int ret = -EINVAL; + struct vstack stack; + + vstack_init(&stack); + + if (!lttng_hash_seed_ready) { + lttng_hash_seed = time(NULL); + lttng_hash_seed_ready = 1; + } + /* + * Note: merge_points hash table used by single thread, and + * never concurrently resized. Therefore, we can use it without + * holding RCU read-side lock and free nodes without using + * call_rcu. + */ + merge_points = cds_lfht_new(DEFAULT_NR_MERGE_POINTS, + MIN_NR_BUCKETS, MAX_NR_BUCKETS, + 0, NULL); + if (!merge_points) { + ERR("Error allocating hash table for bytecode validation\n"); + return -ENOMEM; + } + start_pc = &bytecode->code[0]; + for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; + pc = next_pc) { + ret = bytecode_validate_overflow(bytecode, start_pc, pc); + if (ret != 0) { + if (ret == -ERANGE) + ERR("Bytecode overflow\n"); + goto end; + } + dbg_printf("Validating op %s (%u)\n", + print_op((unsigned int) *(bytecode_opcode_t *) pc), + (unsigned int) *(bytecode_opcode_t *) pc); + + /* + * For each instruction, validate the current context + * (traversal of entire execution flow), and validate + * all merge points targeting this instruction. + */ + ret = validate_instruction_all_contexts(bytecode, merge_points, + &stack, start_pc, pc); + if (ret) + goto end; + ret = exec_insn(bytecode, merge_points, &stack, &next_pc, pc); + if (ret <= 0) + goto end; + } +end: + if (delete_all_nodes(merge_points)) { + if (!ret) { + ERR("Unexpected merge points\n"); + ret = -EINVAL; + } + } + if (cds_lfht_destroy(merge_points, NULL)) { + ERR("Error destroying hash table\n"); + } + return ret; +} diff --git a/liblttng-ust/lttng-bytecode.c b/liblttng-ust/lttng-bytecode.c new file mode 100644 index 00000000..9153674d --- /dev/null +++ b/liblttng-ust/lttng-bytecode.c @@ -0,0 +1,592 @@ +/* + * lttng-bytecode.c + * + * LTTng UST bytecode code. + * + * Copyright (C) 2010-2016 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#define _LGPL_SOURCE +#include +#include + +#include + +#include "lttng-bytecode.h" +#include "ust-events-internal.h" + +static const char *opnames[] = { + [ BYTECODE_OP_UNKNOWN ] = "UNKNOWN", + + [ BYTECODE_OP_RETURN ] = "RETURN", + + /* binary */ + [ BYTECODE_OP_MUL ] = "MUL", + [ BYTECODE_OP_DIV ] = "DIV", + [ BYTECODE_OP_MOD ] = "MOD", + [ BYTECODE_OP_PLUS ] = "PLUS", + [ BYTECODE_OP_MINUS ] = "MINUS", + [ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT", + [ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT", + [ BYTECODE_OP_BIT_AND ] = "BIT_AND", + [ BYTECODE_OP_BIT_OR ] = "BIT_OR", + [ BYTECODE_OP_BIT_XOR ] = "BIT_XOR", + + /* binary comparators */ + [ BYTECODE_OP_EQ ] = "EQ", + [ BYTECODE_OP_NE ] = "NE", + [ BYTECODE_OP_GT ] = "GT", + [ BYTECODE_OP_LT ] = "LT", + [ BYTECODE_OP_GE ] = "GE", + [ BYTECODE_OP_LE ] = "LE", + + /* string binary comparators */ + [ BYTECODE_OP_EQ_STRING ] = "EQ_STRING", + [ BYTECODE_OP_NE_STRING ] = "NE_STRING", + [ BYTECODE_OP_GT_STRING ] = "GT_STRING", + [ BYTECODE_OP_LT_STRING ] = "LT_STRING", + [ BYTECODE_OP_GE_STRING ] = "GE_STRING", + [ BYTECODE_OP_LE_STRING ] = "LE_STRING", + + /* s64 binary comparators */ + [ BYTECODE_OP_EQ_S64 ] = "EQ_S64", + [ BYTECODE_OP_NE_S64 ] = "NE_S64", + [ BYTECODE_OP_GT_S64 ] = "GT_S64", + [ BYTECODE_OP_LT_S64 ] = "LT_S64", + [ BYTECODE_OP_GE_S64 ] = "GE_S64", + [ BYTECODE_OP_LE_S64 ] = "LE_S64", + + /* double binary comparators */ + [ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE", + [ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE", + [ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE", + [ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE", + [ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE", + [ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE", + + /* Mixed S64-double binary comparators */ + [ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64", + [ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64", + [ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64", + [ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64", + [ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64", + [ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64", + + [ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE", + [ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE", + [ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE", + [ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE", + [ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE", + [ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE", + + /* unary */ + [ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS", + [ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS", + [ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT", + [ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64", + [ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64", + [ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64", + [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE", + [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE", + [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE", + + /* logical */ + [ BYTECODE_OP_AND ] = "AND", + [ BYTECODE_OP_OR ] = "OR", + + /* load field ref */ + [ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF", + [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING", + [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE", + [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64", + [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE", + + /* load from immediate operand */ + [ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING", + [ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64", + [ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE", + + /* cast */ + [ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64", + [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64", + [ 
BYTECODE_OP_CAST_NOP ] = "CAST_NOP", + + /* get context ref */ + [ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF", + [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING", + [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64", + [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE", + + /* load userspace field ref */ + [ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING", + [ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE", + + /* + * load immediate star globbing pattern (literal string) + * from immediate. + */ + [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING", + + /* globbing pattern binary operator: apply to */ + [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING", + [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING", + + /* + * Instructions for recursive traversal through composed types. + */ + [ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT", + [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT", + [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT", + + [ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL", + [ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD", + [ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16", + [ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64", + + [ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD", + [ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8", + [ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16", + [ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32", + [ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64", + [ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8", + [ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16", + [ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32", + [ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64", + [ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING", + [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE", + [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE", + + [ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT", + + [ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64", +}; + +const char *print_op(enum bytecode_op op) +{ + if (op >= NR_BYTECODE_OPS) + return "UNKNOWN"; + else + return opnames[op]; +} + +static +int apply_field_reloc(const struct lttng_event_desc *event_desc, + struct bytecode_runtime *runtime, + uint32_t runtime_len, + uint32_t reloc_offset, + const char *field_name, + enum bytecode_op bytecode_op) +{ + const struct lttng_event_field *fields, *field = NULL; + unsigned int nr_fields, i; + struct load_op *op; + uint32_t field_offset = 0; + + dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name); + + /* Lookup event by name */ + if (!event_desc) + return -EINVAL; + fields = event_desc->fields; + if (!fields) + return -EINVAL; + nr_fields = event_desc->nr_fields; + for (i = 0; i < nr_fields; i++) { + if (fields[i].u.ext.nofilter) { + continue; + } + if (!strcmp(fields[i].name, field_name)) { + field = &fields[i]; + break; + } + /* compute field offset */ + switch (fields[i].type.atype) { + case atype_integer: + case atype_enum: + case atype_enum_nestable: + field_offset += sizeof(int64_t); + break; + case atype_array: + case atype_array_nestable: + case atype_sequence: + case atype_sequence_nestable: + field_offset += sizeof(unsigned long); + field_offset += sizeof(void *); + break; + case atype_string: + field_offset += sizeof(void *); + break; + case atype_float: + field_offset += sizeof(double); + break; + default: + return -EINVAL; + } + } + if (!field) + return -EINVAL; + + /* Check if field offset is too 
large for 16-bit offset */ + if (field_offset > FILTER_BYTECODE_MAX_LEN - 1) + return -EINVAL; + + /* set type */ + op = (struct load_op *) &runtime->code[reloc_offset]; + + switch (bytecode_op) { + case BYTECODE_OP_LOAD_FIELD_REF: + { + struct field_ref *field_ref; + + field_ref = (struct field_ref *) op->data; + switch (field->type.atype) { + case atype_integer: + case atype_enum: + case atype_enum_nestable: + op->op = BYTECODE_OP_LOAD_FIELD_REF_S64; + break; + case atype_array: + case atype_array_nestable: + case atype_sequence: + case atype_sequence_nestable: + op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE; + break; + case atype_string: + op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING; + break; + case atype_float: + op->op = BYTECODE_OP_LOAD_FIELD_REF_DOUBLE; + break; + default: + return -EINVAL; + } + /* set offset */ + field_ref->offset = (uint16_t) field_offset; + break; + } + default: + return -EINVAL; + } + return 0; +} + +static +int apply_context_reloc(struct bytecode_runtime *runtime, + uint32_t runtime_len, + uint32_t reloc_offset, + const char *context_name, + enum bytecode_op bytecode_op) +{ + struct load_op *op; + struct lttng_ctx_field *ctx_field; + int idx; + struct lttng_ctx *ctx = *runtime->p.pctx; + + dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name); + + /* Get context index */ + idx = lttng_get_context_index(ctx, context_name); + if (idx < 0) { + if (lttng_context_is_app(context_name)) { + int ret; + + ret = lttng_ust_add_app_context_to_ctx_rcu(context_name, + &ctx); + if (ret) + return ret; + idx = lttng_get_context_index(ctx, context_name); + if (idx < 0) + return -ENOENT; + } else { + return -ENOENT; + } + } + /* Check if idx is too large for 16-bit offset */ + if (idx > FILTER_BYTECODE_MAX_LEN - 1) + return -EINVAL; + + /* Get context return type */ + ctx_field = &ctx->fields[idx]; + op = (struct load_op *) &runtime->code[reloc_offset]; + + switch (bytecode_op) { + case BYTECODE_OP_GET_CONTEXT_REF: + { + struct field_ref *field_ref; + + field_ref = (struct field_ref *) op->data; + switch (ctx_field->event_field.type.atype) { + case atype_integer: + case atype_enum: + case atype_enum_nestable: + op->op = BYTECODE_OP_GET_CONTEXT_REF_S64; + break; + /* Sequence and array supported as string */ + case atype_string: + case atype_array: + case atype_array_nestable: + case atype_sequence: + case atype_sequence_nestable: + op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING; + break; + case atype_float: + op->op = BYTECODE_OP_GET_CONTEXT_REF_DOUBLE; + break; + case atype_dynamic: + op->op = BYTECODE_OP_GET_CONTEXT_REF; + break; + default: + return -EINVAL; + } + /* set offset to context index within channel contexts */ + field_ref->offset = (uint16_t) idx; + break; + } + default: + return -EINVAL; + } + return 0; +} + +static +int apply_reloc(const struct lttng_event_desc *event_desc, + struct bytecode_runtime *runtime, + uint32_t runtime_len, + uint32_t reloc_offset, + const char *name) +{ + struct load_op *op; + + dbg_printf("Apply reloc: %u %s\n", reloc_offset, name); + + /* Ensure that the reloc is within the code */ + if (runtime_len - reloc_offset < sizeof(uint16_t)) + return -EINVAL; + + op = (struct load_op *) &runtime->code[reloc_offset]; + switch (op->op) { + case BYTECODE_OP_LOAD_FIELD_REF: + return apply_field_reloc(event_desc, runtime, runtime_len, + reloc_offset, name, op->op); + case BYTECODE_OP_GET_CONTEXT_REF: + return apply_context_reloc(runtime, runtime_len, + reloc_offset, name, op->op); + case BYTECODE_OP_GET_SYMBOL: + case 
BYTECODE_OP_GET_SYMBOL_FIELD: + /* + * Will be handled by load specialize phase or + * dynamically by interpreter. + */ + return 0; + default: + ERR("Unknown reloc op type %u\n", op->op); + return -EINVAL; + } + return 0; +} + +static +int bytecode_is_linked(struct lttng_ust_bytecode_node *bytecode, + struct cds_list_head *bytecode_runtime_head) +{ + struct lttng_bytecode_runtime *bc_runtime; + + cds_list_for_each_entry(bc_runtime, bytecode_runtime_head, node) { + if (bc_runtime->bc == bytecode) + return 1; + } + return 0; +} + +/* + * Take a bytecode with reloc table and link it to an event to create a + * bytecode runtime. + */ +static +int _lttng_filter_link_bytecode(const struct lttng_event_desc *event_desc, + struct lttng_ctx **ctx, + struct lttng_ust_bytecode_node *bytecode, + struct cds_list_head *insert_loc) +{ + int ret, offset, next_offset; + struct bytecode_runtime *runtime = NULL; + size_t runtime_alloc_len; + + if (!bytecode) + return 0; + /* Bytecode already linked */ + if (bytecode_is_linked(bytecode, insert_loc)) + return 0; + + dbg_printf("Linking...\n"); + + /* We don't need the reloc table in the runtime */ + runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset; + runtime = zmalloc(runtime_alloc_len); + if (!runtime) { + ret = -ENOMEM; + goto alloc_error; + } + runtime->p.bc = bytecode; + runtime->p.pctx = ctx; + runtime->len = bytecode->bc.reloc_offset; + /* copy original bytecode */ + memcpy(runtime->code, bytecode->bc.data, runtime->len); + /* + * apply relocs. Those are a uint16_t (offset in bytecode) + * followed by a string (field name). + */ + for (offset = bytecode->bc.reloc_offset; + offset < bytecode->bc.len; + offset = next_offset) { + uint16_t reloc_offset = + *(uint16_t *) &bytecode->bc.data[offset]; + const char *name = + (const char *) &bytecode->bc.data[offset + sizeof(uint16_t)]; + + ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name); + if (ret) { + goto link_error; + } + next_offset = offset + sizeof(uint16_t) + strlen(name) + 1; + } + /* Validate bytecode */ + ret = lttng_bytecode_validate(runtime); + if (ret) { + goto link_error; + } + /* Specialize bytecode */ + ret = lttng_bytecode_specialize(event_desc, runtime); + if (ret) { + goto link_error; + } + runtime->p.filter = lttng_bytecode_filter_interpret; + runtime->p.link_failed = 0; + cds_list_add_rcu(&runtime->p.node, insert_loc); + dbg_printf("Linking successful.\n"); + return 0; + +link_error: + runtime->p.filter = lttng_bytecode_filter_interpret_false; + runtime->p.link_failed = 1; + cds_list_add_rcu(&runtime->p.node, insert_loc); +alloc_error: + dbg_printf("Linking failed.\n"); + return ret; +} + +void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime) +{ + struct lttng_ust_bytecode_node *bc = runtime->bc; + + if (!bc->enabler->enabled || runtime->link_failed) + runtime->filter = lttng_bytecode_filter_interpret_false; + else + runtime->filter = lttng_bytecode_filter_interpret; +} + +/* + * Link all bytecodes of the enabler referenced in the provided bytecode list. + */ +void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc, + struct lttng_ctx **ctx, + struct cds_list_head *bytecode_runtime_head, + struct lttng_enabler *enabler) +{ + struct lttng_ust_bytecode_node *bc; + struct lttng_bytecode_runtime *runtime; + + assert(event_desc); + + /* Link each bytecode. 
*/ + cds_list_for_each_entry(bc, &enabler->filter_bytecode_head, node) { + int found = 0, ret; + struct cds_list_head *insert_loc; + + cds_list_for_each_entry(runtime, + bytecode_runtime_head, node) { + if (runtime->bc == bc) { + found = 1; + break; + } + } + /* Skip bytecode already linked */ + if (found) + continue; + + /* + * Insert at specified priority (seqnum) in increasing + * order. If there already is a bytecode of the same priority, + * insert the new bytecode right after it. + */ + cds_list_for_each_entry_reverse(runtime, + bytecode_runtime_head, node) { + if (runtime->bc->bc.seqnum <= bc->bc.seqnum) { + /* insert here */ + insert_loc = &runtime->node; + goto add_within; + } + } + + /* Add to head to list */ + insert_loc = bytecode_runtime_head; + add_within: + dbg_printf("linking bytecode\n"); + ret = _lttng_filter_link_bytecode(event_desc, ctx, bc, + insert_loc); + if (ret) { + dbg_printf("[lttng filter] warning: cannot link event bytecode\n"); + } + } +} + +/* + * We own the bytecode if we return success. + */ +int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler, + struct lttng_ust_bytecode_node *bytecode) +{ + cds_list_add(&bytecode->node, &enabler->filter_bytecode_head); + return 0; +} + +static +void free_filter_runtime(struct cds_list_head *bytecode_runtime_head) +{ + struct bytecode_runtime *runtime, *tmp; + + cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head, + p.node) { + free(runtime->data); + free(runtime); + } +} + +void lttng_free_event_filter_runtime(struct lttng_event *event) +{ + free_filter_runtime(&event->filter_bytecode_runtime_head); +} + +void lttng_free_event_notifier_filter_runtime( + struct lttng_event_notifier *event_notifier) +{ + free_filter_runtime(&event_notifier->filter_bytecode_runtime_head); +} diff --git a/liblttng-ust/lttng-bytecode.h b/liblttng-ust/lttng-bytecode.h new file mode 100644 index 00000000..4f1b88f1 --- /dev/null +++ b/liblttng-ust/lttng-bytecode.h @@ -0,0 +1,343 @@ +#ifndef _LTTNG_BYTECODE_H +#define _LTTNG_BYTECODE_H + +/* + * lttng-bytecode.h + * + * LTTng UST bytecode header. + * + * Copyright (C) 2010-2016 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bytecode.h" + +/* Interpreter stack length, in number of entries */ +#define INTERPRETER_STACK_LEN 10 /* includes 2 dummy */ +#define INTERPRETER_STACK_EMPTY 1 + +#define BYTECODE_MAX_DATA_LEN 65536 + +#ifndef min_t +#define min_t(type, a, b) \ + ((type) (a) < (type) (b) ? (type) (a) : (type) (b)) +#endif + +#ifndef likely +#define likely(x) __builtin_expect(!!(x), 1) +#endif + +#ifndef unlikely +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +#ifdef DEBUG +#define dbg_printf(fmt, args...) \ + printf("[debug bytecode in %s:%s@%u] " fmt, \ + __FILE__, __func__, __LINE__, ## args) +#else +#define dbg_printf(fmt, args...) \ +do { \ + /* do nothing but check printf format */ \ + if (0) \ + printf("[debug bytecode in %s:%s@%u] " fmt, \ + __FILE__, __func__, __LINE__, ## args); \ +} while (0) +#endif + +/* Linked bytecode. Child of struct lttng_bytecode_runtime. */ +struct bytecode_runtime { + struct lttng_bytecode_runtime p; + size_t data_len; + size_t data_alloc_len; + char *data; + uint16_t len; + char code[0]; +}; + +enum entry_type { + REG_S64, + REG_U64, + REG_DOUBLE, + REG_STRING, + REG_STAR_GLOB_STRING, + REG_UNKNOWN, + REG_PTR, +}; + +enum load_type { + LOAD_ROOT_CONTEXT, + LOAD_ROOT_APP_CONTEXT, + LOAD_ROOT_PAYLOAD, + LOAD_OBJECT, +}; + +enum object_type { + OBJECT_TYPE_S8, + OBJECT_TYPE_S16, + OBJECT_TYPE_S32, + OBJECT_TYPE_S64, + OBJECT_TYPE_U8, + OBJECT_TYPE_U16, + OBJECT_TYPE_U32, + OBJECT_TYPE_U64, + + OBJECT_TYPE_DOUBLE, + OBJECT_TYPE_STRING, + OBJECT_TYPE_STRING_SEQUENCE, + + OBJECT_TYPE_SEQUENCE, + OBJECT_TYPE_ARRAY, + OBJECT_TYPE_STRUCT, + OBJECT_TYPE_VARIANT, + + OBJECT_TYPE_DYNAMIC, +}; + +struct bytecode_get_index_data { + uint64_t offset; /* in bytes */ + size_t ctx_index; + size_t array_len; + /* + * Field is only populated for LOAD_ROOT_CONTEXT, LOAD_ROOT_APP_CONTEXT + * and LOAD_ROOT_PAYLOAD. Left NULL for LOAD_OBJECT, considering that the + * interpreter needs to find it from the event fields and types to + * support variants. 
+ */ + const struct lttng_event_field *field; + struct { + size_t len; + enum object_type type; + bool rev_bo; /* reverse byte order */ + } elem; +}; + +/* Validation stack */ +struct vstack_load { + enum load_type type; + enum object_type object_type; + const struct lttng_event_field *field; + bool rev_bo; /* reverse byte order */ +}; + +struct vstack_entry { + enum entry_type type; + struct vstack_load load; +}; + +struct vstack { + int top; /* top of stack */ + struct vstack_entry e[INTERPRETER_STACK_LEN]; +}; + +static inline +void vstack_init(struct vstack *stack) +{ + stack->top = -1; +} + +static inline +struct vstack_entry *vstack_ax(struct vstack *stack) +{ + if (unlikely(stack->top < 0)) + return NULL; + return &stack->e[stack->top]; +} + +static inline +struct vstack_entry *vstack_bx(struct vstack *stack) +{ + if (unlikely(stack->top < 1)) + return NULL; + return &stack->e[stack->top - 1]; +} + +static inline +int vstack_push(struct vstack *stack) +{ + if (stack->top >= INTERPRETER_STACK_LEN - 1) { + ERR("Stack full\n"); + return -EINVAL; + } + ++stack->top; + return 0; +} + +static inline +int vstack_pop(struct vstack *stack) +{ + if (unlikely(stack->top < 0)) { + ERR("Stack empty\n"); + return -EINVAL; + } + stack->top--; + return 0; +} + +/* Execution stack */ +enum estack_string_literal_type { + ESTACK_STRING_LITERAL_TYPE_NONE, + ESTACK_STRING_LITERAL_TYPE_PLAIN, + ESTACK_STRING_LITERAL_TYPE_STAR_GLOB, +}; + +struct load_ptr { + enum load_type type; + enum object_type object_type; + const void *ptr; + size_t nr_elem; + bool rev_bo; + /* Temporary place-holders for contexts. */ + union { + int64_t s64; + uint64_t u64; + double d; + } u; + const struct lttng_event_field *field; +}; + +struct estack_entry { + enum entry_type type; /* For dynamic typing. */ + union { + int64_t v; + double d; + + struct { + const char *str; + size_t seq_len; + enum estack_string_literal_type literal_type; + } s; + struct load_ptr ptr; + } u; +}; + +struct estack { + int top; /* top of stack */ + struct estack_entry e[INTERPRETER_STACK_LEN]; +}; + +/* + * Always use aliased type for ax/bx (top of stack). + * When ax/bx are S64, use aliased value. + */ +#define estack_ax_v ax +#define estack_bx_v bx +#define estack_ax_t ax_t +#define estack_bx_t bx_t + +/* + * ax and bx registers can hold either integer, double or string. + */ +#define estack_ax(stack, top) \ + ({ \ + assert((top) > INTERPRETER_STACK_EMPTY); \ + &(stack)->e[top]; \ + }) + +#define estack_bx(stack, top) \ + ({ \ + assert((top) > INTERPRETER_STACK_EMPTY + 1); \ + &(stack)->e[(top) - 1]; \ + }) + +/* + * Currently, only integers (REG_S64) can be pushed into the stack. + */ +#define estack_push(stack, top, ax, bx, ax_t, bx_t) \ + do { \ + assert((top) < INTERPRETER_STACK_LEN - 1); \ + (stack)->e[(top) - 1].u.v = (bx); \ + (stack)->e[(top) - 1].type = (bx_t); \ + (bx) = (ax); \ + (bx_t) = (ax_t); \ + ++(top); \ + } while (0) + +#define estack_pop(stack, top, ax, bx, ax_t, bx_t) \ + do { \ + assert((top) > INTERPRETER_STACK_EMPTY); \ + (ax) = (bx); \ + (ax_t) = (bx_t); \ + (bx) = (stack)->e[(top) - 2].u.v; \ + (bx_t) = (stack)->e[(top) - 2].type; \ + (top)--; \ + } while (0) + +enum lttng_interpreter_type { + LTTNG_INTERPRETER_TYPE_S64, + LTTNG_INTERPRETER_TYPE_U64, + LTTNG_INTERPRETER_TYPE_SIGNED_ENUM, + LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM, + LTTNG_INTERPRETER_TYPE_DOUBLE, + LTTNG_INTERPRETER_TYPE_STRING, + LTTNG_INTERPRETER_TYPE_SEQUENCE, +}; + +/* + * Represents the output parameter of the lttng interpreter. 
+ * Currently capturable field classes are integer, double, string and sequence + * of integer. + */ +struct lttng_interpreter_output { + enum lttng_interpreter_type type; + union { + int64_t s; + uint64_t u; + double d; + + struct { + const char *str; + size_t len; + } str; + struct { + const void *ptr; + size_t nr_elem; + + /* Inner type. */ + const struct lttng_type *nested_type; + } sequence; + } u; +}; + +const char *print_op(enum bytecode_op op); + +int lttng_bytecode_validate(struct bytecode_runtime *bytecode); +int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc, + struct bytecode_runtime *bytecode); + +uint64_t lttng_bytecode_filter_interpret_false(void *filter_data, + const char *filter_stack_data); +uint64_t lttng_bytecode_filter_interpret(void *filter_data, + const char *filter_stack_data); + +#endif /* _LTTNG_BYTECODE_H */ diff --git a/liblttng-ust/lttng-filter-interpreter.c b/liblttng-ust/lttng-filter-interpreter.c deleted file mode 100644 index d00179cd..00000000 --- a/liblttng-ust/lttng-filter-interpreter.c +++ /dev/null @@ -1,2499 +0,0 @@ -/* - * lttng-filter-interpreter.c - * - * LTTng UST filter interpreter. - * - * Copyright (C) 2010-2016 Mathieu Desnoyers - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#define _LGPL_SOURCE -#include -#include -#include -#include - -#include -#include - -#include "lttng-filter.h" -#include "string-utils.h" - - -/* - * -1: wildcard found. - * -2: unknown escape char. - * 0: normal char. - */ - -static -int parse_char(const char **p) -{ - switch (**p) { - case '\\': - (*p)++; - switch (**p) { - case '\\': - case '*': - return 0; - default: - return -2; - } - case '*': - return -1; - default: - return 0; - } -} - -/* - * Returns SIZE_MAX if the string is null-terminated, or the number of - * characters if not. - */ -static -size_t get_str_or_seq_len(const struct estack_entry *entry) -{ - return entry->u.s.seq_len; -} - -static -int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type) -{ - const char *pattern; - const char *candidate; - size_t pattern_len; - size_t candidate_len; - - /* Find out which side is the pattern vs. the candidate. 
*/ - if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) { - pattern = estack_ax(stack, top)->u.s.str; - pattern_len = get_str_or_seq_len(estack_ax(stack, top)); - candidate = estack_bx(stack, top)->u.s.str; - candidate_len = get_str_or_seq_len(estack_bx(stack, top)); - } else { - pattern = estack_bx(stack, top)->u.s.str; - pattern_len = get_str_or_seq_len(estack_bx(stack, top)); - candidate = estack_ax(stack, top)->u.s.str; - candidate_len = get_str_or_seq_len(estack_ax(stack, top)); - } - - /* Perform the match. Returns 0 when the result is true. */ - return !strutils_star_glob_match(pattern, pattern_len, candidate, - candidate_len); -} - -static -int stack_strcmp(struct estack *stack, int top, const char *cmp_type) -{ - const char *p = estack_bx(stack, top)->u.s.str, *q = estack_ax(stack, top)->u.s.str; - int ret; - int diff; - - for (;;) { - int escaped_r0 = 0; - - if (unlikely(p - estack_bx(stack, top)->u.s.str >= estack_bx(stack, top)->u.s.seq_len || *p == '\0')) { - if (q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0') { - return 0; - } else { - if (estack_ax(stack, top)->u.s.literal_type == - ESTACK_STRING_LITERAL_TYPE_PLAIN) { - ret = parse_char(&q); - if (ret == -1) - return 0; - } - return -1; - } - } - if (unlikely(q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0')) { - if (estack_bx(stack, top)->u.s.literal_type == - ESTACK_STRING_LITERAL_TYPE_PLAIN) { - ret = parse_char(&p); - if (ret == -1) - return 0; - } - return 1; - } - if (estack_bx(stack, top)->u.s.literal_type == - ESTACK_STRING_LITERAL_TYPE_PLAIN) { - ret = parse_char(&p); - if (ret == -1) { - return 0; - } else if (ret == -2) { - escaped_r0 = 1; - } - /* else compare both char */ - } - if (estack_ax(stack, top)->u.s.literal_type == - ESTACK_STRING_LITERAL_TYPE_PLAIN) { - ret = parse_char(&q); - if (ret == -1) { - return 0; - } else if (ret == -2) { - if (!escaped_r0) - return -1; - } else { - if (escaped_r0) - return 1; - } - } else { - if (escaped_r0) - return 1; - } - diff = *p - *q; - if (diff != 0) - break; - p++; - q++; - } - return diff; -} - -uint64_t lttng_filter_interpret_bytecode_false(void *filter_data, - const char *filter_stack_data) -{ - return LTTNG_FILTER_DISCARD; -} - -#ifdef INTERPRETER_USE_SWITCH - -/* - * Fallback for compilers that do not support taking address of labels. - */ - -#define START_OP \ - start_pc = &bytecode->data[0]; \ - for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \ - pc = next_pc) { \ - dbg_printf("Executing op %s (%u)\n", \ - print_op((unsigned int) *(filter_opcode_t *) pc), \ - (unsigned int) *(filter_opcode_t *) pc); \ - switch (*(filter_opcode_t *) pc) { - -#define OP(name) jump_target_##name: __attribute__((unused)); \ - case name - -#define PO break - -#define END_OP } \ - } - -#define JUMP_TO(name) \ - goto jump_target_##name - -#else - -/* - * Dispatch-table based interpreter. 
- */ - -#define START_OP \ - start_pc = &bytecode->code[0]; \ - pc = next_pc = start_pc; \ - if (unlikely(pc - start_pc >= bytecode->len)) \ - goto end; \ - goto *dispatch[*(filter_opcode_t *) pc]; - -#define OP(name) \ -LABEL_##name - -#define PO \ - pc = next_pc; \ - goto *dispatch[*(filter_opcode_t *) pc]; - -#define END_OP - -#define JUMP_TO(name) \ - goto LABEL_##name - -#endif - -#define IS_INTEGER_REGISTER(reg_type) \ - (reg_type == REG_U64 || reg_type == REG_S64) - -static int context_get_index(struct lttng_ctx *ctx, - struct load_ptr *ptr, - uint32_t idx) -{ - - struct lttng_ctx_field *ctx_field; - struct lttng_event_field *field; - struct lttng_ctx_value v; - - ctx_field = &ctx->fields[idx]; - field = &ctx_field->event_field; - ptr->type = LOAD_OBJECT; - ptr->field = field; - - switch (field->type.atype) { - case atype_integer: - ctx_field->get_value(ctx_field, &v); - if (field->type.u.integer.signedness) { - ptr->object_type = OBJECT_TYPE_S64; - ptr->u.s64 = v.u.s64; - ptr->ptr = &ptr->u.s64; - } else { - ptr->object_type = OBJECT_TYPE_U64; - ptr->u.u64 = v.u.s64; /* Cast. */ - ptr->ptr = &ptr->u.u64; - } - break; - case atype_enum: /* Fall-through */ - case atype_enum_nestable: - { - const struct lttng_integer_type *itype; - - if (field->type.atype == atype_enum) { - itype = &field->type.u.legacy.basic.enumeration.container_type; - } else { - itype = &field->type.u.enum_nestable.container_type->u.integer; - } - ctx_field->get_value(ctx_field, &v); - if (itype->signedness) { - ptr->object_type = OBJECT_TYPE_S64; - ptr->u.s64 = v.u.s64; - ptr->ptr = &ptr->u.s64; - } else { - ptr->object_type = OBJECT_TYPE_U64; - ptr->u.u64 = v.u.s64; /* Cast. */ - ptr->ptr = &ptr->u.u64; - } - break; - } - case atype_array: - if (field->type.u.legacy.array.elem_type.atype != atype_integer) { - ERR("Array nesting only supports integer types."); - return -EINVAL; - } - if (field->type.u.legacy.array.elem_type.u.basic.integer.encoding == lttng_encode_none) { - ERR("Only string arrays are supported for contexts."); - return -EINVAL; - } - ptr->object_type = OBJECT_TYPE_STRING; - ctx_field->get_value(ctx_field, &v); - ptr->ptr = v.u.str; - break; - case atype_array_nestable: - if (field->type.u.array_nestable.elem_type->atype != atype_integer) { - ERR("Array nesting only supports integer types."); - return -EINVAL; - } - if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) { - ERR("Only string arrays are supported for contexts."); - return -EINVAL; - } - ptr->object_type = OBJECT_TYPE_STRING; - ctx_field->get_value(ctx_field, &v); - ptr->ptr = v.u.str; - break; - case atype_sequence: - if (field->type.u.legacy.sequence.elem_type.atype != atype_integer) { - ERR("Sequence nesting only supports integer types."); - return -EINVAL; - } - if (field->type.u.legacy.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) { - ERR("Only string sequences are supported for contexts."); - return -EINVAL; - } - ptr->object_type = OBJECT_TYPE_STRING; - ctx_field->get_value(ctx_field, &v); - ptr->ptr = v.u.str; - break; - case atype_sequence_nestable: - if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) { - ERR("Sequence nesting only supports integer types."); - return -EINVAL; - } - if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) { - ERR("Only string sequences are supported for contexts."); - return -EINVAL; - } - ptr->object_type = OBJECT_TYPE_STRING; - ctx_field->get_value(ctx_field, &v); - ptr->ptr = v.u.str; - 
break; - case atype_string: - ptr->object_type = OBJECT_TYPE_STRING; - ctx_field->get_value(ctx_field, &v); - ptr->ptr = v.u.str; - break; - case atype_float: - ptr->object_type = OBJECT_TYPE_DOUBLE; - ctx_field->get_value(ctx_field, &v); - ptr->u.d = v.u.d; - ptr->ptr = &ptr->u.d; - break; - case atype_dynamic: - ctx_field->get_value(ctx_field, &v); - switch (v.sel) { - case LTTNG_UST_DYNAMIC_TYPE_NONE: - return -EINVAL; - case LTTNG_UST_DYNAMIC_TYPE_S64: - ptr->object_type = OBJECT_TYPE_S64; - ptr->u.s64 = v.u.s64; - ptr->ptr = &ptr->u.s64; - dbg_printf("context get index dynamic s64 %" PRIi64 "\n", ptr->u.s64); - break; - case LTTNG_UST_DYNAMIC_TYPE_DOUBLE: - ptr->object_type = OBJECT_TYPE_DOUBLE; - ptr->u.d = v.u.d; - ptr->ptr = &ptr->u.d; - dbg_printf("context get index dynamic double %g\n", ptr->u.d); - break; - case LTTNG_UST_DYNAMIC_TYPE_STRING: - ptr->object_type = OBJECT_TYPE_STRING; - ptr->ptr = v.u.str; - dbg_printf("context get index dynamic string %s\n", (const char *) ptr->ptr); - break; - default: - dbg_printf("Filter warning: unknown dynamic type (%d).\n", (int) v.sel); - return -EINVAL; - } - break; - case atype_struct: - ERR("Structure type cannot be loaded."); - return -EINVAL; - default: - ERR("Unknown type: %d", (int) field->type.atype); - return -EINVAL; - } - return 0; -} - -static int dynamic_get_index(struct lttng_ctx *ctx, - struct bytecode_runtime *runtime, - uint64_t index, struct estack_entry *stack_top) -{ - int ret; - const struct filter_get_index_data *gid; - - gid = (const struct filter_get_index_data *) &runtime->data[index]; - switch (stack_top->u.ptr.type) { - case LOAD_OBJECT: - switch (stack_top->u.ptr.object_type) { - case OBJECT_TYPE_ARRAY: - { - const char *ptr; - - assert(gid->offset < gid->array_len); - /* Skip count (unsigned long) */ - ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long)); - ptr = ptr + gid->offset; - stack_top->u.ptr.ptr = ptr; - stack_top->u.ptr.object_type = gid->elem.type; - stack_top->u.ptr.rev_bo = gid->elem.rev_bo; - assert(stack_top->u.ptr.field->type.atype == atype_array || - stack_top->u.ptr.field->type.atype == atype_array_nestable); - stack_top->u.ptr.field = NULL; - break; - } - case OBJECT_TYPE_SEQUENCE: - { - const char *ptr; - size_t ptr_seq_len; - - ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long)); - ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr; - if (gid->offset >= gid->elem.len * ptr_seq_len) { - ret = -EINVAL; - goto end; - } - ptr = ptr + gid->offset; - stack_top->u.ptr.ptr = ptr; - stack_top->u.ptr.object_type = gid->elem.type; - stack_top->u.ptr.rev_bo = gid->elem.rev_bo; - assert(stack_top->u.ptr.field->type.atype == atype_sequence || - stack_top->u.ptr.field->type.atype == atype_sequence_nestable); - stack_top->u.ptr.field = NULL; - break; - } - case OBJECT_TYPE_STRUCT: - ERR("Nested structures are not supported yet."); - ret = -EINVAL; - goto end; - case OBJECT_TYPE_VARIANT: - default: - ERR("Unexpected get index type %d", - (int) stack_top->u.ptr.object_type); - ret = -EINVAL; - goto end; - } - break; - case LOAD_ROOT_CONTEXT: - case LOAD_ROOT_APP_CONTEXT: /* Fall-through */ - { - ret = context_get_index(ctx, - &stack_top->u.ptr, - gid->ctx_index); - if (ret) { - goto end; - } - break; - } - case LOAD_ROOT_PAYLOAD: - stack_top->u.ptr.ptr += gid->offset; - if (gid->elem.type == OBJECT_TYPE_STRING) - stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr; - stack_top->u.ptr.object_type = gid->elem.type; - stack_top->u.ptr.type = LOAD_OBJECT; - 
stack_top->u.ptr.field = gid->field; - stack_top->u.ptr.rev_bo = gid->elem.rev_bo; - break; - } - - stack_top->type = REG_PTR; - - return 0; - -end: - return ret; -} - -static int dynamic_load_field(struct estack_entry *stack_top) -{ - int ret; - - switch (stack_top->u.ptr.type) { - case LOAD_OBJECT: - break; - case LOAD_ROOT_CONTEXT: - case LOAD_ROOT_APP_CONTEXT: - case LOAD_ROOT_PAYLOAD: - default: - dbg_printf("Filter warning: cannot load root, missing field name.\n"); - ret = -EINVAL; - goto end; - } - switch (stack_top->u.ptr.object_type) { - case OBJECT_TYPE_S8: - dbg_printf("op load field s8\n"); - stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr; - stack_top->type = REG_S64; - break; - case OBJECT_TYPE_S16: - { - int16_t tmp; - - dbg_printf("op load field s16\n"); - tmp = *(int16_t *) stack_top->u.ptr.ptr; - if (stack_top->u.ptr.rev_bo) - tmp = bswap_16(tmp); - stack_top->u.v = tmp; - stack_top->type = REG_S64; - break; - } - case OBJECT_TYPE_S32: - { - int32_t tmp; - - dbg_printf("op load field s32\n"); - tmp = *(int32_t *) stack_top->u.ptr.ptr; - if (stack_top->u.ptr.rev_bo) - tmp = bswap_32(tmp); - stack_top->u.v = tmp; - stack_top->type = REG_S64; - break; - } - case OBJECT_TYPE_S64: - { - int64_t tmp; - - dbg_printf("op load field s64\n"); - tmp = *(int64_t *) stack_top->u.ptr.ptr; - if (stack_top->u.ptr.rev_bo) - tmp = bswap_64(tmp); - stack_top->u.v = tmp; - stack_top->type = REG_S64; - break; - } - case OBJECT_TYPE_U8: - dbg_printf("op load field u8\n"); - stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr; - stack_top->type = REG_U64; - break; - case OBJECT_TYPE_U16: - { - uint16_t tmp; - - dbg_printf("op load field u16\n"); - tmp = *(uint16_t *) stack_top->u.ptr.ptr; - if (stack_top->u.ptr.rev_bo) - tmp = bswap_16(tmp); - stack_top->u.v = tmp; - stack_top->type = REG_U64; - break; - } - case OBJECT_TYPE_U32: - { - uint32_t tmp; - - dbg_printf("op load field u32\n"); - tmp = *(uint32_t *) stack_top->u.ptr.ptr; - if (stack_top->u.ptr.rev_bo) - tmp = bswap_32(tmp); - stack_top->u.v = tmp; - stack_top->type = REG_U64; - break; - } - case OBJECT_TYPE_U64: - { - uint64_t tmp; - - dbg_printf("op load field u64\n"); - tmp = *(uint64_t *) stack_top->u.ptr.ptr; - if (stack_top->u.ptr.rev_bo) - tmp = bswap_64(tmp); - stack_top->u.v = tmp; - stack_top->type = REG_U64; - break; - } - case OBJECT_TYPE_DOUBLE: - memcpy(&stack_top->u.d, - stack_top->u.ptr.ptr, - sizeof(struct literal_double)); - stack_top->type = REG_DOUBLE; - break; - case OBJECT_TYPE_STRING: - { - const char *str; - - dbg_printf("op load field string\n"); - str = (const char *) stack_top->u.ptr.ptr; - stack_top->u.s.str = str; - if (unlikely(!stack_top->u.s.str)) { - dbg_printf("Filter warning: loading a NULL string.\n"); - ret = -EINVAL; - goto end; - } - stack_top->u.s.seq_len = SIZE_MAX; - stack_top->u.s.literal_type = - ESTACK_STRING_LITERAL_TYPE_NONE; - stack_top->type = REG_STRING; - break; - } - case OBJECT_TYPE_STRING_SEQUENCE: - { - const char *ptr; - - dbg_printf("op load field string sequence\n"); - ptr = stack_top->u.ptr.ptr; - stack_top->u.s.seq_len = *(unsigned long *) ptr; - stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long)); - stack_top->type = REG_STRING; - if (unlikely(!stack_top->u.s.str)) { - dbg_printf("Filter warning: loading a NULL sequence.\n"); - ret = -EINVAL; - goto end; - } - stack_top->u.s.literal_type = - ESTACK_STRING_LITERAL_TYPE_NONE; - break; - } - case OBJECT_TYPE_DYNAMIC: - /* - * Dynamic types in context are looked up - * by context get index. 
- */ - ret = -EINVAL; - goto end; - case OBJECT_TYPE_SEQUENCE: - case OBJECT_TYPE_ARRAY: - case OBJECT_TYPE_STRUCT: - case OBJECT_TYPE_VARIANT: - ERR("Sequences, arrays, struct and variant cannot be loaded (nested types)."); - ret = -EINVAL; - goto end; - } - return 0; - -end: - return ret; -} - -static -int lttng_bytecode_interpret_format_output(struct estack_entry *ax, - struct lttng_interpreter_output *output) -{ - int ret; - -again: - switch (ax->type) { - case REG_S64: - output->type = LTTNG_INTERPRETER_TYPE_S64; - output->u.s = ax->u.v; - break; - case REG_U64: - output->type = LTTNG_INTERPRETER_TYPE_U64; - output->u.u = (uint64_t) ax->u.v; - break; - case REG_DOUBLE: - output->type = LTTNG_INTERPRETER_TYPE_DOUBLE; - output->u.d = ax->u.d; - break; - case REG_STRING: - output->type = LTTNG_INTERPRETER_TYPE_STRING; - output->u.str.str = ax->u.s.str; - output->u.str.len = ax->u.s.seq_len; - break; - case REG_PTR: - switch (ax->u.ptr.object_type) { - case OBJECT_TYPE_S8: - case OBJECT_TYPE_S16: - case OBJECT_TYPE_S32: - case OBJECT_TYPE_S64: - case OBJECT_TYPE_U8: - case OBJECT_TYPE_U16: - case OBJECT_TYPE_U32: - case OBJECT_TYPE_U64: - case OBJECT_TYPE_DOUBLE: - case OBJECT_TYPE_STRING: - case OBJECT_TYPE_STRING_SEQUENCE: - ret = dynamic_load_field(ax); - if (ret) - return ret; - /* Retry after loading ptr into stack top. */ - goto again; - case OBJECT_TYPE_SEQUENCE: - output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE; - output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long)); - output->u.sequence.nr_elem = *(unsigned long *) ax->u.ptr.ptr; - output->u.sequence.nested_type = ax->u.ptr.field->type.u.sequence_nestable.elem_type; - break; - case OBJECT_TYPE_ARRAY: - /* Skip count (unsigned long) */ - output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE; - output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long)); - output->u.sequence.nr_elem = ax->u.ptr.field->type.u.array_nestable.length; - output->u.sequence.nested_type = ax->u.ptr.field->type.u.array_nestable.elem_type; - break; - case OBJECT_TYPE_STRUCT: - case OBJECT_TYPE_VARIANT: - default: - return -EINVAL; - } - - break; - case REG_STAR_GLOB_STRING: - case REG_UNKNOWN: - default: - return -EINVAL; - } - - return LTTNG_FILTER_RECORD_FLAG; -} - -/* - * Return 0 (discard), or raise the 0x1 flag (log event). - * Currently, other flags are kept for future extensions and have no - * effect. 
- */ -static -uint64_t bytecode_interpret(void *interpreter_data, - const char *interpreter_stack_data, - struct lttng_interpreter_output *output) -{ - struct bytecode_runtime *bytecode = interpreter_data; - struct lttng_ctx *ctx = rcu_dereference(*bytecode->p.pctx); - void *pc, *next_pc, *start_pc; - int ret = -EINVAL; - uint64_t retval = 0; - struct estack _stack; - struct estack *stack = &_stack; - register int64_t ax = 0, bx = 0; - register enum entry_type ax_t = REG_UNKNOWN, bx_t = REG_UNKNOWN; - register int top = FILTER_STACK_EMPTY; -#ifndef INTERPRETER_USE_SWITCH - static void *dispatch[NR_FILTER_OPS] = { - [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN, - - [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN, - - /* binary */ - [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL, - [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV, - [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD, - [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS, - [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS, - [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT, - [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT, - [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND, - [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR, - [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR, - - /* binary comparators */ - [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ, - [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE, - [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT, - [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT, - [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE, - [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE, - - /* string binary comparator */ - [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING, - [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING, - [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING, - [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING, - [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING, - [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING, - - /* globbing pattern binary comparator */ - [ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING, - [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING, - - /* s64 binary comparator */ - [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64, - [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64, - [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64, - [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64, - [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64, - [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64, - - /* double binary comparator */ - [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE, - [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE, - [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE, - [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE, - [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE, - [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE, - - /* Mixed S64-double binary comparators */ - [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64, - [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64, - [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64, - [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64, - [ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64, - [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64, - - [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE, - [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE, - [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE, - [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE, - [ FILTER_OP_GE_S64_DOUBLE 
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE, - [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE, - - /* unary */ - [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS, - [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS, - [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT, - [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64, - [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64, - [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64, - [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE, - [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE, - [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE, - - /* logical */ - [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND, - [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR, - - /* load field ref */ - [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF, - [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING, - [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE, - [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64, - [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE, - - /* load from immediate operand */ - [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING, - [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING, - [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64, - [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE, - - /* cast */ - [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64, - [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64, - [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP, - - /* get context ref */ - [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF, - [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING, - [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64, - [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE, - - /* Instructions for recursive traversal through composed types. 
*/ - [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT, - [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT, - [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT, - - [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL, - [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD, - [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16, - [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64, - - [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD, - [ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8, - [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16, - [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32, - [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64, - [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8, - [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16, - [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32, - [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64, - [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING, - [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE, - [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE, - - [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT, - - [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64, - }; -#endif /* #ifndef INTERPRETER_USE_SWITCH */ - - START_OP - - OP(FILTER_OP_UNKNOWN): - OP(FILTER_OP_LOAD_FIELD_REF): -#ifdef INTERPRETER_USE_SWITCH - default: -#endif /* INTERPRETER_USE_SWITCH */ - ERR("unknown bytecode op %u", - (unsigned int) *(filter_opcode_t *) pc); - ret = -EINVAL; - goto end; - - OP(FILTER_OP_RETURN): - /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */ - /* Handle dynamic typing. */ - switch (estack_ax_t) { - case REG_S64: - case REG_U64: - retval = !!estack_ax_v; - break; - case REG_DOUBLE: - case REG_STRING: - case REG_PTR: - if (!output) { - ret = -EINVAL; - goto end; - } - retval = 0; - break; - case REG_STAR_GLOB_STRING: - case REG_UNKNOWN: - default: - ret = -EINVAL; - goto end; - } - ret = 0; - goto end; - - OP(FILTER_OP_RETURN_S64): - /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */ - retval = !!estack_ax_v; - ret = 0; - goto end; - - /* binary */ - OP(FILTER_OP_MUL): - OP(FILTER_OP_DIV): - OP(FILTER_OP_MOD): - OP(FILTER_OP_PLUS): - OP(FILTER_OP_MINUS): - ERR("unsupported bytecode op %u", - (unsigned int) *(filter_opcode_t *) pc); - ret = -EINVAL; - goto end; - - OP(FILTER_OP_EQ): - { - /* Dynamic typing. 
*/ - switch (estack_ax_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - JUMP_TO(FILTER_OP_EQ_S64); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_EQ_DOUBLE_S64); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_DOUBLE: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - JUMP_TO(FILTER_OP_EQ_S64_DOUBLE); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_EQ_DOUBLE); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_STRING: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: /* Fall-through */ - case REG_DOUBLE: - ret = -EINVAL; - goto end; - case REG_STRING: - JUMP_TO(FILTER_OP_EQ_STRING); - case REG_STAR_GLOB_STRING: - JUMP_TO(FILTER_OP_EQ_STAR_GLOB_STRING); - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_STAR_GLOB_STRING: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: /* Fall-through */ - case REG_DOUBLE: - ret = -EINVAL; - goto end; - case REG_STRING: - JUMP_TO(FILTER_OP_EQ_STAR_GLOB_STRING); - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - default: - ERR("Unknown filter register type (%d)", - (int) estack_ax_t); - ret = -EINVAL; - goto end; - } - } - OP(FILTER_OP_NE): - { - /* Dynamic typing. 
*/ - switch (estack_ax_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - JUMP_TO(FILTER_OP_NE_S64); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_NE_DOUBLE_S64); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_DOUBLE: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - JUMP_TO(FILTER_OP_NE_S64_DOUBLE); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_NE_DOUBLE); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_STRING: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - case REG_DOUBLE: - ret = -EINVAL; - goto end; - case REG_STRING: - JUMP_TO(FILTER_OP_NE_STRING); - case REG_STAR_GLOB_STRING: - JUMP_TO(FILTER_OP_NE_STAR_GLOB_STRING); - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_STAR_GLOB_STRING: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - case REG_DOUBLE: - ret = -EINVAL; - goto end; - case REG_STRING: - JUMP_TO(FILTER_OP_NE_STAR_GLOB_STRING); - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - default: - ERR("Unknown filter register type (%d)", - (int) estack_ax_t); - ret = -EINVAL; - goto end; - } - } - OP(FILTER_OP_GT): - { - /* Dynamic typing. */ - switch (estack_ax_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - JUMP_TO(FILTER_OP_GT_S64); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_GT_DOUBLE_S64); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_DOUBLE: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - JUMP_TO(FILTER_OP_GT_S64_DOUBLE); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_GT_DOUBLE); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_STRING: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: /* Fall-through */ - case REG_DOUBLE: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - case REG_STRING: - JUMP_TO(FILTER_OP_GT_STRING); - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - default: - ERR("Unknown filter register type (%d)", - (int) estack_ax_t); - ret = -EINVAL; - goto end; - } - } - OP(FILTER_OP_LT): - { - /* Dynamic typing. 
*/ - switch (estack_ax_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - JUMP_TO(FILTER_OP_LT_S64); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_LT_DOUBLE_S64); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_DOUBLE: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - JUMP_TO(FILTER_OP_LT_S64_DOUBLE); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_LT_DOUBLE); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_STRING: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: /* Fall-through */ - case REG_DOUBLE: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - case REG_STRING: - JUMP_TO(FILTER_OP_LT_STRING); - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - default: - ERR("Unknown filter register type (%d)", - (int) estack_ax_t); - ret = -EINVAL; - goto end; - } - } - OP(FILTER_OP_GE): - { - /* Dynamic typing. */ - switch (estack_ax_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - JUMP_TO(FILTER_OP_GE_S64); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_GE_DOUBLE_S64); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_DOUBLE: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - JUMP_TO(FILTER_OP_GE_S64_DOUBLE); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_GE_DOUBLE); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_STRING: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: /* Fall-through */ - case REG_DOUBLE: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - case REG_STRING: - JUMP_TO(FILTER_OP_GE_STRING); - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - default: - ERR("Unknown filter register type (%d)", - (int) estack_ax_t); - ret = -EINVAL; - goto end; - } - } - OP(FILTER_OP_LE): - { - /* Dynamic typing. 
*/ - switch (estack_ax_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - JUMP_TO(FILTER_OP_LE_S64); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_LE_DOUBLE_S64); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_DOUBLE: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: - JUMP_TO(FILTER_OP_LE_S64_DOUBLE); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_LE_DOUBLE); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - case REG_STRING: - switch (estack_bx_t) { - case REG_S64: /* Fall-through */ - case REG_U64: /* Fall-through */ - case REG_DOUBLE: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - case REG_STRING: - JUMP_TO(FILTER_OP_LE_STRING); - default: - ERR("Unknown filter register type (%d)", - (int) estack_bx_t); - ret = -EINVAL; - goto end; - } - break; - default: - ERR("Unknown filter register type (%d)", - (int) estack_ax_t); - ret = -EINVAL; - goto end; - } - } - - OP(FILTER_OP_EQ_STRING): - { - int res; - - res = (stack_strcmp(stack, top, "==") == 0); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_NE_STRING): - { - int res; - - res = (stack_strcmp(stack, top, "!=") != 0); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_GT_STRING): - { - int res; - - res = (stack_strcmp(stack, top, ">") > 0); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_LT_STRING): - { - int res; - - res = (stack_strcmp(stack, top, "<") < 0); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_GE_STRING): - { - int res; - - res = (stack_strcmp(stack, top, ">=") >= 0); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_LE_STRING): - { - int res; - - res = (stack_strcmp(stack, top, "<=") <= 0); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - - OP(FILTER_OP_EQ_STAR_GLOB_STRING): - { - int res; - - res = (stack_star_glob_match(stack, top, "==") == 0); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_NE_STAR_GLOB_STRING): - { - int res; - - res = (stack_star_glob_match(stack, top, "!=") != 0); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - - OP(FILTER_OP_EQ_S64): - { - int res; - - res = (estack_bx_v == estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_NE_S64): - { - int res; - - res = (estack_bx_v != estack_ax_v); - 
estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_GT_S64): - { - int res; - - res = (estack_bx_v > estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_LT_S64): - { - int res; - - res = (estack_bx_v < estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_GE_S64): - { - int res; - - res = (estack_bx_v >= estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_LE_S64): - { - int res; - - res = (estack_bx_v <= estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - - OP(FILTER_OP_EQ_DOUBLE): - { - int res; - - res = (estack_bx(stack, top)->u.d == estack_ax(stack, top)->u.d); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_NE_DOUBLE): - { - int res; - - res = (estack_bx(stack, top)->u.d != estack_ax(stack, top)->u.d); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_GT_DOUBLE): - { - int res; - - res = (estack_bx(stack, top)->u.d > estack_ax(stack, top)->u.d); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_LT_DOUBLE): - { - int res; - - res = (estack_bx(stack, top)->u.d < estack_ax(stack, top)->u.d); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_GE_DOUBLE): - { - int res; - - res = (estack_bx(stack, top)->u.d >= estack_ax(stack, top)->u.d); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_LE_DOUBLE): - { - int res; - - res = (estack_bx(stack, top)->u.d <= estack_ax(stack, top)->u.d); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - - /* Mixed S64-double binary comparators */ - OP(FILTER_OP_EQ_DOUBLE_S64): - { - int res; - - res = (estack_bx(stack, top)->u.d == estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_NE_DOUBLE_S64): - { - int res; - - res = (estack_bx(stack, top)->u.d != estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_GT_DOUBLE_S64): - { - int res; - - res = (estack_bx(stack, top)->u.d > estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_LT_DOUBLE_S64): - { - int res; - - res = (estack_bx(stack, top)->u.d < estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - 
OP(FILTER_OP_GE_DOUBLE_S64): - { - int res; - - res = (estack_bx(stack, top)->u.d >= estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_LE_DOUBLE_S64): - { - int res; - - res = (estack_bx(stack, top)->u.d <= estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - - OP(FILTER_OP_EQ_S64_DOUBLE): - { - int res; - - res = (estack_bx_v == estack_ax(stack, top)->u.d); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_NE_S64_DOUBLE): - { - int res; - - res = (estack_bx_v != estack_ax(stack, top)->u.d); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_GT_S64_DOUBLE): - { - int res; - - res = (estack_bx_v > estack_ax(stack, top)->u.d); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_LT_S64_DOUBLE): - { - int res; - - res = (estack_bx_v < estack_ax(stack, top)->u.d); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_GE_S64_DOUBLE): - { - int res; - - res = (estack_bx_v >= estack_ax(stack, top)->u.d); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_LE_S64_DOUBLE): - { - int res; - - res = (estack_bx_v <= estack_ax(stack, top)->u.d); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_S64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_BIT_RSHIFT): - { - int64_t res; - - if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { - ret = -EINVAL; - goto end; - } - - /* Catch undefined behavior. */ - if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) { - ret = -EINVAL; - goto end; - } - res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_U64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_BIT_LSHIFT): - { - int64_t res; - - if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { - ret = -EINVAL; - goto end; - } - - /* Catch undefined behavior. 
*/ - if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) { - ret = -EINVAL; - goto end; - } - res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_U64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_BIT_AND): - { - int64_t res; - - if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { - ret = -EINVAL; - goto end; - } - - res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_U64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_BIT_OR): - { - int64_t res; - - if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { - ret = -EINVAL; - goto end; - } - - res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_U64; - next_pc += sizeof(struct binary_op); - PO; - } - OP(FILTER_OP_BIT_XOR): - { - int64_t res; - - if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { - ret = -EINVAL; - goto end; - } - - res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v); - estack_pop(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = res; - estack_ax_t = REG_U64; - next_pc += sizeof(struct binary_op); - PO; - } - - /* unary */ - OP(FILTER_OP_UNARY_PLUS): - { - /* Dynamic typing. */ - switch (estack_ax_t) { - case REG_S64: /* Fall-through. */ - case REG_U64: - JUMP_TO(FILTER_OP_UNARY_PLUS_S64); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_UNARY_PLUS_DOUBLE); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_ax_t); - ret = -EINVAL; - goto end; - } - } - OP(FILTER_OP_UNARY_MINUS): - { - /* Dynamic typing. */ - switch (estack_ax_t) { - case REG_S64: /* Fall-through. */ - case REG_U64: - JUMP_TO(FILTER_OP_UNARY_MINUS_S64); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_UNARY_MINUS_DOUBLE); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_ax_t); - ret = -EINVAL; - goto end; - } - } - OP(FILTER_OP_UNARY_NOT): - { - /* Dynamic typing. */ - switch (estack_ax_t) { - case REG_S64: /* Fall-through. */ - case REG_U64: - JUMP_TO(FILTER_OP_UNARY_NOT_S64); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_UNARY_NOT_DOUBLE); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_ax_t); - ret = -EINVAL; - goto end; - } - next_pc += sizeof(struct unary_op); - PO; - } - - OP(FILTER_OP_UNARY_BIT_NOT): - { - /* Dynamic typing. 
*/ - if (!IS_INTEGER_REGISTER(estack_ax_t)) { - ret = -EINVAL; - goto end; - } - - estack_ax_v = ~(uint64_t) estack_ax_v; - estack_ax_t = REG_U64; - next_pc += sizeof(struct unary_op); - PO; - } - - OP(FILTER_OP_UNARY_PLUS_S64): - OP(FILTER_OP_UNARY_PLUS_DOUBLE): - { - next_pc += sizeof(struct unary_op); - PO; - } - OP(FILTER_OP_UNARY_MINUS_S64): - { - estack_ax_v = -estack_ax_v; - next_pc += sizeof(struct unary_op); - PO; - } - OP(FILTER_OP_UNARY_MINUS_DOUBLE): - { - estack_ax(stack, top)->u.d = -estack_ax(stack, top)->u.d; - next_pc += sizeof(struct unary_op); - PO; - } - OP(FILTER_OP_UNARY_NOT_S64): - { - estack_ax_v = !estack_ax_v; - estack_ax_t = REG_S64; - next_pc += sizeof(struct unary_op); - PO; - } - OP(FILTER_OP_UNARY_NOT_DOUBLE): - { - estack_ax_v = !estack_ax(stack, top)->u.d; - estack_ax_t = REG_S64; - next_pc += sizeof(struct unary_op); - PO; - } - - /* logical */ - OP(FILTER_OP_AND): - { - struct logical_op *insn = (struct logical_op *) pc; - - if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) { - ret = -EINVAL; - goto end; - } - /* If AX is 0, skip and evaluate to 0 */ - if (unlikely(estack_ax_v == 0)) { - dbg_printf("Jumping to bytecode offset %u\n", - (unsigned int) insn->skip_offset); - next_pc = start_pc + insn->skip_offset; - } else { - /* Pop 1 when jump not taken */ - estack_pop(stack, top, ax, bx, ax_t, bx_t); - next_pc += sizeof(struct logical_op); - } - PO; - } - OP(FILTER_OP_OR): - { - struct logical_op *insn = (struct logical_op *) pc; - - if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) { - ret = -EINVAL; - goto end; - } - /* If AX is nonzero, skip and evaluate to 1 */ - if (unlikely(estack_ax_v != 0)) { - estack_ax_v = 1; - dbg_printf("Jumping to bytecode offset %u\n", - (unsigned int) insn->skip_offset); - next_pc = start_pc + insn->skip_offset; - } else { - /* Pop 1 when jump not taken */ - estack_pop(stack, top, ax, bx, ax_t, bx_t); - next_pc += sizeof(struct logical_op); - } - PO; - } - - - /* load field ref */ - OP(FILTER_OP_LOAD_FIELD_REF_STRING): - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - - dbg_printf("load field ref offset %u type string\n", - ref->offset); - estack_push(stack, top, ax, bx, ax_t, bx_t); - estack_ax(stack, top)->u.s.str = - *(const char * const *) &interpreter_stack_data[ref->offset]; - if (unlikely(!estack_ax(stack, top)->u.s.str)) { - dbg_printf("Filter warning: loading a NULL string.\n"); - ret = -EINVAL; - goto end; - } - estack_ax(stack, top)->u.s.seq_len = SIZE_MAX; - estack_ax(stack, top)->u.s.literal_type = - ESTACK_STRING_LITERAL_TYPE_NONE; - estack_ax_t = REG_STRING; - dbg_printf("ref load string %s\n", estack_ax(stack, top)->u.s.str); - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - PO; - } - - OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE): - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - - dbg_printf("load field ref offset %u type sequence\n", - ref->offset); - estack_push(stack, top, ax, bx, ax_t, bx_t); - estack_ax(stack, top)->u.s.seq_len = - *(unsigned long *) &interpreter_stack_data[ref->offset]; - estack_ax(stack, top)->u.s.str = - *(const char **) (&interpreter_stack_data[ref->offset - + sizeof(unsigned long)]); - estack_ax_t = REG_STRING; - if (unlikely(!estack_ax(stack, top)->u.s.str)) { - dbg_printf("Filter warning: loading a NULL sequence.\n"); - ret = -EINVAL; - goto end; - } - estack_ax(stack, top)->u.s.literal_type = - ESTACK_STRING_LITERAL_TYPE_NONE; - next_pc 
+= sizeof(struct load_op) + sizeof(struct field_ref); - PO; - } - - OP(FILTER_OP_LOAD_FIELD_REF_S64): - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - - dbg_printf("load field ref offset %u type s64\n", - ref->offset); - estack_push(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = - ((struct literal_numeric *) &interpreter_stack_data[ref->offset])->v; - estack_ax_t = REG_S64; - dbg_printf("ref load s64 %" PRIi64 "\n", estack_ax_v); - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - PO; - } - - OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE): - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - - dbg_printf("load field ref offset %u type double\n", - ref->offset); - estack_push(stack, top, ax, bx, ax_t, bx_t); - memcpy(&estack_ax(stack, top)->u.d, &interpreter_stack_data[ref->offset], - sizeof(struct literal_double)); - estack_ax_t = REG_DOUBLE; - dbg_printf("ref load double %g\n", estack_ax(stack, top)->u.d); - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - PO; - } - - /* load from immediate operand */ - OP(FILTER_OP_LOAD_STRING): - { - struct load_op *insn = (struct load_op *) pc; - - dbg_printf("load string %s\n", insn->data); - estack_push(stack, top, ax, bx, ax_t, bx_t); - estack_ax(stack, top)->u.s.str = insn->data; - estack_ax(stack, top)->u.s.seq_len = SIZE_MAX; - estack_ax(stack, top)->u.s.literal_type = - ESTACK_STRING_LITERAL_TYPE_PLAIN; - estack_ax_t = REG_STRING; - next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; - PO; - } - - OP(FILTER_OP_LOAD_STAR_GLOB_STRING): - { - struct load_op *insn = (struct load_op *) pc; - - dbg_printf("load globbing pattern %s\n", insn->data); - estack_push(stack, top, ax, bx, ax_t, bx_t); - estack_ax(stack, top)->u.s.str = insn->data; - estack_ax(stack, top)->u.s.seq_len = SIZE_MAX; - estack_ax(stack, top)->u.s.literal_type = - ESTACK_STRING_LITERAL_TYPE_STAR_GLOB; - estack_ax_t = REG_STAR_GLOB_STRING; - next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; - PO; - } - - OP(FILTER_OP_LOAD_S64): - { - struct load_op *insn = (struct load_op *) pc; - - estack_push(stack, top, ax, bx, ax_t, bx_t); - estack_ax_v = ((struct literal_numeric *) insn->data)->v; - estack_ax_t = REG_S64; - dbg_printf("load s64 %" PRIi64 "\n", estack_ax_v); - next_pc += sizeof(struct load_op) - + sizeof(struct literal_numeric); - PO; - } - - OP(FILTER_OP_LOAD_DOUBLE): - { - struct load_op *insn = (struct load_op *) pc; - - estack_push(stack, top, ax, bx, ax_t, bx_t); - memcpy(&estack_ax(stack, top)->u.d, insn->data, - sizeof(struct literal_double)); - estack_ax_t = REG_DOUBLE; - dbg_printf("load double %g\n", estack_ax(stack, top)->u.d); - next_pc += sizeof(struct load_op) - + sizeof(struct literal_double); - PO; - } - - /* cast */ - OP(FILTER_OP_CAST_TO_S64): - { - /* Dynamic typing. 
*/ - switch (estack_ax_t) { - case REG_S64: - JUMP_TO(FILTER_OP_CAST_NOP); - case REG_DOUBLE: - JUMP_TO(FILTER_OP_CAST_DOUBLE_TO_S64); - case REG_U64: - estack_ax_t = REG_S64; - next_pc += sizeof(struct cast_op); - case REG_STRING: /* Fall-through */ - case REG_STAR_GLOB_STRING: - ret = -EINVAL; - goto end; - default: - ERR("Unknown filter register type (%d)", - (int) estack_ax_t); - ret = -EINVAL; - goto end; - } - } - - OP(FILTER_OP_CAST_DOUBLE_TO_S64): - { - estack_ax_v = (int64_t) estack_ax(stack, top)->u.d; - estack_ax_t = REG_S64; - next_pc += sizeof(struct cast_op); - PO; - } - - OP(FILTER_OP_CAST_NOP): - { - next_pc += sizeof(struct cast_op); - PO; - } - - /* get context ref */ - OP(FILTER_OP_GET_CONTEXT_REF): - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - struct lttng_ctx_field *ctx_field; - struct lttng_ctx_value v; - - dbg_printf("get context ref offset %u type dynamic\n", - ref->offset); - ctx_field = &ctx->fields[ref->offset]; - ctx_field->get_value(ctx_field, &v); - estack_push(stack, top, ax, bx, ax_t, bx_t); - switch (v.sel) { - case LTTNG_UST_DYNAMIC_TYPE_NONE: - ret = -EINVAL; - goto end; - case LTTNG_UST_DYNAMIC_TYPE_S64: - estack_ax_v = v.u.s64; - estack_ax_t = REG_S64; - dbg_printf("ref get context dynamic s64 %" PRIi64 "\n", estack_ax_v); - break; - case LTTNG_UST_DYNAMIC_TYPE_DOUBLE: - estack_ax(stack, top)->u.d = v.u.d; - estack_ax_t = REG_DOUBLE; - dbg_printf("ref get context dynamic double %g\n", estack_ax(stack, top)->u.d); - break; - case LTTNG_UST_DYNAMIC_TYPE_STRING: - estack_ax(stack, top)->u.s.str = v.u.str; - if (unlikely(!estack_ax(stack, top)->u.s.str)) { - dbg_printf("Filter warning: loading a NULL string.\n"); - ret = -EINVAL; - goto end; - } - estack_ax(stack, top)->u.s.seq_len = SIZE_MAX; - estack_ax(stack, top)->u.s.literal_type = - ESTACK_STRING_LITERAL_TYPE_NONE; - dbg_printf("ref get context dynamic string %s\n", estack_ax(stack, top)->u.s.str); - estack_ax_t = REG_STRING; - break; - default: - dbg_printf("Filter warning: unknown dynamic type (%d).\n", (int) v.sel); - ret = -EINVAL; - goto end; - } - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - PO; - } - - OP(FILTER_OP_GET_CONTEXT_REF_STRING): - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - struct lttng_ctx_field *ctx_field; - struct lttng_ctx_value v; - - dbg_printf("get context ref offset %u type string\n", - ref->offset); - ctx_field = &ctx->fields[ref->offset]; - ctx_field->get_value(ctx_field, &v); - estack_push(stack, top, ax, bx, ax_t, bx_t); - estack_ax(stack, top)->u.s.str = v.u.str; - if (unlikely(!estack_ax(stack, top)->u.s.str)) { - dbg_printf("Filter warning: loading a NULL string.\n"); - ret = -EINVAL; - goto end; - } - estack_ax(stack, top)->u.s.seq_len = SIZE_MAX; - estack_ax(stack, top)->u.s.literal_type = - ESTACK_STRING_LITERAL_TYPE_NONE; - estack_ax_t = REG_STRING; - dbg_printf("ref get context string %s\n", estack_ax(stack, top)->u.s.str); - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - PO; - } - - OP(FILTER_OP_GET_CONTEXT_REF_S64): - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - struct lttng_ctx_field *ctx_field; - struct lttng_ctx_value v; - - dbg_printf("get context ref offset %u type s64\n", - ref->offset); - ctx_field = &ctx->fields[ref->offset]; - ctx_field->get_value(ctx_field, &v); - estack_push(stack, top, ax, bx, ax_t, bx_t); - 
estack_ax_v = v.u.s64; - estack_ax_t = REG_S64; - dbg_printf("ref get context s64 %" PRIi64 "\n", estack_ax_v); - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - PO; - } - - OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE): - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - struct lttng_ctx_field *ctx_field; - struct lttng_ctx_value v; - - dbg_printf("get context ref offset %u type double\n", - ref->offset); - ctx_field = &ctx->fields[ref->offset]; - ctx_field->get_value(ctx_field, &v); - estack_push(stack, top, ax, bx, ax_t, bx_t); - memcpy(&estack_ax(stack, top)->u.d, &v.u.d, sizeof(struct literal_double)); - estack_ax_t = REG_DOUBLE; - dbg_printf("ref get context double %g\n", estack_ax(stack, top)->u.d); - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - PO; - } - - OP(FILTER_OP_GET_CONTEXT_ROOT): - { - dbg_printf("op get context root\n"); - estack_push(stack, top, ax, bx, ax_t, bx_t); - estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT; - /* "field" only needed for variants. */ - estack_ax(stack, top)->u.ptr.field = NULL; - estack_ax_t = REG_PTR; - next_pc += sizeof(struct load_op); - PO; - } - - OP(FILTER_OP_GET_APP_CONTEXT_ROOT): - { - dbg_printf("op get app context root\n"); - estack_push(stack, top, ax, bx, ax_t, bx_t); - estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_APP_CONTEXT; - /* "field" only needed for variants. */ - estack_ax(stack, top)->u.ptr.field = NULL; - estack_ax_t = REG_PTR; - next_pc += sizeof(struct load_op); - PO; - } - - OP(FILTER_OP_GET_PAYLOAD_ROOT): - { - dbg_printf("op get app payload root\n"); - estack_push(stack, top, ax, bx, ax_t, bx_t); - estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD; - estack_ax(stack, top)->u.ptr.ptr = interpreter_stack_data; - /* "field" only needed for variants. */ - estack_ax(stack, top)->u.ptr.field = NULL; - estack_ax_t = REG_PTR; - next_pc += sizeof(struct load_op); - PO; - } - - OP(FILTER_OP_GET_SYMBOL): - { - dbg_printf("op get symbol\n"); - switch (estack_ax(stack, top)->u.ptr.type) { - case LOAD_OBJECT: - ERR("Nested fields not implemented yet."); - ret = -EINVAL; - goto end; - case LOAD_ROOT_CONTEXT: - case LOAD_ROOT_APP_CONTEXT: - case LOAD_ROOT_PAYLOAD: - /* - * symbol lookup is performed by - * specialization. - */ - ret = -EINVAL; - goto end; - } - next_pc += sizeof(struct load_op) + sizeof(struct get_symbol); - PO; - } - - OP(FILTER_OP_GET_SYMBOL_FIELD): - { - /* - * Used for first variant encountered in a - * traversal. Variants are not implemented yet. 
- */ - ret = -EINVAL; - goto end; - } - - OP(FILTER_OP_GET_INDEX_U16): - { - struct load_op *insn = (struct load_op *) pc; - struct get_index_u16 *index = (struct get_index_u16 *) insn->data; - - dbg_printf("op get index u16\n"); - ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top)); - if (ret) - goto end; - estack_ax_v = estack_ax(stack, top)->u.v; - estack_ax_t = estack_ax(stack, top)->type; - next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16); - PO; - } - - OP(FILTER_OP_GET_INDEX_U64): - { - struct load_op *insn = (struct load_op *) pc; - struct get_index_u64 *index = (struct get_index_u64 *) insn->data; - - dbg_printf("op get index u64\n"); - ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top)); - if (ret) - goto end; - estack_ax_v = estack_ax(stack, top)->u.v; - estack_ax_t = estack_ax(stack, top)->type; - next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64); - PO; - } - - OP(FILTER_OP_LOAD_FIELD): - { - dbg_printf("op load field\n"); - ret = dynamic_load_field(estack_ax(stack, top)); - if (ret) - goto end; - estack_ax_v = estack_ax(stack, top)->u.v; - estack_ax_t = estack_ax(stack, top)->type; - next_pc += sizeof(struct load_op); - PO; - } - - OP(FILTER_OP_LOAD_FIELD_S8): - { - dbg_printf("op load field s8\n"); - - estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax_t = REG_S64; - next_pc += sizeof(struct load_op); - PO; - } - OP(FILTER_OP_LOAD_FIELD_S16): - { - dbg_printf("op load field s16\n"); - - estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax_t = REG_S64; - next_pc += sizeof(struct load_op); - PO; - } - OP(FILTER_OP_LOAD_FIELD_S32): - { - dbg_printf("op load field s32\n"); - - estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax_t = REG_S64; - next_pc += sizeof(struct load_op); - PO; - } - OP(FILTER_OP_LOAD_FIELD_S64): - { - dbg_printf("op load field s64\n"); - - estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax_t = REG_S64; - next_pc += sizeof(struct load_op); - PO; - } - OP(FILTER_OP_LOAD_FIELD_U8): - { - dbg_printf("op load field u8\n"); - - estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax_t = REG_U64; - next_pc += sizeof(struct load_op); - PO; - } - OP(FILTER_OP_LOAD_FIELD_U16): - { - dbg_printf("op load field u16\n"); - - estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax_t = REG_U64; - next_pc += sizeof(struct load_op); - PO; - } - OP(FILTER_OP_LOAD_FIELD_U32): - { - dbg_printf("op load field u32\n"); - - estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax_t = REG_U64; - next_pc += sizeof(struct load_op); - PO; - } - OP(FILTER_OP_LOAD_FIELD_U64): - { - dbg_printf("op load field u64\n"); - - estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax_t = REG_U64; - next_pc += sizeof(struct load_op); - PO; - } - OP(FILTER_OP_LOAD_FIELD_DOUBLE): - { - dbg_printf("op load field double\n"); - - memcpy(&estack_ax(stack, top)->u.d, - estack_ax(stack, top)->u.ptr.ptr, - sizeof(struct literal_double)); - estack_ax(stack, top)->type = REG_DOUBLE; - next_pc += sizeof(struct load_op); - PO; - } - - OP(FILTER_OP_LOAD_FIELD_STRING): - { - const char *str; - - dbg_printf("op load field string\n"); - str = (const char *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax(stack, top)->u.s.str = str; - if (unlikely(!estack_ax(stack, top)->u.s.str)) { - dbg_printf("Filter warning: loading a NULL string.\n"); - ret = -EINVAL; - goto end; - } - 
estack_ax(stack, top)->u.s.seq_len = SIZE_MAX; - estack_ax(stack, top)->u.s.literal_type = - ESTACK_STRING_LITERAL_TYPE_NONE; - estack_ax(stack, top)->type = REG_STRING; - next_pc += sizeof(struct load_op); - PO; - } - - OP(FILTER_OP_LOAD_FIELD_SEQUENCE): - { - const char *ptr; - - dbg_printf("op load field string sequence\n"); - ptr = estack_ax(stack, top)->u.ptr.ptr; - estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr; - estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long)); - estack_ax(stack, top)->type = REG_STRING; - if (unlikely(!estack_ax(stack, top)->u.s.str)) { - dbg_printf("Filter warning: loading a NULL sequence.\n"); - ret = -EINVAL; - goto end; - } - estack_ax(stack, top)->u.s.literal_type = - ESTACK_STRING_LITERAL_TYPE_NONE; - next_pc += sizeof(struct load_op); - PO; - } - - END_OP -end: - /* Return _DISCARD on error. */ - if (ret) - return LTTNG_FILTER_DISCARD; - - if (output) { - return lttng_bytecode_interpret_format_output(estack_ax(stack, top), - output); - } - - return retval; -} - -uint64_t lttng_filter_interpret_bytecode(void *filter_data, - const char *filter_stack_data) -{ - return bytecode_interpret(filter_data, filter_stack_data, NULL); -} - -#undef START_OP -#undef OP -#undef PO -#undef END_OP diff --git a/liblttng-ust/lttng-filter-specialize.c b/liblttng-ust/lttng-filter-specialize.c deleted file mode 100644 index 947fde29..00000000 --- a/liblttng-ust/lttng-filter-specialize.c +++ /dev/null @@ -1,1528 +0,0 @@ -/* - * lttng-filter-specialize.c - * - * LTTng UST filter code specializer. - * - * Copyright (C) 2010-2016 Mathieu Desnoyers - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#define _LGPL_SOURCE -#include -#include - -#include "lttng-filter.h" -#include -#include "ust-events-internal.h" - -static int lttng_fls(int val) -{ - int r = 32; - unsigned int x = (unsigned int) val; - - if (!x) - return 0; - if (!(x & 0xFFFF0000U)) { - x <<= 16; - r -= 16; - } - if (!(x & 0xFF000000U)) { - x <<= 8; - r -= 8; - } - if (!(x & 0xF0000000U)) { - x <<= 4; - r -= 4; - } - if (!(x & 0xC0000000U)) { - x <<= 2; - r -= 2; - } - if (!(x & 0x80000000U)) { - r -= 1; - } - return r; -} - -static int get_count_order(unsigned int count) -{ - int order; - - order = lttng_fls(count) - 1; - if (count & (count - 1)) - order++; - return order; -} - -static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime, - size_t align, size_t len) -{ - ssize_t ret; - size_t padding = lttng_ust_offset_align(runtime->data_len, align); - size_t new_len = runtime->data_len + padding + len; - size_t new_alloc_len = new_len; - size_t old_alloc_len = runtime->data_alloc_len; - - if (new_len > FILTER_MAX_DATA_LEN) - return -EINVAL; - - if (new_alloc_len > old_alloc_len) { - char *newptr; - - new_alloc_len = - max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1); - newptr = realloc(runtime->data, new_alloc_len); - if (!newptr) - return -ENOMEM; - runtime->data = newptr; - /* We zero directly the memory from start of allocation. */ - memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len); - runtime->data_alloc_len = new_alloc_len; - } - runtime->data_len += padding; - ret = runtime->data_len; - runtime->data_len += len; - return ret; -} - -static ssize_t bytecode_push_data(struct bytecode_runtime *runtime, - const void *p, size_t align, size_t len) -{ - ssize_t offset; - - offset = bytecode_reserve_data(runtime, align, len); - if (offset < 0) - return -ENOMEM; - memcpy(&runtime->data[offset], p, len); - return offset; -} - -static int specialize_load_field(struct vstack_entry *stack_top, - struct load_op *insn) -{ - int ret; - - switch (stack_top->load.type) { - case LOAD_OBJECT: - break; - case LOAD_ROOT_CONTEXT: - case LOAD_ROOT_APP_CONTEXT: - case LOAD_ROOT_PAYLOAD: - default: - dbg_printf("Filter warning: cannot load root, missing field name.\n"); - ret = -EINVAL; - goto end; - } - switch (stack_top->load.object_type) { - case OBJECT_TYPE_S8: - dbg_printf("op load field s8\n"); - stack_top->type = REG_S64; - if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_S8; - break; - case OBJECT_TYPE_S16: - dbg_printf("op load field s16\n"); - stack_top->type = REG_S64; - if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_S16; - break; - case OBJECT_TYPE_S32: - dbg_printf("op load field s32\n"); - stack_top->type = REG_S64; - if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_S32; - break; - case OBJECT_TYPE_S64: - dbg_printf("op load field s64\n"); - stack_top->type = REG_S64; - if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_S64; - break; - case OBJECT_TYPE_U8: - dbg_printf("op load field u8\n"); - stack_top->type = REG_U64; - insn->op = FILTER_OP_LOAD_FIELD_U8; - break; - case OBJECT_TYPE_U16: - dbg_printf("op load field u16\n"); - stack_top->type = REG_U64; - if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_U16; - break; - case OBJECT_TYPE_U32: - dbg_printf("op load field u32\n"); - stack_top->type = REG_U64; - if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_U32; - break; - case OBJECT_TYPE_U64: - dbg_printf("op load field u64\n"); - stack_top->type = REG_U64; - if 
(!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_U64; - break; - case OBJECT_TYPE_DOUBLE: - stack_top->type = REG_DOUBLE; - insn->op = FILTER_OP_LOAD_FIELD_DOUBLE; - break; - case OBJECT_TYPE_STRING: - dbg_printf("op load field string\n"); - stack_top->type = REG_STRING; - insn->op = FILTER_OP_LOAD_FIELD_STRING; - break; - case OBJECT_TYPE_STRING_SEQUENCE: - dbg_printf("op load field string sequence\n"); - stack_top->type = REG_STRING; - insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE; - break; - case OBJECT_TYPE_DYNAMIC: - dbg_printf("op load field dynamic\n"); - stack_top->type = REG_UNKNOWN; - /* Don't specialize load op. */ - break; - case OBJECT_TYPE_SEQUENCE: - case OBJECT_TYPE_ARRAY: - case OBJECT_TYPE_STRUCT: - case OBJECT_TYPE_VARIANT: - ERR("Sequences, arrays, struct and variant cannot be loaded (nested types)."); - ret = -EINVAL; - goto end; - } - return 0; - -end: - return ret; -} - -static int specialize_get_index_object_type(enum object_type *otype, - int signedness, uint32_t elem_len) -{ - switch (elem_len) { - case 8: - if (signedness) - *otype = OBJECT_TYPE_S8; - else - *otype = OBJECT_TYPE_U8; - break; - case 16: - if (signedness) - *otype = OBJECT_TYPE_S16; - else - *otype = OBJECT_TYPE_U16; - break; - case 32: - if (signedness) - *otype = OBJECT_TYPE_S32; - else - *otype = OBJECT_TYPE_U32; - break; - case 64: - if (signedness) - *otype = OBJECT_TYPE_S64; - else - *otype = OBJECT_TYPE_U64; - break; - default: - return -EINVAL; - } - return 0; -} - -static int specialize_get_index(struct bytecode_runtime *runtime, - struct load_op *insn, uint64_t index, - struct vstack_entry *stack_top, - int idx_len) -{ - int ret; - struct filter_get_index_data gid; - ssize_t data_offset; - - memset(&gid, 0, sizeof(gid)); - switch (stack_top->load.type) { - case LOAD_OBJECT: - switch (stack_top->load.object_type) { - case OBJECT_TYPE_ARRAY: - { - const struct lttng_integer_type *integer_type; - const struct lttng_event_field *field; - uint32_t elem_len, num_elems; - int signedness; - - field = stack_top->load.field; - switch (field->type.atype) { - case atype_array: - integer_type = &field->type.u.legacy.array.elem_type.u.basic.integer; - num_elems = field->type.u.legacy.array.length; - break; - case atype_array_nestable: - if (field->type.u.array_nestable.elem_type->atype != atype_integer) { - ret = -EINVAL; - goto end; - } - integer_type = &field->type.u.array_nestable.elem_type->u.integer; - num_elems = field->type.u.array_nestable.length; - break; - default: - ret = -EINVAL; - goto end; - } - elem_len = integer_type->size; - signedness = integer_type->signedness; - if (index >= num_elems) { - ret = -EINVAL; - goto end; - } - ret = specialize_get_index_object_type(&stack_top->load.object_type, - signedness, elem_len); - if (ret) - goto end; - gid.offset = index * (elem_len / CHAR_BIT); - gid.array_len = num_elems * (elem_len / CHAR_BIT); - gid.elem.type = stack_top->load.object_type; - gid.elem.len = elem_len; - if (integer_type->reverse_byte_order) - gid.elem.rev_bo = true; - stack_top->load.rev_bo = gid.elem.rev_bo; - break; - } - case OBJECT_TYPE_SEQUENCE: - { - const struct lttng_integer_type *integer_type; - const struct lttng_event_field *field; - uint32_t elem_len; - int signedness; - - field = stack_top->load.field; - switch (field->type.atype) { - case atype_sequence: - integer_type = &field->type.u.legacy.sequence.elem_type.u.basic.integer; - break; - case atype_sequence_nestable: - if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) { - ret = -EINVAL; 
- goto end; - } - integer_type = &field->type.u.sequence_nestable.elem_type->u.integer; - break; - default: - ret = -EINVAL; - goto end; - } - elem_len = integer_type->size; - signedness = integer_type->signedness; - ret = specialize_get_index_object_type(&stack_top->load.object_type, - signedness, elem_len); - if (ret) - goto end; - gid.offset = index * (elem_len / CHAR_BIT); - gid.elem.type = stack_top->load.object_type; - gid.elem.len = elem_len; - if (integer_type->reverse_byte_order) - gid.elem.rev_bo = true; - stack_top->load.rev_bo = gid.elem.rev_bo; - break; - } - case OBJECT_TYPE_STRUCT: - /* Only generated by the specialize phase. */ - case OBJECT_TYPE_VARIANT: /* Fall-through */ - default: - ERR("Unexpected get index type %d", - (int) stack_top->load.object_type); - ret = -EINVAL; - goto end; - } - break; - case LOAD_ROOT_CONTEXT: - case LOAD_ROOT_APP_CONTEXT: - case LOAD_ROOT_PAYLOAD: - ERR("Index lookup for root field not implemented yet."); - ret = -EINVAL; - goto end; - } - data_offset = bytecode_push_data(runtime, &gid, - __alignof__(gid), sizeof(gid)); - if (data_offset < 0) { - ret = -EINVAL; - goto end; - } - switch (idx_len) { - case 2: - ((struct get_index_u16 *) insn->data)->index = data_offset; - break; - case 8: - ((struct get_index_u64 *) insn->data)->index = data_offset; - break; - default: - ret = -EINVAL; - goto end; - } - - return 0; - -end: - return ret; -} - -static int specialize_context_lookup_name(struct lttng_ctx *ctx, - struct bytecode_runtime *bytecode, - struct load_op *insn) -{ - uint16_t offset; - const char *name; - - offset = ((struct get_symbol *) insn->data)->offset; - name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset; - return lttng_get_context_index(ctx, name); -} - -static int specialize_load_object(const struct lttng_event_field *field, - struct vstack_load *load, bool is_context) -{ - load->type = LOAD_OBJECT; - - switch (field->type.atype) { - case atype_integer: - if (field->type.u.integer.signedness) - load->object_type = OBJECT_TYPE_S64; - else - load->object_type = OBJECT_TYPE_U64; - load->rev_bo = false; - break; - case atype_enum: - case atype_enum_nestable: - { - const struct lttng_integer_type *itype; - - if (field->type.atype == atype_enum) { - itype = &field->type.u.legacy.basic.enumeration.container_type; - } else { - itype = &field->type.u.enum_nestable.container_type->u.integer; - } - if (itype->signedness) - load->object_type = OBJECT_TYPE_S64; - else - load->object_type = OBJECT_TYPE_U64; - load->rev_bo = false; - break; - } - case atype_array: - if (field->type.u.legacy.array.elem_type.atype != atype_integer) { - ERR("Array nesting only supports integer types."); - return -EINVAL; - } - if (is_context) { - load->object_type = OBJECT_TYPE_STRING; - } else { - if (field->type.u.legacy.array.elem_type.u.basic.integer.encoding == lttng_encode_none) { - load->object_type = OBJECT_TYPE_ARRAY; - load->field = field; - } else { - load->object_type = OBJECT_TYPE_STRING_SEQUENCE; - } - } - break; - case atype_array_nestable: - if (field->type.u.array_nestable.elem_type->atype != atype_integer) { - ERR("Array nesting only supports integer types."); - return -EINVAL; - } - if (is_context) { - load->object_type = OBJECT_TYPE_STRING; - } else { - if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) { - load->object_type = OBJECT_TYPE_ARRAY; - load->field = field; - } else { - load->object_type = OBJECT_TYPE_STRING_SEQUENCE; - } - } - break; - case atype_sequence: - if 
(field->type.u.legacy.sequence.elem_type.atype != atype_integer) { - ERR("Sequence nesting only supports integer types."); - return -EINVAL; - } - if (is_context) { - load->object_type = OBJECT_TYPE_STRING; - } else { - if (field->type.u.legacy.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) { - load->object_type = OBJECT_TYPE_SEQUENCE; - load->field = field; - } else { - load->object_type = OBJECT_TYPE_STRING_SEQUENCE; - } - } - break; - case atype_sequence_nestable: - if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) { - ERR("Sequence nesting only supports integer types."); - return -EINVAL; - } - if (is_context) { - load->object_type = OBJECT_TYPE_STRING; - } else { - if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) { - load->object_type = OBJECT_TYPE_SEQUENCE; - load->field = field; - } else { - load->object_type = OBJECT_TYPE_STRING_SEQUENCE; - } - } - break; - - case atype_string: - load->object_type = OBJECT_TYPE_STRING; - break; - case atype_float: - load->object_type = OBJECT_TYPE_DOUBLE; - break; - case atype_dynamic: - load->object_type = OBJECT_TYPE_DYNAMIC; - break; - case atype_struct: - ERR("Structure type cannot be loaded."); - return -EINVAL; - default: - ERR("Unknown type: %d", (int) field->type.atype); - return -EINVAL; - } - return 0; -} - -static int specialize_context_lookup(struct lttng_ctx *ctx, - struct bytecode_runtime *runtime, - struct load_op *insn, - struct vstack_load *load) -{ - int idx, ret; - struct lttng_ctx_field *ctx_field; - struct lttng_event_field *field; - struct filter_get_index_data gid; - ssize_t data_offset; - - idx = specialize_context_lookup_name(ctx, runtime, insn); - if (idx < 0) { - return -ENOENT; - } - ctx_field = &ctx->fields[idx]; - field = &ctx_field->event_field; - ret = specialize_load_object(field, load, true); - if (ret) - return ret; - /* Specialize each get_symbol into a get_index. */ - insn->op = FILTER_OP_GET_INDEX_U16; - memset(&gid, 0, sizeof(gid)); - gid.ctx_index = idx; - gid.elem.type = load->object_type; - gid.elem.rev_bo = load->rev_bo; - gid.field = field; - data_offset = bytecode_push_data(runtime, &gid, - __alignof__(gid), sizeof(gid)); - if (data_offset < 0) { - return -EINVAL; - } - ((struct get_index_u16 *) insn->data)->index = data_offset; - return 0; -} - -static int specialize_app_context_lookup(struct lttng_ctx **pctx, - struct bytecode_runtime *runtime, - struct load_op *insn, - struct vstack_load *load) -{ - uint16_t offset; - const char *orig_name; - char *name = NULL; - int idx, ret; - struct lttng_ctx_field *ctx_field; - struct lttng_event_field *field; - struct filter_get_index_data gid; - ssize_t data_offset; - - offset = ((struct get_symbol *) insn->data)->offset; - orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset; - name = zmalloc(strlen(orig_name) + strlen("$app.") + 1); - if (!name) { - ret = -ENOMEM; - goto end; - } - strcpy(name, "$app."); - strcat(name, orig_name); - idx = lttng_get_context_index(*pctx, name); - if (idx < 0) { - assert(lttng_context_is_app(name)); - ret = lttng_ust_add_app_context_to_ctx_rcu(name, - pctx); - if (ret) - return ret; - idx = lttng_get_context_index(*pctx, name); - if (idx < 0) - return -ENOENT; - } - ctx_field = &(*pctx)->fields[idx]; - field = &ctx_field->event_field; - ret = specialize_load_object(field, load, true); - if (ret) - goto end; - /* Specialize each get_symbol into a get_index. 
*/ - insn->op = FILTER_OP_GET_INDEX_U16; - memset(&gid, 0, sizeof(gid)); - gid.ctx_index = idx; - gid.elem.type = load->object_type; - gid.elem.rev_bo = load->rev_bo; - gid.field = field; - data_offset = bytecode_push_data(runtime, &gid, - __alignof__(gid), sizeof(gid)); - if (data_offset < 0) { - ret = -EINVAL; - goto end; - } - ((struct get_index_u16 *) insn->data)->index = data_offset; - ret = 0; -end: - free(name); - return ret; -} - -static int specialize_payload_lookup(const struct lttng_event_desc *event_desc, - struct bytecode_runtime *runtime, - struct load_op *insn, - struct vstack_load *load) -{ - const char *name; - uint16_t offset; - unsigned int i, nr_fields; - bool found = false; - uint32_t field_offset = 0; - const struct lttng_event_field *field; - int ret; - struct filter_get_index_data gid; - ssize_t data_offset; - - nr_fields = event_desc->nr_fields; - offset = ((struct get_symbol *) insn->data)->offset; - name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset; - for (i = 0; i < nr_fields; i++) { - field = &event_desc->fields[i]; - if (field->u.ext.nofilter) { - continue; - } - if (!strcmp(field->name, name)) { - found = true; - break; - } - /* compute field offset on stack */ - switch (field->type.atype) { - case atype_integer: - case atype_enum: - case atype_enum_nestable: - field_offset += sizeof(int64_t); - break; - case atype_array: - case atype_array_nestable: - case atype_sequence: - case atype_sequence_nestable: - field_offset += sizeof(unsigned long); - field_offset += sizeof(void *); - break; - case atype_string: - field_offset += sizeof(void *); - break; - case atype_float: - field_offset += sizeof(double); - break; - default: - ret = -EINVAL; - goto end; - } - } - if (!found) { - ret = -EINVAL; - goto end; - } - - ret = specialize_load_object(field, load, false); - if (ret) - goto end; - - /* Specialize each get_symbol into a get_index. 
*/ - insn->op = FILTER_OP_GET_INDEX_U16; - memset(&gid, 0, sizeof(gid)); - gid.offset = field_offset; - gid.elem.type = load->object_type; - gid.elem.rev_bo = load->rev_bo; - gid.field = field; - data_offset = bytecode_push_data(runtime, &gid, - __alignof__(gid), sizeof(gid)); - if (data_offset < 0) { - ret = -EINVAL; - goto end; - } - ((struct get_index_u16 *) insn->data)->index = data_offset; - ret = 0; -end: - return ret; -} - -int lttng_filter_specialize_bytecode(const struct lttng_event_desc *event_desc, - struct bytecode_runtime *bytecode) -{ - void *pc, *next_pc, *start_pc; - int ret = -EINVAL; - struct vstack _stack; - struct vstack *stack = &_stack; - struct lttng_ctx **pctx = bytecode->p.pctx; - - vstack_init(stack); - - start_pc = &bytecode->code[0]; - for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; - pc = next_pc) { - switch (*(filter_opcode_t *) pc) { - case FILTER_OP_UNKNOWN: - default: - ERR("unknown bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); - ret = -EINVAL; - goto end; - - case FILTER_OP_RETURN: - if (vstack_ax(stack)->type == REG_S64 || - vstack_ax(stack)->type == REG_U64) - *(filter_opcode_t *) pc = FILTER_OP_RETURN_S64; - ret = 0; - goto end; - - case FILTER_OP_RETURN_S64: - if (vstack_ax(stack)->type != REG_S64 && - vstack_ax(stack)->type != REG_U64) { - ERR("Unexpected register type\n"); - ret = -EINVAL; - goto end; - } - ret = 0; - goto end; - - /* binary */ - case FILTER_OP_MUL: - case FILTER_OP_DIV: - case FILTER_OP_MOD: - case FILTER_OP_PLUS: - case FILTER_OP_MINUS: - ERR("unsupported bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); - ret = -EINVAL; - goto end; - - case FILTER_OP_EQ: - { - struct binary_op *insn = (struct binary_op *) pc; - - switch(vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_STRING: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING) - insn->op = FILTER_OP_EQ_STAR_GLOB_STRING; - else - insn->op = FILTER_OP_EQ_STRING; - break; - case REG_STAR_GLOB_STRING: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - insn->op = FILTER_OP_EQ_STAR_GLOB_STRING; - break; - case REG_S64: - case REG_U64: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_S64 || - vstack_bx(stack)->type == REG_U64) - insn->op = FILTER_OP_EQ_S64; - else - insn->op = FILTER_OP_EQ_DOUBLE_S64; - break; - case REG_DOUBLE: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_S64 || - vstack_bx(stack)->type == REG_U64) - insn->op = FILTER_OP_EQ_S64_DOUBLE; - else - insn->op = FILTER_OP_EQ_DOUBLE; - break; - case REG_UNKNOWN: - break; /* Dynamic typing. 
*/ - } - /* Pop 2, push 1 */ - if (vstack_pop(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct binary_op); - break; - } - - case FILTER_OP_NE: - { - struct binary_op *insn = (struct binary_op *) pc; - - switch(vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_STRING: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING) - insn->op = FILTER_OP_NE_STAR_GLOB_STRING; - else - insn->op = FILTER_OP_NE_STRING; - break; - case REG_STAR_GLOB_STRING: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - insn->op = FILTER_OP_NE_STAR_GLOB_STRING; - break; - case REG_S64: - case REG_U64: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_S64 || - vstack_bx(stack)->type == REG_U64) - insn->op = FILTER_OP_NE_S64; - else - insn->op = FILTER_OP_NE_DOUBLE_S64; - break; - case REG_DOUBLE: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_S64 || - vstack_bx(stack)->type == REG_U64) - insn->op = FILTER_OP_NE_S64_DOUBLE; - else - insn->op = FILTER_OP_NE_DOUBLE; - break; - case REG_UNKNOWN: - break; /* Dynamic typing. */ - } - /* Pop 2, push 1 */ - if (vstack_pop(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct binary_op); - break; - } - - case FILTER_OP_GT: - { - struct binary_op *insn = (struct binary_op *) pc; - - switch(vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_STAR_GLOB_STRING: - ERR("invalid register type for > binary operator\n"); - ret = -EINVAL; - goto end; - case REG_STRING: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - insn->op = FILTER_OP_GT_STRING; - break; - case REG_S64: - case REG_U64: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_S64 || - vstack_bx(stack)->type == REG_U64) - insn->op = FILTER_OP_GT_S64; - else - insn->op = FILTER_OP_GT_DOUBLE_S64; - break; - case REG_DOUBLE: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_S64 || - vstack_bx(stack)->type == REG_U64) - insn->op = FILTER_OP_GT_S64_DOUBLE; - else - insn->op = FILTER_OP_GT_DOUBLE; - break; - case REG_UNKNOWN: - break; /* Dynamic typing. */ - } - /* Pop 2, push 1 */ - if (vstack_pop(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct binary_op); - break; - } - - case FILTER_OP_LT: - { - struct binary_op *insn = (struct binary_op *) pc; - - switch(vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_STAR_GLOB_STRING: - ERR("invalid register type for < binary operator\n"); - ret = -EINVAL; - goto end; - case REG_STRING: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - insn->op = FILTER_OP_LT_STRING; - break; - case REG_S64: - case REG_U64: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_S64 || - vstack_bx(stack)->type == REG_U64) - insn->op = FILTER_OP_LT_S64; - else - insn->op = FILTER_OP_LT_DOUBLE_S64; - break; - case REG_DOUBLE: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_S64 || - vstack_bx(stack)->type == REG_U64) - insn->op = FILTER_OP_LT_S64_DOUBLE; - else - insn->op = FILTER_OP_LT_DOUBLE; - break; - case REG_UNKNOWN: - break; /* Dynamic typing. 
*/ - } - /* Pop 2, push 1 */ - if (vstack_pop(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct binary_op); - break; - } - - case FILTER_OP_GE: - { - struct binary_op *insn = (struct binary_op *) pc; - - switch(vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_STAR_GLOB_STRING: - ERR("invalid register type for >= binary operator\n"); - ret = -EINVAL; - goto end; - case REG_STRING: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - insn->op = FILTER_OP_GE_STRING; - break; - case REG_S64: - case REG_U64: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_S64 || - vstack_bx(stack)->type == REG_U64) - insn->op = FILTER_OP_GE_S64; - else - insn->op = FILTER_OP_GE_DOUBLE_S64; - break; - case REG_DOUBLE: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_S64 || - vstack_bx(stack)->type == REG_U64) - insn->op = FILTER_OP_GE_S64_DOUBLE; - else - insn->op = FILTER_OP_GE_DOUBLE; - break; - case REG_UNKNOWN: - break; /* Dynamic typing. */ - } - /* Pop 2, push 1 */ - if (vstack_pop(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_U64; - next_pc += sizeof(struct binary_op); - break; - } - case FILTER_OP_LE: - { - struct binary_op *insn = (struct binary_op *) pc; - - switch(vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_STAR_GLOB_STRING: - ERR("invalid register type for <= binary operator\n"); - ret = -EINVAL; - goto end; - case REG_STRING: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - insn->op = FILTER_OP_LE_STRING; - break; - case REG_S64: - case REG_U64: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_S64 || - vstack_bx(stack)->type == REG_U64) - insn->op = FILTER_OP_LE_S64; - else - insn->op = FILTER_OP_LE_DOUBLE_S64; - break; - case REG_DOUBLE: - if (vstack_bx(stack)->type == REG_UNKNOWN) - break; - if (vstack_bx(stack)->type == REG_S64 || - vstack_bx(stack)->type == REG_U64) - insn->op = FILTER_OP_LE_S64_DOUBLE; - else - insn->op = FILTER_OP_LE_DOUBLE; - break; - case REG_UNKNOWN: - break; /* Dynamic typing. 
*/ - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct binary_op); - break; - } - - case FILTER_OP_EQ_STRING: - case FILTER_OP_NE_STRING: - case FILTER_OP_GT_STRING: - case FILTER_OP_LT_STRING: - case FILTER_OP_GE_STRING: - case FILTER_OP_LE_STRING: - case FILTER_OP_EQ_STAR_GLOB_STRING: - case FILTER_OP_NE_STAR_GLOB_STRING: - case FILTER_OP_EQ_S64: - case FILTER_OP_NE_S64: - case FILTER_OP_GT_S64: - case FILTER_OP_LT_S64: - case FILTER_OP_GE_S64: - case FILTER_OP_LE_S64: - case FILTER_OP_EQ_DOUBLE: - case FILTER_OP_NE_DOUBLE: - case FILTER_OP_GT_DOUBLE: - case FILTER_OP_LT_DOUBLE: - case FILTER_OP_GE_DOUBLE: - case FILTER_OP_LE_DOUBLE: - case FILTER_OP_EQ_DOUBLE_S64: - case FILTER_OP_NE_DOUBLE_S64: - case FILTER_OP_GT_DOUBLE_S64: - case FILTER_OP_LT_DOUBLE_S64: - case FILTER_OP_GE_DOUBLE_S64: - case FILTER_OP_LE_DOUBLE_S64: - case FILTER_OP_EQ_S64_DOUBLE: - case FILTER_OP_NE_S64_DOUBLE: - case FILTER_OP_GT_S64_DOUBLE: - case FILTER_OP_LT_S64_DOUBLE: - case FILTER_OP_GE_S64_DOUBLE: - case FILTER_OP_LE_S64_DOUBLE: - { - /* Pop 2, push 1 */ - if (vstack_pop(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct binary_op); - break; - } - - case FILTER_OP_BIT_RSHIFT: - case FILTER_OP_BIT_LSHIFT: - case FILTER_OP_BIT_AND: - case FILTER_OP_BIT_OR: - case FILTER_OP_BIT_XOR: - { - /* Pop 2, push 1 */ - if (vstack_pop(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct binary_op); - break; - } - - /* unary */ - case FILTER_OP_UNARY_PLUS: - { - struct unary_op *insn = (struct unary_op *) pc; - - switch(vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_S64: - case REG_U64: - insn->op = FILTER_OP_UNARY_PLUS_S64; - break; - case REG_DOUBLE: - insn->op = FILTER_OP_UNARY_PLUS_DOUBLE; - break; - case REG_UNKNOWN: /* Dynamic typing. */ - break; - } - /* Pop 1, push 1 */ - next_pc += sizeof(struct unary_op); - break; - } - - case FILTER_OP_UNARY_MINUS: - { - struct unary_op *insn = (struct unary_op *) pc; - - switch(vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_S64: - case REG_U64: - insn->op = FILTER_OP_UNARY_MINUS_S64; - break; - case REG_DOUBLE: - insn->op = FILTER_OP_UNARY_MINUS_DOUBLE; - break; - case REG_UNKNOWN: /* Dynamic typing. */ - break; - } - /* Pop 1, push 1 */ - next_pc += sizeof(struct unary_op); - break; - } - - case FILTER_OP_UNARY_NOT: - { - struct unary_op *insn = (struct unary_op *) pc; - - switch(vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_S64: - case REG_U64: - insn->op = FILTER_OP_UNARY_NOT_S64; - break; - case REG_DOUBLE: - insn->op = FILTER_OP_UNARY_NOT_DOUBLE; - break; - case REG_UNKNOWN: /* Dynamic typing. 
*/ - break; - } - /* Pop 1, push 1 */ - next_pc += sizeof(struct unary_op); - break; - } - - case FILTER_OP_UNARY_BIT_NOT: - { - /* Pop 1, push 1 */ - next_pc += sizeof(struct unary_op); - break; - } - - case FILTER_OP_UNARY_PLUS_S64: - case FILTER_OP_UNARY_MINUS_S64: - case FILTER_OP_UNARY_NOT_S64: - case FILTER_OP_UNARY_PLUS_DOUBLE: - case FILTER_OP_UNARY_MINUS_DOUBLE: - case FILTER_OP_UNARY_NOT_DOUBLE: - { - /* Pop 1, push 1 */ - next_pc += sizeof(struct unary_op); - break; - } - - /* logical */ - case FILTER_OP_AND: - case FILTER_OP_OR: - { - /* Continue to next instruction */ - /* Pop 1 when jump not taken */ - if (vstack_pop(stack)) { - ret = -EINVAL; - goto end; - } - next_pc += sizeof(struct logical_op); - break; - } - - /* load field ref */ - case FILTER_OP_LOAD_FIELD_REF: - { - ERR("Unknown field ref type\n"); - ret = -EINVAL; - goto end; - } - /* get context ref */ - case FILTER_OP_GET_CONTEXT_REF: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_UNKNOWN; - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - break; - } - case FILTER_OP_LOAD_FIELD_REF_STRING: - case FILTER_OP_LOAD_FIELD_REF_SEQUENCE: - case FILTER_OP_GET_CONTEXT_REF_STRING: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_STRING; - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - break; - } - case FILTER_OP_LOAD_FIELD_REF_S64: - case FILTER_OP_GET_CONTEXT_REF_S64: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - break; - } - case FILTER_OP_LOAD_FIELD_REF_DOUBLE: - case FILTER_OP_GET_CONTEXT_REF_DOUBLE: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_DOUBLE; - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - break; - } - - /* load from immediate operand */ - case FILTER_OP_LOAD_STRING: - { - struct load_op *insn = (struct load_op *) pc; - - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_STRING; - next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; - break; - } - - case FILTER_OP_LOAD_STAR_GLOB_STRING: - { - struct load_op *insn = (struct load_op *) pc; - - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_STAR_GLOB_STRING; - next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; - break; - } - - case FILTER_OP_LOAD_S64: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct load_op) - + sizeof(struct literal_numeric); - break; - } - - case FILTER_OP_LOAD_DOUBLE: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_DOUBLE; - next_pc += sizeof(struct load_op) - + sizeof(struct literal_double); - break; - } - - /* cast */ - case FILTER_OP_CAST_TO_S64: - { - struct cast_op *insn = (struct cast_op *) pc; - - switch (vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_STRING: - case REG_STAR_GLOB_STRING: - ERR("Cast op can only be applied to numeric or floating point registers\n"); - ret = -EINVAL; - goto end; - case REG_S64: - insn->op = FILTER_OP_CAST_NOP; - break; - case REG_DOUBLE: - insn->op = FILTER_OP_CAST_DOUBLE_TO_S64; - break; - case REG_UNKNOWN: - case REG_U64: - break; - } - /* Pop 1, push 1 */ - vstack_ax(stack)->type = REG_S64; - next_pc += 
sizeof(struct cast_op); - break; - } - case FILTER_OP_CAST_DOUBLE_TO_S64: - { - /* Pop 1, push 1 */ - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct cast_op); - break; - } - case FILTER_OP_CAST_NOP: - { - next_pc += sizeof(struct cast_op); - break; - } - - /* - * Instructions for recursive traversal through composed types. - */ - case FILTER_OP_GET_CONTEXT_ROOT: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_PTR; - vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT; - next_pc += sizeof(struct load_op); - break; - } - case FILTER_OP_GET_APP_CONTEXT_ROOT: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_PTR; - vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT; - next_pc += sizeof(struct load_op); - break; - } - case FILTER_OP_GET_PAYLOAD_ROOT: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_PTR; - vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD; - next_pc += sizeof(struct load_op); - break; - } - - case FILTER_OP_LOAD_FIELD: - { - struct load_op *insn = (struct load_op *) pc; - - assert(vstack_ax(stack)->type == REG_PTR); - /* Pop 1, push 1 */ - ret = specialize_load_field(vstack_ax(stack), insn); - if (ret) - goto end; - - next_pc += sizeof(struct load_op); - break; - } - - case FILTER_OP_LOAD_FIELD_S8: - case FILTER_OP_LOAD_FIELD_S16: - case FILTER_OP_LOAD_FIELD_S32: - case FILTER_OP_LOAD_FIELD_S64: - { - /* Pop 1, push 1 */ - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct load_op); - break; - } - - case FILTER_OP_LOAD_FIELD_U8: - case FILTER_OP_LOAD_FIELD_U16: - case FILTER_OP_LOAD_FIELD_U32: - case FILTER_OP_LOAD_FIELD_U64: - { - /* Pop 1, push 1 */ - vstack_ax(stack)->type = REG_U64; - next_pc += sizeof(struct load_op); - break; - } - - case FILTER_OP_LOAD_FIELD_STRING: - case FILTER_OP_LOAD_FIELD_SEQUENCE: - { - /* Pop 1, push 1 */ - vstack_ax(stack)->type = REG_STRING; - next_pc += sizeof(struct load_op); - break; - } - - case FILTER_OP_LOAD_FIELD_DOUBLE: - { - /* Pop 1, push 1 */ - vstack_ax(stack)->type = REG_DOUBLE; - next_pc += sizeof(struct load_op); - break; - } - - case FILTER_OP_GET_SYMBOL: - { - struct load_op *insn = (struct load_op *) pc; - - dbg_printf("op get symbol\n"); - switch (vstack_ax(stack)->load.type) { - case LOAD_OBJECT: - ERR("Nested fields not implemented yet."); - ret = -EINVAL; - goto end; - case LOAD_ROOT_CONTEXT: - /* Lookup context field. */ - ret = specialize_context_lookup(*pctx, - bytecode, insn, - &vstack_ax(stack)->load); - if (ret) - goto end; - break; - case LOAD_ROOT_APP_CONTEXT: - /* Lookup app context field. */ - ret = specialize_app_context_lookup(pctx, - bytecode, insn, - &vstack_ax(stack)->load); - if (ret) - goto end; - break; - case LOAD_ROOT_PAYLOAD: - /* Lookup event payload field. */ - ret = specialize_payload_lookup(event_desc, - bytecode, insn, - &vstack_ax(stack)->load); - if (ret) - goto end; - break; - } - next_pc += sizeof(struct load_op) + sizeof(struct get_symbol); - break; - } - - case FILTER_OP_GET_SYMBOL_FIELD: - { - /* Always generated by specialize phase. 
*/ - ret = -EINVAL; - goto end; - } - - case FILTER_OP_GET_INDEX_U16: - { - struct load_op *insn = (struct load_op *) pc; - struct get_index_u16 *index = (struct get_index_u16 *) insn->data; - - dbg_printf("op get index u16\n"); - /* Pop 1, push 1 */ - ret = specialize_get_index(bytecode, insn, index->index, - vstack_ax(stack), sizeof(*index)); - if (ret) - goto end; - next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16); - break; - } - - case FILTER_OP_GET_INDEX_U64: - { - struct load_op *insn = (struct load_op *) pc; - struct get_index_u64 *index = (struct get_index_u64 *) insn->data; - - dbg_printf("op get index u64\n"); - /* Pop 1, push 1 */ - ret = specialize_get_index(bytecode, insn, index->index, - vstack_ax(stack), sizeof(*index)); - if (ret) - goto end; - next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64); - break; - } - - } - } -end: - return ret; -} diff --git a/liblttng-ust/lttng-filter-validator.c b/liblttng-ust/lttng-filter-validator.c deleted file mode 100644 index e6982369..00000000 --- a/liblttng-ust/lttng-filter-validator.c +++ /dev/null @@ -1,2024 +0,0 @@ -/* - * lttng-filter-validator.c - * - * LTTng UST filter bytecode validator. - * - * Copyright (C) 2010-2016 Mathieu Desnoyers - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#define _LGPL_SOURCE -#include -#include -#include - -#include -#include - -#include "lttng-filter.h" -#include "lttng-hash-helper.h" -#include "string-utils.h" -#include "ust-events-internal.h" - -/* - * Number of merge points for hash table size. Hash table initialized to - * that size, and we do not resize, because we do not want to trigger - * RCU worker thread execution: fall-back on linear traversal if number - * of merge points exceeds this value. 
- */ -#define DEFAULT_NR_MERGE_POINTS 128 -#define MIN_NR_BUCKETS 128 -#define MAX_NR_BUCKETS 128 - -/* merge point table node */ -struct lfht_mp_node { - struct cds_lfht_node node; - - /* Context at merge point */ - struct vstack stack; - unsigned long target_pc; -}; - -static unsigned long lttng_hash_seed; -static unsigned int lttng_hash_seed_ready; - -static -int lttng_hash_match(struct cds_lfht_node *node, const void *key) -{ - struct lfht_mp_node *mp_node = - caa_container_of(node, struct lfht_mp_node, node); - unsigned long key_pc = (unsigned long) key; - - if (mp_node->target_pc == key_pc) - return 1; - else - return 0; -} - -static -int merge_points_compare(const struct vstack *stacka, - const struct vstack *stackb) -{ - int i, len; - - if (stacka->top != stackb->top) - return 1; - len = stacka->top + 1; - assert(len >= 0); - for (i = 0; i < len; i++) { - if (stacka->e[i].type != REG_UNKNOWN - && stackb->e[i].type != REG_UNKNOWN - && stacka->e[i].type != stackb->e[i].type) - return 1; - } - return 0; -} - -static -int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc, - const struct vstack *stack) -{ - struct lfht_mp_node *node; - unsigned long hash = lttng_hash_mix((const char *) target_pc, - sizeof(target_pc), - lttng_hash_seed); - struct cds_lfht_node *ret; - - dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n", - target_pc, hash); - node = zmalloc(sizeof(struct lfht_mp_node)); - if (!node) - return -ENOMEM; - node->target_pc = target_pc; - memcpy(&node->stack, stack, sizeof(node->stack)); - ret = cds_lfht_add_unique(ht, hash, lttng_hash_match, - (const char *) target_pc, &node->node); - if (ret != &node->node) { - struct lfht_mp_node *ret_mp = - caa_container_of(ret, struct lfht_mp_node, node); - - /* Key already present */ - dbg_printf("Filter: compare merge points for offset %lu, hash %lu\n", - target_pc, hash); - free(node); - if (merge_points_compare(stack, &ret_mp->stack)) { - ERR("Merge points differ for offset %lu\n", - target_pc); - return -EINVAL; - } - } - return 0; -} - -/* - * Binary comparators use top of stack and top of stack -1. - * Return 0 if typing is known to match, 1 if typing is dynamic - * (unknown), negative error value on error. 
- */ -static -int bin_op_compare_check(struct vstack *stack, filter_opcode_t opcode, - const char *str) -{ - if (unlikely(!vstack_ax(stack) || !vstack_bx(stack))) - goto error_empty; - - switch (vstack_ax(stack)->type) { - default: - goto error_type; - - case REG_UNKNOWN: - goto unknown; - case REG_STRING: - switch (vstack_bx(stack)->type) { - default: - goto error_type; - - case REG_UNKNOWN: - goto unknown; - case REG_STRING: - break; - case REG_STAR_GLOB_STRING: - if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) { - goto error_mismatch; - } - break; - case REG_S64: - case REG_U64: - case REG_DOUBLE: - goto error_mismatch; - } - break; - case REG_STAR_GLOB_STRING: - switch (vstack_bx(stack)->type) { - default: - goto error_type; - - case REG_UNKNOWN: - goto unknown; - case REG_STRING: - if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) { - goto error_mismatch; - } - break; - case REG_STAR_GLOB_STRING: - case REG_S64: - case REG_U64: - case REG_DOUBLE: - goto error_mismatch; - } - break; - case REG_S64: - case REG_U64: - case REG_DOUBLE: - switch (vstack_bx(stack)->type) { - default: - goto error_type; - - case REG_UNKNOWN: - goto unknown; - case REG_STRING: - case REG_STAR_GLOB_STRING: - goto error_mismatch; - case REG_S64: - case REG_U64: - case REG_DOUBLE: - break; - } - break; - } - return 0; - -unknown: - return 1; - -error_mismatch: - ERR("type mismatch for '%s' binary operator\n", str); - return -EINVAL; - -error_empty: - ERR("empty stack for '%s' binary operator\n", str); - return -EINVAL; - -error_type: - ERR("unknown type for '%s' binary operator\n", str); - return -EINVAL; -} - -/* - * Binary bitwise operators use top of stack and top of stack -1. - * Return 0 if typing is known to match, 1 if typing is dynamic - * (unknown), negative error value on error. - */ -static -int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode, - const char *str) -{ - if (unlikely(!vstack_ax(stack) || !vstack_bx(stack))) - goto error_empty; - - switch (vstack_ax(stack)->type) { - default: - goto error_type; - - case REG_UNKNOWN: - goto unknown; - case REG_S64: - case REG_U64: - switch (vstack_bx(stack)->type) { - default: - goto error_type; - - case REG_UNKNOWN: - goto unknown; - case REG_S64: - case REG_U64: - break; - } - break; - } - return 0; - -unknown: - return 1; - -error_empty: - ERR("empty stack for '%s' binary operator\n", str); - return -EINVAL; - -error_type: - ERR("unknown type for '%s' binary operator\n", str); - return -EINVAL; -} - -static -int validate_get_symbol(struct bytecode_runtime *bytecode, - const struct get_symbol *sym) -{ - const char *str, *str_limit; - size_t len_limit; - - if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset) - return -EINVAL; - - str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset; - str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len; - len_limit = str_limit - str; - if (strnlen(str, len_limit) == len_limit) - return -EINVAL; - return 0; -} - -/* - * Validate bytecode range overflow within the validation pass. - * Called for each instruction encountered. 
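Every case of the overflow validation introduced above answers the same question: does the whole instruction, including its fixed-size immediate data, fit inside the received bytecode image before anything is dereferenced? A hedged generic form of that check is sketched here; the helper name and the usage comment are illustrative, not code from the patch.

#include <stddef.h>
#include <stdint.h>

/* Non-zero when an instruction of insn_len bytes at pc would extend past the
 * end of a code image of code_len bytes starting at start_pc. */
static int insn_overflows(const char *start_pc, uint32_t code_len,
                const char *pc, size_t insn_len)
{
        return pc + insn_len > start_pc + code_len;
}

/* Typical use, mirroring the per-opcode checks:
 *
 *     if (insn_overflows(start_pc, bytecode->len, pc, sizeof(struct binary_op)))
 *             return -ERANGE;
 */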
- */ -static -int bytecode_validate_overflow(struct bytecode_runtime *bytecode, - char *start_pc, char *pc) -{ - int ret = 0; - - switch (*(filter_opcode_t *) pc) { - case FILTER_OP_UNKNOWN: - default: - { - ERR("unknown bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); - ret = -EINVAL; - break; - } - - case FILTER_OP_RETURN: - case FILTER_OP_RETURN_S64: - { - if (unlikely(pc + sizeof(struct return_op) - > start_pc + bytecode->len)) { - ret = -ERANGE; - } - break; - } - - /* binary */ - case FILTER_OP_MUL: - case FILTER_OP_DIV: - case FILTER_OP_MOD: - case FILTER_OP_PLUS: - case FILTER_OP_MINUS: - { - ERR("unsupported bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); - ret = -EINVAL; - break; - } - - case FILTER_OP_EQ: - case FILTER_OP_NE: - case FILTER_OP_GT: - case FILTER_OP_LT: - case FILTER_OP_GE: - case FILTER_OP_LE: - case FILTER_OP_EQ_STRING: - case FILTER_OP_NE_STRING: - case FILTER_OP_GT_STRING: - case FILTER_OP_LT_STRING: - case FILTER_OP_GE_STRING: - case FILTER_OP_LE_STRING: - case FILTER_OP_EQ_STAR_GLOB_STRING: - case FILTER_OP_NE_STAR_GLOB_STRING: - case FILTER_OP_EQ_S64: - case FILTER_OP_NE_S64: - case FILTER_OP_GT_S64: - case FILTER_OP_LT_S64: - case FILTER_OP_GE_S64: - case FILTER_OP_LE_S64: - case FILTER_OP_EQ_DOUBLE: - case FILTER_OP_NE_DOUBLE: - case FILTER_OP_GT_DOUBLE: - case FILTER_OP_LT_DOUBLE: - case FILTER_OP_GE_DOUBLE: - case FILTER_OP_LE_DOUBLE: - case FILTER_OP_EQ_DOUBLE_S64: - case FILTER_OP_NE_DOUBLE_S64: - case FILTER_OP_GT_DOUBLE_S64: - case FILTER_OP_LT_DOUBLE_S64: - case FILTER_OP_GE_DOUBLE_S64: - case FILTER_OP_LE_DOUBLE_S64: - case FILTER_OP_EQ_S64_DOUBLE: - case FILTER_OP_NE_S64_DOUBLE: - case FILTER_OP_GT_S64_DOUBLE: - case FILTER_OP_LT_S64_DOUBLE: - case FILTER_OP_GE_S64_DOUBLE: - case FILTER_OP_LE_S64_DOUBLE: - case FILTER_OP_BIT_RSHIFT: - case FILTER_OP_BIT_LSHIFT: - case FILTER_OP_BIT_AND: - case FILTER_OP_BIT_OR: - case FILTER_OP_BIT_XOR: - { - if (unlikely(pc + sizeof(struct binary_op) - > start_pc + bytecode->len)) { - ret = -ERANGE; - } - break; - } - - /* unary */ - case FILTER_OP_UNARY_PLUS: - case FILTER_OP_UNARY_MINUS: - case FILTER_OP_UNARY_NOT: - case FILTER_OP_UNARY_PLUS_S64: - case FILTER_OP_UNARY_MINUS_S64: - case FILTER_OP_UNARY_NOT_S64: - case FILTER_OP_UNARY_PLUS_DOUBLE: - case FILTER_OP_UNARY_MINUS_DOUBLE: - case FILTER_OP_UNARY_NOT_DOUBLE: - case FILTER_OP_UNARY_BIT_NOT: - { - if (unlikely(pc + sizeof(struct unary_op) - > start_pc + bytecode->len)) { - ret = -ERANGE; - } - break; - } - - /* logical */ - case FILTER_OP_AND: - case FILTER_OP_OR: - { - if (unlikely(pc + sizeof(struct logical_op) - > start_pc + bytecode->len)) { - ret = -ERANGE; - } - break; - } - - /* load field ref */ - case FILTER_OP_LOAD_FIELD_REF: - { - ERR("Unknown field ref type\n"); - ret = -EINVAL; - break; - } - - /* get context ref */ - case FILTER_OP_GET_CONTEXT_REF: - case FILTER_OP_LOAD_FIELD_REF_STRING: - case FILTER_OP_LOAD_FIELD_REF_SEQUENCE: - case FILTER_OP_LOAD_FIELD_REF_S64: - case FILTER_OP_LOAD_FIELD_REF_DOUBLE: - case FILTER_OP_GET_CONTEXT_REF_STRING: - case FILTER_OP_GET_CONTEXT_REF_S64: - case FILTER_OP_GET_CONTEXT_REF_DOUBLE: - { - if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref) - > start_pc + bytecode->len)) { - ret = -ERANGE; - } - break; - } - - /* load from immediate operand */ - case FILTER_OP_LOAD_STRING: - case FILTER_OP_LOAD_STAR_GLOB_STRING: - { - struct load_op *insn = (struct load_op *) pc; - uint32_t str_len, maxlen; - - if (unlikely(pc + sizeof(struct load_op) - > start_pc + 
bytecode->len)) { - ret = -ERANGE; - break; - } - - maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op); - str_len = strnlen(insn->data, maxlen); - if (unlikely(str_len >= maxlen)) { - /* Final '\0' not found within range */ - ret = -ERANGE; - } - break; - } - - case FILTER_OP_LOAD_S64: - { - if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric) - > start_pc + bytecode->len)) { - ret = -ERANGE; - } - break; - } - - case FILTER_OP_LOAD_DOUBLE: - { - if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double) - > start_pc + bytecode->len)) { - ret = -ERANGE; - } - break; - } - - case FILTER_OP_CAST_TO_S64: - case FILTER_OP_CAST_DOUBLE_TO_S64: - case FILTER_OP_CAST_NOP: - { - if (unlikely(pc + sizeof(struct cast_op) - > start_pc + bytecode->len)) { - ret = -ERANGE; - } - break; - } - - /* - * Instructions for recursive traversal through composed types. - */ - case FILTER_OP_GET_CONTEXT_ROOT: - case FILTER_OP_GET_APP_CONTEXT_ROOT: - case FILTER_OP_GET_PAYLOAD_ROOT: - case FILTER_OP_LOAD_FIELD: - case FILTER_OP_LOAD_FIELD_S8: - case FILTER_OP_LOAD_FIELD_S16: - case FILTER_OP_LOAD_FIELD_S32: - case FILTER_OP_LOAD_FIELD_S64: - case FILTER_OP_LOAD_FIELD_U8: - case FILTER_OP_LOAD_FIELD_U16: - case FILTER_OP_LOAD_FIELD_U32: - case FILTER_OP_LOAD_FIELD_U64: - case FILTER_OP_LOAD_FIELD_STRING: - case FILTER_OP_LOAD_FIELD_SEQUENCE: - case FILTER_OP_LOAD_FIELD_DOUBLE: - if (unlikely(pc + sizeof(struct load_op) - > start_pc + bytecode->len)) { - ret = -ERANGE; - } - break; - - case FILTER_OP_GET_SYMBOL: - { - struct load_op *insn = (struct load_op *) pc; - struct get_symbol *sym = (struct get_symbol *) insn->data; - - if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol) - > start_pc + bytecode->len)) { - ret = -ERANGE; - break; - } - ret = validate_get_symbol(bytecode, sym); - break; - } - - case FILTER_OP_GET_SYMBOL_FIELD: - ERR("Unexpected get symbol field"); - ret = -EINVAL; - break; - - case FILTER_OP_GET_INDEX_U16: - if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16) - > start_pc + bytecode->len)) { - ret = -ERANGE; - } - break; - - case FILTER_OP_GET_INDEX_U64: - if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64) - > start_pc + bytecode->len)) { - ret = -ERANGE; - } - break; - } - - return ret; -} - -static -unsigned long delete_all_nodes(struct cds_lfht *ht) -{ - struct cds_lfht_iter iter; - struct lfht_mp_node *node; - unsigned long nr_nodes = 0; - - cds_lfht_for_each_entry(ht, &iter, node, node) { - int ret; - - ret = cds_lfht_del(ht, cds_lfht_iter_get_node(&iter)); - assert(!ret); - /* note: this hash table is never used concurrently */ - free(node); - nr_nodes++; - } - return nr_nodes; -} - -/* - * Return value: - * >=0: success - * <0: error - */ -static -int validate_instruction_context(struct bytecode_runtime *bytecode, - struct vstack *stack, - char *start_pc, - char *pc) -{ - int ret = 0; - const filter_opcode_t opcode = *(filter_opcode_t *) pc; - - switch (opcode) { - case FILTER_OP_UNKNOWN: - default: - { - ERR("unknown bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); - ret = -EINVAL; - goto end; - } - - case FILTER_OP_RETURN: - case FILTER_OP_RETURN_S64: - { - goto end; - } - - /* binary */ - case FILTER_OP_MUL: - case FILTER_OP_DIV: - case FILTER_OP_MOD: - case FILTER_OP_PLUS: - case FILTER_OP_MINUS: - { - ERR("unsupported bytecode op %u\n", - (unsigned int) opcode); - ret = -EINVAL; - goto end; - } - - case FILTER_OP_EQ: - { - ret = bin_op_compare_check(stack, opcode, 
"=="); - if (ret < 0) - goto end; - break; - } - case FILTER_OP_NE: - { - ret = bin_op_compare_check(stack, opcode, "!="); - if (ret < 0) - goto end; - break; - } - case FILTER_OP_GT: - { - ret = bin_op_compare_check(stack, opcode, ">"); - if (ret < 0) - goto end; - break; - } - case FILTER_OP_LT: - { - ret = bin_op_compare_check(stack, opcode, "<"); - if (ret < 0) - goto end; - break; - } - case FILTER_OP_GE: - { - ret = bin_op_compare_check(stack, opcode, ">="); - if (ret < 0) - goto end; - break; - } - case FILTER_OP_LE: - { - ret = bin_op_compare_check(stack, opcode, "<="); - if (ret < 0) - goto end; - break; - } - - case FILTER_OP_EQ_STRING: - case FILTER_OP_NE_STRING: - case FILTER_OP_GT_STRING: - case FILTER_OP_LT_STRING: - case FILTER_OP_GE_STRING: - case FILTER_OP_LE_STRING: - { - if (!vstack_ax(stack) || !vstack_bx(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_STRING - || vstack_bx(stack)->type != REG_STRING) { - ERR("Unexpected register type for string comparator\n"); - ret = -EINVAL; - goto end; - } - break; - } - - case FILTER_OP_EQ_STAR_GLOB_STRING: - case FILTER_OP_NE_STAR_GLOB_STRING: - { - if (!vstack_ax(stack) || !vstack_bx(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING - && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) { - ERR("Unexpected register type for globbing pattern comparator\n"); - ret = -EINVAL; - goto end; - } - break; - } - - case FILTER_OP_EQ_S64: - case FILTER_OP_NE_S64: - case FILTER_OP_GT_S64: - case FILTER_OP_LT_S64: - case FILTER_OP_GE_S64: - case FILTER_OP_LE_S64: - { - if (!vstack_ax(stack) || !vstack_bx(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_S64: - case REG_U64: - break; - default: - ERR("Unexpected register type for s64 comparator\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_bx(stack)->type) { - case REG_S64: - case REG_U64: - break; - default: - ERR("Unexpected register type for s64 comparator\n"); - ret = -EINVAL; - goto end; - } - break; - } - - case FILTER_OP_EQ_DOUBLE: - case FILTER_OP_NE_DOUBLE: - case FILTER_OP_GT_DOUBLE: - case FILTER_OP_LT_DOUBLE: - case FILTER_OP_GE_DOUBLE: - case FILTER_OP_LE_DOUBLE: - { - if (!vstack_ax(stack) || !vstack_bx(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_DOUBLE) { - ERR("Double operator should have two double registers\n"); - ret = -EINVAL; - goto end; - } - break; - } - - case FILTER_OP_EQ_DOUBLE_S64: - case FILTER_OP_NE_DOUBLE_S64: - case FILTER_OP_GT_DOUBLE_S64: - case FILTER_OP_LT_DOUBLE_S64: - case FILTER_OP_GE_DOUBLE_S64: - case FILTER_OP_LE_DOUBLE_S64: - { - if (!vstack_ax(stack) || !vstack_bx(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_S64: - case REG_U64: - break; - default: - ERR("Double-S64 operator has unexpected register types\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_bx(stack)->type) { - case REG_DOUBLE: - break; - default: - ERR("Double-S64 operator has unexpected register types\n"); - ret = -EINVAL; - goto end; - } - break; - } - - case FILTER_OP_EQ_S64_DOUBLE: - case FILTER_OP_NE_S64_DOUBLE: - case FILTER_OP_GT_S64_DOUBLE: - case FILTER_OP_LT_S64_DOUBLE: - case FILTER_OP_GE_S64_DOUBLE: - case FILTER_OP_LE_S64_DOUBLE: - { - if (!vstack_ax(stack) || !vstack_bx(stack)) { - ERR("Empty stack\n"); - ret = 
-EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_DOUBLE: - break; - default: - ERR("S64-Double operator has unexpected register types\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_bx(stack)->type) { - case REG_S64: - case REG_U64: - break; - default: - ERR("S64-Double operator has unexpected register types\n"); - ret = -EINVAL; - goto end; - } - break; - } - - case FILTER_OP_BIT_RSHIFT: - ret = bin_op_bitwise_check(stack, opcode, ">>"); - if (ret < 0) - goto end; - break; - case FILTER_OP_BIT_LSHIFT: - ret = bin_op_bitwise_check(stack, opcode, "<<"); - if (ret < 0) - goto end; - break; - case FILTER_OP_BIT_AND: - ret = bin_op_bitwise_check(stack, opcode, "&"); - if (ret < 0) - goto end; - break; - case FILTER_OP_BIT_OR: - ret = bin_op_bitwise_check(stack, opcode, "|"); - if (ret < 0) - goto end; - break; - case FILTER_OP_BIT_XOR: - ret = bin_op_bitwise_check(stack, opcode, "^"); - if (ret < 0) - goto end; - break; - - /* unary */ - case FILTER_OP_UNARY_PLUS: - case FILTER_OP_UNARY_MINUS: - case FILTER_OP_UNARY_NOT: - { - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_STRING: - case REG_STAR_GLOB_STRING: - ERR("Unary op can only be applied to numeric or floating point registers\n"); - ret = -EINVAL; - goto end; - case REG_S64: - break; - case REG_U64: - break; - case REG_DOUBLE: - break; - case REG_UNKNOWN: - break; - } - break; - } - case FILTER_OP_UNARY_BIT_NOT: - { - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_STRING: - case REG_STAR_GLOB_STRING: - case REG_DOUBLE: - ERR("Unary bitwise op can only be applied to numeric registers\n"); - ret = -EINVAL; - goto end; - case REG_S64: - break; - case REG_U64: - break; - case REG_UNKNOWN: - break; - } - break; - } - - case FILTER_OP_UNARY_PLUS_S64: - case FILTER_OP_UNARY_MINUS_S64: - case FILTER_OP_UNARY_NOT_S64: - { - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_S64 && - vstack_ax(stack)->type != REG_U64) { - ERR("Invalid register type\n"); - ret = -EINVAL; - goto end; - } - break; - } - - case FILTER_OP_UNARY_PLUS_DOUBLE: - case FILTER_OP_UNARY_MINUS_DOUBLE: - case FILTER_OP_UNARY_NOT_DOUBLE: - { - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_DOUBLE) { - ERR("Invalid register type\n"); - ret = -EINVAL; - goto end; - } - break; - } - - /* logical */ - case FILTER_OP_AND: - case FILTER_OP_OR: - { - struct logical_op *insn = (struct logical_op *) pc; - - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_S64 - && vstack_ax(stack)->type != REG_U64 - && vstack_ax(stack)->type != REG_UNKNOWN) { - ERR("Logical comparator expects S64, U64 or dynamic register\n"); - ret = -EINVAL; - goto end; - } - - dbg_printf("Validate jumping to bytecode offset %u\n", - (unsigned int) insn->skip_offset); - if (unlikely(start_pc + insn->skip_offset <= pc)) { - ERR("Loops are not allowed in bytecode\n"); - ret = -EINVAL; - goto end; - } - break; - } - - /* load field ref */ - case FILTER_OP_LOAD_FIELD_REF: - { - ERR("Unknown field ref type\n"); - ret = -EINVAL; - goto end; - } - case 
FILTER_OP_LOAD_FIELD_REF_STRING: - case FILTER_OP_LOAD_FIELD_REF_SEQUENCE: - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - - dbg_printf("Validate load field ref offset %u type string\n", - ref->offset); - break; - } - case FILTER_OP_LOAD_FIELD_REF_S64: - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - - dbg_printf("Validate load field ref offset %u type s64\n", - ref->offset); - break; - } - case FILTER_OP_LOAD_FIELD_REF_DOUBLE: - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - - dbg_printf("Validate load field ref offset %u type double\n", - ref->offset); - break; - } - - /* load from immediate operand */ - case FILTER_OP_LOAD_STRING: - case FILTER_OP_LOAD_STAR_GLOB_STRING: - { - break; - } - - case FILTER_OP_LOAD_S64: - { - break; - } - - case FILTER_OP_LOAD_DOUBLE: - { - break; - } - - case FILTER_OP_CAST_TO_S64: - case FILTER_OP_CAST_DOUBLE_TO_S64: - { - struct cast_op *insn = (struct cast_op *) pc; - - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - default: - ERR("unknown register type\n"); - ret = -EINVAL; - goto end; - - case REG_STRING: - case REG_STAR_GLOB_STRING: - ERR("Cast op can only be applied to numeric or floating point registers\n"); - ret = -EINVAL; - goto end; - case REG_S64: - break; - case REG_U64: - break; - case REG_DOUBLE: - break; - case REG_UNKNOWN: - break; - } - if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) { - if (vstack_ax(stack)->type != REG_DOUBLE) { - ERR("Cast expects double\n"); - ret = -EINVAL; - goto end; - } - } - break; - } - case FILTER_OP_CAST_NOP: - { - break; - } - - /* get context ref */ - case FILTER_OP_GET_CONTEXT_REF: - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - - dbg_printf("Validate get context ref offset %u type dynamic\n", - ref->offset); - break; - } - case FILTER_OP_GET_CONTEXT_REF_STRING: - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - - dbg_printf("Validate get context ref offset %u type string\n", - ref->offset); - break; - } - case FILTER_OP_GET_CONTEXT_REF_S64: - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - - dbg_printf("Validate get context ref offset %u type s64\n", - ref->offset); - break; - } - case FILTER_OP_GET_CONTEXT_REF_DOUBLE: - { - struct load_op *insn = (struct load_op *) pc; - struct field_ref *ref = (struct field_ref *) insn->data; - - dbg_printf("Validate get context ref offset %u type double\n", - ref->offset); - break; - } - - /* - * Instructions for recursive traversal through composed types. - */ - case FILTER_OP_GET_CONTEXT_ROOT: - { - dbg_printf("Validate get context root\n"); - break; - } - case FILTER_OP_GET_APP_CONTEXT_ROOT: - { - dbg_printf("Validate get app context root\n"); - break; - } - case FILTER_OP_GET_PAYLOAD_ROOT: - { - dbg_printf("Validate get payload root\n"); - break; - } - case FILTER_OP_LOAD_FIELD: - { - /* - * We tolerate that field type is unknown at validation, - * because we are performing the load specialization in - * a phase after validation. 
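The comment above captures the two-phase design: validation only checks structural soundness and may leave a slot typed REG_UNKNOWN, while the later specialization pass rewrites generic opcodes into typed ones once the event's field layout is known, so the interpreter no longer has to branch on field types. A minimal sketch of that rewrite step follows; the enums and function name are illustrative stand-ins rather than the real field descriptors.

enum field_kind { FIELD_S64, FIELD_U64, FIELD_DOUBLE, FIELD_STRING };

enum op_kind { OP_LOAD_FIELD, OP_LOAD_FIELD_S64, OP_LOAD_FIELD_U64,
               OP_LOAD_FIELD_DOUBLE, OP_LOAD_FIELD_STRING };

/* Replace the generic load with a typed variant now that the type is known. */
static enum op_kind specialize_load(enum field_kind kind)
{
        switch (kind) {
        case FIELD_S64:
                return OP_LOAD_FIELD_S64;
        case FIELD_U64:
                return OP_LOAD_FIELD_U64;
        case FIELD_DOUBLE:
                return OP_LOAD_FIELD_DOUBLE;
        case FIELD_STRING:
                return OP_LOAD_FIELD_STRING;
        default:
                return OP_LOAD_FIELD;   /* left generic: resolved at run time */
        }
}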
- */ - dbg_printf("Validate load field\n"); - break; - } - case FILTER_OP_LOAD_FIELD_S8: - { - dbg_printf("Validate load field s8\n"); - break; - } - case FILTER_OP_LOAD_FIELD_S16: - { - dbg_printf("Validate load field s16\n"); - break; - } - case FILTER_OP_LOAD_FIELD_S32: - { - dbg_printf("Validate load field s32\n"); - break; - } - case FILTER_OP_LOAD_FIELD_S64: - { - dbg_printf("Validate load field s64\n"); - break; - } - case FILTER_OP_LOAD_FIELD_U8: - { - dbg_printf("Validate load field u8\n"); - break; - } - case FILTER_OP_LOAD_FIELD_U16: - { - dbg_printf("Validate load field u16\n"); - break; - } - case FILTER_OP_LOAD_FIELD_U32: - { - dbg_printf("Validate load field u32\n"); - break; - } - case FILTER_OP_LOAD_FIELD_U64: - { - dbg_printf("Validate load field u64\n"); - break; - } - case FILTER_OP_LOAD_FIELD_STRING: - { - dbg_printf("Validate load field string\n"); - break; - } - case FILTER_OP_LOAD_FIELD_SEQUENCE: - { - dbg_printf("Validate load field sequence\n"); - break; - } - case FILTER_OP_LOAD_FIELD_DOUBLE: - { - dbg_printf("Validate load field double\n"); - break; - } - - case FILTER_OP_GET_SYMBOL: - { - struct load_op *insn = (struct load_op *) pc; - struct get_symbol *sym = (struct get_symbol *) insn->data; - - dbg_printf("Validate get symbol offset %u\n", sym->offset); - break; - } - - case FILTER_OP_GET_SYMBOL_FIELD: - { - struct load_op *insn = (struct load_op *) pc; - struct get_symbol *sym = (struct get_symbol *) insn->data; - - dbg_printf("Validate get symbol field offset %u\n", sym->offset); - break; - } - - case FILTER_OP_GET_INDEX_U16: - { - struct load_op *insn = (struct load_op *) pc; - struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data; - - dbg_printf("Validate get index u16 index %u\n", get_index->index); - break; - } - - case FILTER_OP_GET_INDEX_U64: - { - struct load_op *insn = (struct load_op *) pc; - struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data; - - dbg_printf("Validate get index u64 index %" PRIu64 "\n", get_index->index); - break; - } - } -end: - return ret; -} - -/* - * Return value: - * 0: success - * <0: error - */ -static -int validate_instruction_all_contexts(struct bytecode_runtime *bytecode, - struct cds_lfht *merge_points, - struct vstack *stack, - char *start_pc, - char *pc) -{ - int ret; - unsigned long target_pc = pc - start_pc; - struct cds_lfht_iter iter; - struct cds_lfht_node *node; - struct lfht_mp_node *mp_node; - unsigned long hash; - - /* Validate the context resulting from the previous instruction */ - ret = validate_instruction_context(bytecode, stack, start_pc, pc); - if (ret < 0) - return ret; - - /* Validate merge points */ - hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc), - lttng_hash_seed); - cds_lfht_lookup(merge_points, hash, lttng_hash_match, - (const char *) target_pc, &iter); - node = cds_lfht_iter_get_node(&iter); - if (node) { - mp_node = caa_container_of(node, struct lfht_mp_node, node); - - dbg_printf("Filter: validate merge point at offset %lu\n", - target_pc); - if (merge_points_compare(stack, &mp_node->stack)) { - ERR("Merge points differ for offset %lu\n", - target_pc); - return -EINVAL; - } - /* Once validated, we can remove the merge point */ - dbg_printf("Filter: remove merge point at offset %lu\n", - target_pc); - ret = cds_lfht_del(merge_points, node); - assert(!ret); - } - return 0; -} - -/* - * Return value: - * >0: going to next insn. - * 0: success, stop iteration. 
- * <0: error - */ -static -int exec_insn(struct bytecode_runtime *bytecode, - struct cds_lfht *merge_points, - struct vstack *stack, - char **_next_pc, - char *pc) -{ - int ret = 1; - char *next_pc = *_next_pc; - - switch (*(filter_opcode_t *) pc) { - case FILTER_OP_UNKNOWN: - default: - { - ERR("unknown bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); - ret = -EINVAL; - goto end; - } - - case FILTER_OP_RETURN: - { - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_S64: - case REG_U64: - case REG_DOUBLE: - case REG_STRING: - case REG_PTR: - case REG_UNKNOWN: - break; - default: - ERR("Unexpected register type %d at end of bytecode\n", - (int) vstack_ax(stack)->type); - ret = -EINVAL; - goto end; - } - - ret = 0; - goto end; - } - case FILTER_OP_RETURN_S64: - { - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_S64: - case REG_U64: - break; - default: - case REG_UNKNOWN: - ERR("Unexpected register type %d at end of bytecode\n", - (int) vstack_ax(stack)->type); - ret = -EINVAL; - goto end; - } - - ret = 0; - goto end; - } - - /* binary */ - case FILTER_OP_MUL: - case FILTER_OP_DIV: - case FILTER_OP_MOD: - case FILTER_OP_PLUS: - case FILTER_OP_MINUS: - { - ERR("unsupported bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); - ret = -EINVAL; - goto end; - } - - case FILTER_OP_EQ: - case FILTER_OP_NE: - case FILTER_OP_GT: - case FILTER_OP_LT: - case FILTER_OP_GE: - case FILTER_OP_LE: - case FILTER_OP_EQ_STRING: - case FILTER_OP_NE_STRING: - case FILTER_OP_GT_STRING: - case FILTER_OP_LT_STRING: - case FILTER_OP_GE_STRING: - case FILTER_OP_LE_STRING: - case FILTER_OP_EQ_STAR_GLOB_STRING: - case FILTER_OP_NE_STAR_GLOB_STRING: - case FILTER_OP_EQ_S64: - case FILTER_OP_NE_S64: - case FILTER_OP_GT_S64: - case FILTER_OP_LT_S64: - case FILTER_OP_GE_S64: - case FILTER_OP_LE_S64: - case FILTER_OP_EQ_DOUBLE: - case FILTER_OP_NE_DOUBLE: - case FILTER_OP_GT_DOUBLE: - case FILTER_OP_LT_DOUBLE: - case FILTER_OP_GE_DOUBLE: - case FILTER_OP_LE_DOUBLE: - case FILTER_OP_EQ_DOUBLE_S64: - case FILTER_OP_NE_DOUBLE_S64: - case FILTER_OP_GT_DOUBLE_S64: - case FILTER_OP_LT_DOUBLE_S64: - case FILTER_OP_GE_DOUBLE_S64: - case FILTER_OP_LE_DOUBLE_S64: - case FILTER_OP_EQ_S64_DOUBLE: - case FILTER_OP_NE_S64_DOUBLE: - case FILTER_OP_GT_S64_DOUBLE: - case FILTER_OP_LT_S64_DOUBLE: - case FILTER_OP_GE_S64_DOUBLE: - case FILTER_OP_LE_S64_DOUBLE: - { - /* Pop 2, push 1 */ - if (vstack_pop(stack)) { - ret = -EINVAL; - goto end; - } - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_S64: - case REG_U64: - case REG_DOUBLE: - case REG_STRING: - case REG_STAR_GLOB_STRING: - case REG_UNKNOWN: - break; - default: - ERR("Unexpected register type %d for operation\n", - (int) vstack_ax(stack)->type); - ret = -EINVAL; - goto end; - } - - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct binary_op); - break; - } - - case FILTER_OP_BIT_RSHIFT: - case FILTER_OP_BIT_LSHIFT: - case FILTER_OP_BIT_AND: - case FILTER_OP_BIT_OR: - case FILTER_OP_BIT_XOR: - { - /* Pop 2, push 1 */ - if (vstack_pop(stack)) { - ret = -EINVAL; - goto end; - } - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_S64: - case REG_U64: - case REG_DOUBLE: - case REG_STRING: - case REG_STAR_GLOB_STRING: - case REG_UNKNOWN: - 
break; - default: - ERR("Unexpected register type %d for operation\n", - (int) vstack_ax(stack)->type); - ret = -EINVAL; - goto end; - } - - vstack_ax(stack)->type = REG_U64; - next_pc += sizeof(struct binary_op); - break; - } - - /* unary */ - case FILTER_OP_UNARY_PLUS: - case FILTER_OP_UNARY_MINUS: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_UNKNOWN: - case REG_DOUBLE: - case REG_S64: - case REG_U64: - break; - default: - ERR("Unexpected register type %d for operation\n", - (int) vstack_ax(stack)->type); - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_UNKNOWN; - next_pc += sizeof(struct unary_op); - break; - } - - case FILTER_OP_UNARY_PLUS_S64: - case FILTER_OP_UNARY_MINUS_S64: - case FILTER_OP_UNARY_NOT_S64: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_S64: - case REG_U64: - break; - default: - ERR("Unexpected register type %d for operation\n", - (int) vstack_ax(stack)->type); - ret = -EINVAL; - goto end; - } - - next_pc += sizeof(struct unary_op); - break; - } - - case FILTER_OP_UNARY_NOT: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_UNKNOWN: - case REG_DOUBLE: - case REG_S64: - case REG_U64: - break; - default: - ERR("Unexpected register type %d for operation\n", - (int) vstack_ax(stack)->type); - ret = -EINVAL; - goto end; - } - - next_pc += sizeof(struct unary_op); - break; - } - - case FILTER_OP_UNARY_BIT_NOT: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_UNKNOWN: - case REG_S64: - case REG_U64: - break; - case REG_DOUBLE: - default: - ERR("Unexpected register type %d for operation\n", - (int) vstack_ax(stack)->type); - ret = -EINVAL; - goto end; - } - - vstack_ax(stack)->type = REG_U64; - next_pc += sizeof(struct unary_op); - break; - } - - case FILTER_OP_UNARY_NOT_DOUBLE: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_DOUBLE: - break; - default: - ERR("Incorrect register type %d for operation\n", - (int) vstack_ax(stack)->type); - ret = -EINVAL; - goto end; - } - - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct unary_op); - break; - } - - case FILTER_OP_UNARY_PLUS_DOUBLE: - case FILTER_OP_UNARY_MINUS_DOUBLE: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_DOUBLE: - break; - default: - ERR("Incorrect register type %d for operation\n", - (int) vstack_ax(stack)->type); - ret = -EINVAL; - goto end; - } - - vstack_ax(stack)->type = REG_DOUBLE; - next_pc += sizeof(struct unary_op); - break; - } - - /* logical */ - case FILTER_OP_AND: - case FILTER_OP_OR: - { - struct logical_op *insn = (struct logical_op *) pc; - int merge_ret; - - /* Add merge point to table */ - merge_ret = merge_point_add_check(merge_points, - insn->skip_offset, stack); - if (merge_ret) { - ret = merge_ret; - goto end; - } - - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - /* There is always a cast-to-s64 operation before a or/and op. 
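The invariant noted above is what allows the validator to insist on an S64/U64 (or unknown) register at every logical AND/OR, and the skip_offset carried by those opcodes implements short-circuit evaluation. Below is a small sketch of the resulting control flow, with illustrative names; eval_rhs stands in for continuing execution after the logical op.

/* Short-circuit AND as encoded by logical_op.skip_offset: when the left-hand
 * value already decides the result, jump forward and never evaluate the
 * right-hand side; otherwise pop it and fall through. */
static long eval_logical_and(long lhs, long (*eval_rhs)(void *ctx), void *ctx)
{
        if (!lhs)
                return 0;               /* jump taken: skip to skip_offset */
        return eval_rhs(ctx);           /* jump not taken: lhs was popped  */
}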
*/ - switch (vstack_ax(stack)->type) { - case REG_S64: - case REG_U64: - break; - default: - ERR("Incorrect register type %d for operation\n", - (int) vstack_ax(stack)->type); - ret = -EINVAL; - goto end; - } - - /* Continue to next instruction */ - /* Pop 1 when jump not taken */ - if (vstack_pop(stack)) { - ret = -EINVAL; - goto end; - } - next_pc += sizeof(struct logical_op); - break; - } - - /* load field ref */ - case FILTER_OP_LOAD_FIELD_REF: - { - ERR("Unknown field ref type\n"); - ret = -EINVAL; - goto end; - } - /* get context ref */ - case FILTER_OP_GET_CONTEXT_REF: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_UNKNOWN; - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - break; - } - case FILTER_OP_LOAD_FIELD_REF_STRING: - case FILTER_OP_LOAD_FIELD_REF_SEQUENCE: - case FILTER_OP_GET_CONTEXT_REF_STRING: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_STRING; - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - break; - } - case FILTER_OP_LOAD_FIELD_REF_S64: - case FILTER_OP_GET_CONTEXT_REF_S64: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - break; - } - case FILTER_OP_LOAD_FIELD_REF_DOUBLE: - case FILTER_OP_GET_CONTEXT_REF_DOUBLE: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_DOUBLE; - next_pc += sizeof(struct load_op) + sizeof(struct field_ref); - break; - } - - /* load from immediate operand */ - case FILTER_OP_LOAD_STRING: - { - struct load_op *insn = (struct load_op *) pc; - - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_STRING; - next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; - break; - } - - case FILTER_OP_LOAD_STAR_GLOB_STRING: - { - struct load_op *insn = (struct load_op *) pc; - - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_STAR_GLOB_STRING; - next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; - break; - } - - case FILTER_OP_LOAD_S64: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct load_op) - + sizeof(struct literal_numeric); - break; - } - - case FILTER_OP_LOAD_DOUBLE: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_DOUBLE; - next_pc += sizeof(struct load_op) - + sizeof(struct literal_double); - break; - } - - case FILTER_OP_CAST_TO_S64: - case FILTER_OP_CAST_DOUBLE_TO_S64: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - switch (vstack_ax(stack)->type) { - case REG_S64: - case REG_U64: - case REG_DOUBLE: - case REG_UNKNOWN: - break; - default: - ERR("Incorrect register type %d for cast\n", - (int) vstack_ax(stack)->type); - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct cast_op); - break; - } - case FILTER_OP_CAST_NOP: - { - next_pc += sizeof(struct cast_op); - break; - } - - /* - * Instructions for recursive traversal through composed types. 
- */ - case FILTER_OP_GET_CONTEXT_ROOT: - case FILTER_OP_GET_APP_CONTEXT_ROOT: - case FILTER_OP_GET_PAYLOAD_ROOT: - { - if (vstack_push(stack)) { - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_PTR; - next_pc += sizeof(struct load_op); - break; - } - - case FILTER_OP_LOAD_FIELD: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_PTR) { - ERR("Expecting pointer on top of stack\n"); - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_UNKNOWN; - next_pc += sizeof(struct load_op); - break; - } - - case FILTER_OP_LOAD_FIELD_S8: - case FILTER_OP_LOAD_FIELD_S16: - case FILTER_OP_LOAD_FIELD_S32: - case FILTER_OP_LOAD_FIELD_S64: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_PTR) { - ERR("Expecting pointer on top of stack\n"); - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_S64; - next_pc += sizeof(struct load_op); - break; - } - - case FILTER_OP_LOAD_FIELD_U8: - case FILTER_OP_LOAD_FIELD_U16: - case FILTER_OP_LOAD_FIELD_U32: - case FILTER_OP_LOAD_FIELD_U64: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_PTR) { - ERR("Expecting pointer on top of stack\n"); - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_U64; - next_pc += sizeof(struct load_op); - break; - } - - case FILTER_OP_LOAD_FIELD_STRING: - case FILTER_OP_LOAD_FIELD_SEQUENCE: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_PTR) { - ERR("Expecting pointer on top of stack\n"); - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_STRING; - next_pc += sizeof(struct load_op); - break; - } - - case FILTER_OP_LOAD_FIELD_DOUBLE: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_PTR) { - ERR("Expecting pointer on top of stack\n"); - ret = -EINVAL; - goto end; - } - vstack_ax(stack)->type = REG_DOUBLE; - next_pc += sizeof(struct load_op); - break; - } - - case FILTER_OP_GET_SYMBOL: - case FILTER_OP_GET_SYMBOL_FIELD: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_PTR) { - ERR("Expecting pointer on top of stack\n"); - ret = -EINVAL; - goto end; - } - next_pc += sizeof(struct load_op) + sizeof(struct get_symbol); - break; - } - - case FILTER_OP_GET_INDEX_U16: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_PTR) { - ERR("Expecting pointer on top of stack\n"); - ret = -EINVAL; - goto end; - } - next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16); - break; - } - - case FILTER_OP_GET_INDEX_U64: - { - /* Pop 1, push 1 */ - if (!vstack_ax(stack)) { - ERR("Empty stack\n"); - ret = -EINVAL; - goto end; - } - if (vstack_ax(stack)->type != REG_PTR) { - ERR("Expecting pointer on top of stack\n"); - ret = -EINVAL; - goto end; - } - next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64); - break; - } - - } -end: - *_next_pc = next_pc; - return ret; -} - -/* - * Never called concurrently (hash seed is shared). 
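As the comment above states, the hash seed for the merge-point table can be initialised lazily on first use only because bytecode linking and validation are serialised. A hedged sketch of that pattern follows; names are illustrative, and a concurrent variant would need pthread_once() or an atomic flag instead.

#include <time.h>

static unsigned long hash_seed;
static int hash_seed_ready;

/* Lazy, single-threaded initialisation, as assumed by the validator. */
static unsigned long get_hash_seed(void)
{
        if (!hash_seed_ready) {
                hash_seed = (unsigned long) time(NULL);
                hash_seed_ready = 1;
        }
        return hash_seed;
}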
- */ -int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode) -{ - struct cds_lfht *merge_points; - char *pc, *next_pc, *start_pc; - int ret = -EINVAL; - struct vstack stack; - - vstack_init(&stack); - - if (!lttng_hash_seed_ready) { - lttng_hash_seed = time(NULL); - lttng_hash_seed_ready = 1; - } - /* - * Note: merge_points hash table used by single thread, and - * never concurrently resized. Therefore, we can use it without - * holding RCU read-side lock and free nodes without using - * call_rcu. - */ - merge_points = cds_lfht_new(DEFAULT_NR_MERGE_POINTS, - MIN_NR_BUCKETS, MAX_NR_BUCKETS, - 0, NULL); - if (!merge_points) { - ERR("Error allocating hash table for bytecode validation\n"); - return -ENOMEM; - } - start_pc = &bytecode->code[0]; - for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; - pc = next_pc) { - ret = bytecode_validate_overflow(bytecode, start_pc, pc); - if (ret != 0) { - if (ret == -ERANGE) - ERR("filter bytecode overflow\n"); - goto end; - } - dbg_printf("Validating op %s (%u)\n", - print_op((unsigned int) *(filter_opcode_t *) pc), - (unsigned int) *(filter_opcode_t *) pc); - - /* - * For each instruction, validate the current context - * (traversal of entire execution flow), and validate - * all merge points targeting this instruction. - */ - ret = validate_instruction_all_contexts(bytecode, merge_points, - &stack, start_pc, pc); - if (ret) - goto end; - ret = exec_insn(bytecode, merge_points, &stack, &next_pc, pc); - if (ret <= 0) - goto end; - } -end: - if (delete_all_nodes(merge_points)) { - if (!ret) { - ERR("Unexpected merge points\n"); - ret = -EINVAL; - } - } - if (cds_lfht_destroy(merge_points, NULL)) { - ERR("Error destroying hash table\n"); - } - return ret; -} diff --git a/liblttng-ust/lttng-filter.c b/liblttng-ust/lttng-filter.c deleted file mode 100644 index 55d707b8..00000000 --- a/liblttng-ust/lttng-filter.c +++ /dev/null @@ -1,592 +0,0 @@ -/* - * lttng-filter.c - * - * LTTng UST filter code. - * - * Copyright (C) 2010-2016 Mathieu Desnoyers - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#define _LGPL_SOURCE -#include -#include - -#include - -#include "lttng-filter.h" -#include "ust-events-internal.h" - -static const char *opnames[] = { - [ FILTER_OP_UNKNOWN ] = "UNKNOWN", - - [ FILTER_OP_RETURN ] = "RETURN", - - /* binary */ - [ FILTER_OP_MUL ] = "MUL", - [ FILTER_OP_DIV ] = "DIV", - [ FILTER_OP_MOD ] = "MOD", - [ FILTER_OP_PLUS ] = "PLUS", - [ FILTER_OP_MINUS ] = "MINUS", - [ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT", - [ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT", - [ FILTER_OP_BIT_AND ] = "BIT_AND", - [ FILTER_OP_BIT_OR ] = "BIT_OR", - [ FILTER_OP_BIT_XOR ] = "BIT_XOR", - - /* binary comparators */ - [ FILTER_OP_EQ ] = "EQ", - [ FILTER_OP_NE ] = "NE", - [ FILTER_OP_GT ] = "GT", - [ FILTER_OP_LT ] = "LT", - [ FILTER_OP_GE ] = "GE", - [ FILTER_OP_LE ] = "LE", - - /* string binary comparators */ - [ FILTER_OP_EQ_STRING ] = "EQ_STRING", - [ FILTER_OP_NE_STRING ] = "NE_STRING", - [ FILTER_OP_GT_STRING ] = "GT_STRING", - [ FILTER_OP_LT_STRING ] = "LT_STRING", - [ FILTER_OP_GE_STRING ] = "GE_STRING", - [ FILTER_OP_LE_STRING ] = "LE_STRING", - - /* s64 binary comparators */ - [ FILTER_OP_EQ_S64 ] = "EQ_S64", - [ FILTER_OP_NE_S64 ] = "NE_S64", - [ FILTER_OP_GT_S64 ] = "GT_S64", - [ FILTER_OP_LT_S64 ] = "LT_S64", - [ FILTER_OP_GE_S64 ] = "GE_S64", - [ FILTER_OP_LE_S64 ] = "LE_S64", - - /* double binary comparators */ - [ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE", - [ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE", - [ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE", - [ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE", - [ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE", - [ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE", - - /* Mixed S64-double binary comparators */ - [ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64", - [ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64", - [ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64", - [ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64", - [ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64", - [ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64", - - [ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE", - [ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE", - [ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE", - [ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE", - [ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE", - [ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE", - - /* unary */ - [ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS", - [ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS", - [ FILTER_OP_UNARY_NOT ] = "UNARY_NOT", - [ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64", - [ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64", - [ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64", - [ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE", - [ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE", - [ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE", - - /* logical */ - [ FILTER_OP_AND ] = "AND", - [ FILTER_OP_OR ] = "OR", - - /* load field ref */ - [ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF", - [ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING", - [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE", - [ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64", - [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE", - - /* load from immediate operand */ - [ FILTER_OP_LOAD_STRING ] = "LOAD_STRING", - [ FILTER_OP_LOAD_S64 ] = "LOAD_S64", - [ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE", - - /* cast */ - [ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64", - [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64", - [ FILTER_OP_CAST_NOP ] = "CAST_NOP", - - /* get context ref */ - [ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF", - [ FILTER_OP_GET_CONTEXT_REF_STRING ] = 
"GET_CONTEXT_REF_STRING", - [ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64", - [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE", - - /* load userspace field ref */ - [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING", - [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE", - - /* - * load immediate star globbing pattern (literal string) - * from immediate. - */ - [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING", - - /* globbing pattern binary operator: apply to */ - [ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING", - [ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING", - - /* - * Instructions for recursive traversal through composed types. - */ - [ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT", - [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT", - [ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT", - - [ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL", - [ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD", - [ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16", - [ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64", - - [ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD", - [ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8", - [ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16", - [ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32", - [ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64", - [ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8", - [ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16", - [ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32", - [ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64", - [ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING", - [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE", - [ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE", - - [ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT", - - [ FILTER_OP_RETURN_S64 ] = "RETURN_S64", -}; - -const char *print_op(enum filter_op op) -{ - if (op >= NR_FILTER_OPS) - return "UNKNOWN"; - else - return opnames[op]; -} - -static -int apply_field_reloc(const struct lttng_event_desc *event_desc, - struct bytecode_runtime *runtime, - uint32_t runtime_len, - uint32_t reloc_offset, - const char *field_name, - enum filter_op filter_op) -{ - const struct lttng_event_field *fields, *field = NULL; - unsigned int nr_fields, i; - struct load_op *op; - uint32_t field_offset = 0; - - dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name); - - /* Lookup event by name */ - if (!event_desc) - return -EINVAL; - fields = event_desc->fields; - if (!fields) - return -EINVAL; - nr_fields = event_desc->nr_fields; - for (i = 0; i < nr_fields; i++) { - if (fields[i].u.ext.nofilter) { - continue; - } - if (!strcmp(fields[i].name, field_name)) { - field = &fields[i]; - break; - } - /* compute field offset */ - switch (fields[i].type.atype) { - case atype_integer: - case atype_enum: - case atype_enum_nestable: - field_offset += sizeof(int64_t); - break; - case atype_array: - case atype_array_nestable: - case atype_sequence: - case atype_sequence_nestable: - field_offset += sizeof(unsigned long); - field_offset += sizeof(void *); - break; - case atype_string: - field_offset += sizeof(void *); - break; - case atype_float: - field_offset += sizeof(double); - break; - default: - return -EINVAL; - } - } - if (!field) - return -EINVAL; - - /* Check if field offset is too large for 16-bit offset */ - if (field_offset > FILTER_BYTECODE_MAX_LEN - 1) - return -EINVAL; - - /* set type */ - op = (struct load_op *) &runtime->code[reloc_offset]; - - switch (filter_op) { - case 
FILTER_OP_LOAD_FIELD_REF: - { - struct field_ref *field_ref; - - field_ref = (struct field_ref *) op->data; - switch (field->type.atype) { - case atype_integer: - case atype_enum: - case atype_enum_nestable: - op->op = FILTER_OP_LOAD_FIELD_REF_S64; - break; - case atype_array: - case atype_array_nestable: - case atype_sequence: - case atype_sequence_nestable: - op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE; - break; - case atype_string: - op->op = FILTER_OP_LOAD_FIELD_REF_STRING; - break; - case atype_float: - op->op = FILTER_OP_LOAD_FIELD_REF_DOUBLE; - break; - default: - return -EINVAL; - } - /* set offset */ - field_ref->offset = (uint16_t) field_offset; - break; - } - default: - return -EINVAL; - } - return 0; -} - -static -int apply_context_reloc(struct bytecode_runtime *runtime, - uint32_t runtime_len, - uint32_t reloc_offset, - const char *context_name, - enum filter_op filter_op) -{ - struct load_op *op; - struct lttng_ctx_field *ctx_field; - int idx; - struct lttng_ctx *ctx = *runtime->p.pctx; - - dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name); - - /* Get context index */ - idx = lttng_get_context_index(ctx, context_name); - if (idx < 0) { - if (lttng_context_is_app(context_name)) { - int ret; - - ret = lttng_ust_add_app_context_to_ctx_rcu(context_name, - &ctx); - if (ret) - return ret; - idx = lttng_get_context_index(ctx, context_name); - if (idx < 0) - return -ENOENT; - } else { - return -ENOENT; - } - } - /* Check if idx is too large for 16-bit offset */ - if (idx > FILTER_BYTECODE_MAX_LEN - 1) - return -EINVAL; - - /* Get context return type */ - ctx_field = &ctx->fields[idx]; - op = (struct load_op *) &runtime->code[reloc_offset]; - - switch (filter_op) { - case FILTER_OP_GET_CONTEXT_REF: - { - struct field_ref *field_ref; - - field_ref = (struct field_ref *) op->data; - switch (ctx_field->event_field.type.atype) { - case atype_integer: - case atype_enum: - case atype_enum_nestable: - op->op = FILTER_OP_GET_CONTEXT_REF_S64; - break; - /* Sequence and array supported as string */ - case atype_string: - case atype_array: - case atype_array_nestable: - case atype_sequence: - case atype_sequence_nestable: - op->op = FILTER_OP_GET_CONTEXT_REF_STRING; - break; - case atype_float: - op->op = FILTER_OP_GET_CONTEXT_REF_DOUBLE; - break; - case atype_dynamic: - op->op = FILTER_OP_GET_CONTEXT_REF; - break; - default: - return -EINVAL; - } - /* set offset to context index within channel contexts */ - field_ref->offset = (uint16_t) idx; - break; - } - default: - return -EINVAL; - } - return 0; -} - -static -int apply_reloc(const struct lttng_event_desc *event_desc, - struct bytecode_runtime *runtime, - uint32_t runtime_len, - uint32_t reloc_offset, - const char *name) -{ - struct load_op *op; - - dbg_printf("Apply reloc: %u %s\n", reloc_offset, name); - - /* Ensure that the reloc is within the code */ - if (runtime_len - reloc_offset < sizeof(uint16_t)) - return -EINVAL; - - op = (struct load_op *) &runtime->code[reloc_offset]; - switch (op->op) { - case FILTER_OP_LOAD_FIELD_REF: - return apply_field_reloc(event_desc, runtime, runtime_len, - reloc_offset, name, op->op); - case FILTER_OP_GET_CONTEXT_REF: - return apply_context_reloc(runtime, runtime_len, - reloc_offset, name, op->op); - case FILTER_OP_GET_SYMBOL: - case FILTER_OP_GET_SYMBOL_FIELD: - /* - * Will be handled by load specialize phase or - * dynamically by interpreter. 
- */ - return 0; - default: - ERR("Unknown reloc op type %u\n", op->op); - return -EINVAL; - } - return 0; -} - -static -int bytecode_is_linked(struct lttng_ust_bytecode_node *bytecode, - struct cds_list_head *bytecode_runtime_head) -{ - struct lttng_bytecode_runtime *bc_runtime; - - cds_list_for_each_entry(bc_runtime, bytecode_runtime_head, node) { - if (bc_runtime->bc == bytecode) - return 1; - } - return 0; -} - -/* - * Take a bytecode with reloc table and link it to an event to create a - * bytecode runtime. - */ -static -int _lttng_filter_link_bytecode(const struct lttng_event_desc *event_desc, - struct lttng_ctx **ctx, - struct lttng_ust_bytecode_node *bytecode, - struct cds_list_head *insert_loc) -{ - int ret, offset, next_offset; - struct bytecode_runtime *runtime = NULL; - size_t runtime_alloc_len; - - if (!bytecode) - return 0; - /* Bytecode already linked */ - if (bytecode_is_linked(bytecode, insert_loc)) - return 0; - - dbg_printf("Linking...\n"); - - /* We don't need the reloc table in the runtime */ - runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset; - runtime = zmalloc(runtime_alloc_len); - if (!runtime) { - ret = -ENOMEM; - goto alloc_error; - } - runtime->p.bc = bytecode; - runtime->p.pctx = ctx; - runtime->len = bytecode->bc.reloc_offset; - /* copy original bytecode */ - memcpy(runtime->code, bytecode->bc.data, runtime->len); - /* - * apply relocs. Those are a uint16_t (offset in bytecode) - * followed by a string (field name). - */ - for (offset = bytecode->bc.reloc_offset; - offset < bytecode->bc.len; - offset = next_offset) { - uint16_t reloc_offset = - *(uint16_t *) &bytecode->bc.data[offset]; - const char *name = - (const char *) &bytecode->bc.data[offset + sizeof(uint16_t)]; - - ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name); - if (ret) { - goto link_error; - } - next_offset = offset + sizeof(uint16_t) + strlen(name) + 1; - } - /* Validate bytecode */ - ret = lttng_filter_validate_bytecode(runtime); - if (ret) { - goto link_error; - } - /* Specialize bytecode */ - ret = lttng_filter_specialize_bytecode(event_desc, runtime); - if (ret) { - goto link_error; - } - runtime->p.filter = lttng_filter_interpret_bytecode; - runtime->p.link_failed = 0; - cds_list_add_rcu(&runtime->p.node, insert_loc); - dbg_printf("Linking successful.\n"); - return 0; - -link_error: - runtime->p.filter = lttng_filter_interpret_bytecode_false; - runtime->p.link_failed = 1; - cds_list_add_rcu(&runtime->p.node, insert_loc); -alloc_error: - dbg_printf("Linking failed.\n"); - return ret; -} - -void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime) -{ - struct lttng_ust_bytecode_node *bc = runtime->bc; - - if (!bc->enabler->enabled || runtime->link_failed) - runtime->filter = lttng_filter_interpret_bytecode_false; - else - runtime->filter = lttng_filter_interpret_bytecode; -} - -/* - * Link all bytecodes of the enabler referenced in the provided bytecode list. - */ -void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc, - struct lttng_ctx **ctx, - struct cds_list_head *bytecode_runtime_head, - struct lttng_enabler *enabler) -{ - struct lttng_ust_bytecode_node *bc; - struct lttng_bytecode_runtime *runtime; - - assert(event_desc); - - /* Link each bytecode. 
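As the linking code above describes, the bytecode image received from the session daemon carries a relocation table after the executable code, each entry being a 16-bit code offset followed by a NUL-terminated field or context name. A standalone sketch of walking that table is shown below; the function name and the printf are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* data[0 .. reloc_offset) is executable code, data[reloc_offset .. len) is
 * the relocation table appended by the bytecode generator. */
static void walk_relocs(const char *data, uint32_t reloc_offset, uint32_t len)
{
        uint32_t offset;

        for (offset = reloc_offset; offset < len; ) {
                uint16_t code_off;
                const char *name = &data[offset + sizeof(uint16_t)];

                memcpy(&code_off, &data[offset], sizeof(code_off));
                printf("reloc: patch code offset %u with \"%s\"\n",
                        (unsigned int) code_off, name);
                offset += sizeof(uint16_t) + strlen(name) + 1;
        }
}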
-void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
-{
-	struct lttng_ust_bytecode_node *bc = runtime->bc;
-
-	if (!bc->enabler->enabled || runtime->link_failed)
-		runtime->filter = lttng_filter_interpret_bytecode_false;
-	else
-		runtime->filter = lttng_filter_interpret_bytecode;
-}
-
-/*
- * Link all bytecodes of the enabler referenced in the provided bytecode list.
- */
-void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
-		struct lttng_ctx **ctx,
-		struct cds_list_head *bytecode_runtime_head,
-		struct lttng_enabler *enabler)
-{
-	struct lttng_ust_bytecode_node *bc;
-	struct lttng_bytecode_runtime *runtime;
-
-	assert(event_desc);
-
-	/* Link each bytecode. */
-	cds_list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
-		int found = 0, ret;
-		struct cds_list_head *insert_loc;
-
-		cds_list_for_each_entry(runtime,
-				bytecode_runtime_head, node) {
-			if (runtime->bc == bc) {
-				found = 1;
-				break;
-			}
-		}
-		/* Skip bytecode already linked */
-		if (found)
-			continue;
-
-		/*
-		 * Insert at specified priority (seqnum) in increasing
-		 * order. If there already is a bytecode of the same priority,
-		 * insert the new bytecode right after it.
-		 */
-		cds_list_for_each_entry_reverse(runtime,
-				bytecode_runtime_head, node) {
-			if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
-				/* insert here */
-				insert_loc = &runtime->node;
-				goto add_within;
-			}
-		}
-
-		/* Add to head to list */
-		insert_loc = bytecode_runtime_head;
-	add_within:
-		dbg_printf("linking bytecode\n");
-		ret = _lttng_filter_link_bytecode(event_desc, ctx, bc,
-			insert_loc);
-		if (ret) {
-			dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
-		}
-	}
-}
-
-/*
- * We own the bytecode if we return success.
- */
-int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
-		struct lttng_ust_bytecode_node *bytecode)
-{
-	cds_list_add(&bytecode->node, &enabler->filter_bytecode_head);
-	return 0;
-}
-
-static
-void free_filter_runtime(struct cds_list_head *bytecode_runtime_head)
-{
-	struct bytecode_runtime *runtime, *tmp;
-
-	cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head,
-			p.node) {
-		free(runtime->data);
-		free(runtime);
-	}
-}
-
-void lttng_free_event_filter_runtime(struct lttng_event *event)
-{
-	free_filter_runtime(&event->filter_bytecode_runtime_head);
-}
-
-void lttng_free_event_notifier_filter_runtime(
-		struct lttng_event_notifier *event_notifier)
-{
-	free_filter_runtime(&event_notifier->filter_bytecode_runtime_head);
-}
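The seqnum ordering used by lttng_enabler_link_bytecode() above amounts to: keep runtimes sorted by bytecode sequence number, and insert a bytecode with an equal seqnum after the existing ones. A minimal sketch of that insertion rule, using a plain singly linked list rather than the urcu cds_list API (names are illustrative):

#include <stdint.h>
#include <stddef.h>

struct demo_runtime {
	uint64_t seqnum;
	struct demo_runtime *next;
};

/* Insert after the last element whose seqnum is <= the new one. */
static void demo_insert_sorted(struct demo_runtime **head,
		struct demo_runtime *new_rt)
{
	struct demo_runtime **pos = head;

	while (*pos && (*pos)->seqnum <= new_rt->seqnum)
		pos = &(*pos)->next;
	new_rt->next = *pos;
	*pos = new_rt;
}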
diff --git a/liblttng-ust/lttng-filter.h b/liblttng-ust/lttng-filter.h
deleted file mode 100644
index 61bc213b..00000000
--- a/liblttng-ust/lttng-filter.h
+++ /dev/null
@@ -1,343 +0,0 @@
-#ifndef _LTTNG_FILTER_H
-#define _LTTNG_FILTER_H
-
-/*
- * lttng-filter.h
- *
- * LTTng UST filter header.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "filter-bytecode.h"
-
-/* Filter stack length, in number of entries */
-#define FILTER_STACK_LEN	10	/* includes 2 dummy */
-#define FILTER_STACK_EMPTY	1
-
-#define FILTER_MAX_DATA_LEN	65536
-
-#ifndef min_t
-#define min_t(type, a, b)	\
-	((type) (a) < (type) (b) ? (type) (a) : (type) (b))
-#endif
-
-#ifndef likely
-#define likely(x)	__builtin_expect(!!(x), 1)
-#endif
-
-#ifndef unlikely
-#define unlikely(x)	__builtin_expect(!!(x), 0)
-#endif
-
-#ifdef DEBUG
-#define dbg_printf(fmt, args...)	\
-	printf("[debug bytecode in %s:%s@%u] " fmt,	\
-		__FILE__, __func__, __LINE__, ## args)
-#else
-#define dbg_printf(fmt, args...)	\
-do {	\
-	/* do nothing but check printf format */	\
-	if (0)	\
-		printf("[debug bytecode in %s:%s@%u] " fmt,	\
-			__FILE__, __func__, __LINE__, ## args);	\
-} while (0)
-#endif
-
-/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
-struct bytecode_runtime {
-	struct lttng_bytecode_runtime p;
-	size_t data_len;
-	size_t data_alloc_len;
-	char *data;
-	uint16_t len;
-	char code[0];
-};
-
-enum entry_type {
-	REG_S64,
-	REG_U64,
-	REG_DOUBLE,
-	REG_STRING,
-	REG_STAR_GLOB_STRING,
-	REG_UNKNOWN,
-	REG_PTR,
-};
-
-enum load_type {
-	LOAD_ROOT_CONTEXT,
-	LOAD_ROOT_APP_CONTEXT,
-	LOAD_ROOT_PAYLOAD,
-	LOAD_OBJECT,
-};
-
-enum object_type {
-	OBJECT_TYPE_S8,
-	OBJECT_TYPE_S16,
-	OBJECT_TYPE_S32,
-	OBJECT_TYPE_S64,
-	OBJECT_TYPE_U8,
-	OBJECT_TYPE_U16,
-	OBJECT_TYPE_U32,
-	OBJECT_TYPE_U64,
-
-	OBJECT_TYPE_DOUBLE,
-	OBJECT_TYPE_STRING,
-	OBJECT_TYPE_STRING_SEQUENCE,
-
-	OBJECT_TYPE_SEQUENCE,
-	OBJECT_TYPE_ARRAY,
-	OBJECT_TYPE_STRUCT,
-	OBJECT_TYPE_VARIANT,
-
-	OBJECT_TYPE_DYNAMIC,
-};
-
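/*
 * Illustration of the allocation pattern implied by the trailing
 * `char code[0]` member of struct bytecode_runtime above: the runtime header
 * and the copied bytecode share a single allocation, as done by
 * _lttng_filter_link_bytecode() (sizeof(*runtime) + bytecode length).
 * Simplified sketch with invented names, using the standard C99 flexible
 * array member spelling.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct demo_runtime {
	uint16_t len;
	char code[];		/* flexible array member */
};

static struct demo_runtime *demo_runtime_create(const char *bytecode, uint16_t len)
{
	struct demo_runtime *rt;

	rt = calloc(1, sizeof(*rt) + len);	/* header + code in one block */
	if (!rt)
		return NULL;
	rt->len = len;
	memcpy(rt->code, bytecode, len);
	return rt;
}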
-struct filter_get_index_data {
-	uint64_t offset;	/* in bytes */
-	size_t ctx_index;
-	size_t array_len;
-	/*
-	 * Field is only populated for LOAD_ROOT_CONTEXT, LOAD_ROOT_APP_CONTEXT
-	 * and LOAD_ROOT_PAYLOAD. Left NULL for LOAD_OBJECT, considering that the
-	 * interpreter needs to find it from the event fields and types to
-	 * support variants.
-	 */
-	const struct lttng_event_field *field;
-	struct {
-		size_t len;
-		enum object_type type;
-		bool rev_bo;	/* reverse byte order */
-	} elem;
-};
-
-/* Validation stack */
-struct vstack_load {
-	enum load_type type;
-	enum object_type object_type;
-	const struct lttng_event_field *field;
-	bool rev_bo;	/* reverse byte order */
-};
-
-struct vstack_entry {
-	enum entry_type type;
-	struct vstack_load load;
-};
-
-struct vstack {
-	int top;	/* top of stack */
-	struct vstack_entry e[FILTER_STACK_LEN];
-};
-
-static inline
-void vstack_init(struct vstack *stack)
-{
-	stack->top = -1;
-}
-
-static inline
-struct vstack_entry *vstack_ax(struct vstack *stack)
-{
-	if (unlikely(stack->top < 0))
-		return NULL;
-	return &stack->e[stack->top];
-}
-
-static inline
-struct vstack_entry *vstack_bx(struct vstack *stack)
-{
-	if (unlikely(stack->top < 1))
-		return NULL;
-	return &stack->e[stack->top - 1];
-}
-
-static inline
-int vstack_push(struct vstack *stack)
-{
-	if (stack->top >= FILTER_STACK_LEN - 1) {
-		ERR("Stack full\n");
-		return -EINVAL;
-	}
-	++stack->top;
-	return 0;
-}
-
-static inline
-int vstack_pop(struct vstack *stack)
-{
-	if (unlikely(stack->top < 0)) {
-		ERR("Stack empty\n");
-		return -EINVAL;
-	}
-	stack->top--;
-	return 0;
-}
-
-/* Execution stack */
-enum estack_string_literal_type {
-	ESTACK_STRING_LITERAL_TYPE_NONE,
-	ESTACK_STRING_LITERAL_TYPE_PLAIN,
-	ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
-};
-
-struct load_ptr {
-	enum load_type type;
-	enum object_type object_type;
-	const void *ptr;
-	size_t nr_elem;
-	bool rev_bo;
-	/* Temporary place-holders for contexts. */
-	union {
-		int64_t s64;
-		uint64_t u64;
-		double d;
-	} u;
-	const struct lttng_event_field *field;
-};
-
-struct estack_entry {
-	enum entry_type type;	/* For dynamic typing. */
-	union {
-		int64_t v;
-		double d;
-
-		struct {
-			const char *str;
-			size_t seq_len;
-			enum estack_string_literal_type literal_type;
-		} s;
-		struct load_ptr ptr;
-	} u;
-};
-
-struct estack {
-	int top;	/* top of stack */
-	struct estack_entry e[FILTER_STACK_LEN];
-};
-
-/*
- * Always use aliased type for ax/bx (top of stack).
- * When ax/bx are S64, use aliased value.
- */
-#define estack_ax_v	ax
-#define estack_bx_v	bx
-#define estack_ax_t	ax_t
-#define estack_bx_t	bx_t
-
-/*
- * ax and bx registers can hold either integer, double or string.
- */
-#define estack_ax(stack, top)	\
-	({	\
-		assert((top) > FILTER_STACK_EMPTY);	\
-		&(stack)->e[top];	\
-	})
-
-#define estack_bx(stack, top)	\
-	({	\
-		assert((top) > FILTER_STACK_EMPTY + 1);	\
-		&(stack)->e[(top) - 1];	\
-	})
-
-/*
- * Currently, only integers (REG_S64) can be pushed into the stack.
- */
-#define estack_push(stack, top, ax, bx, ax_t, bx_t)	\
-	do {	\
-		assert((top) < FILTER_STACK_LEN - 1);	\
-		(stack)->e[(top) - 1].u.v = (bx);	\
-		(stack)->e[(top) - 1].type = (bx_t);	\
-		(bx) = (ax);	\
-		(bx_t) = (ax_t);	\
-		++(top);	\
-	} while (0)
-
-#define estack_pop(stack, top, ax, bx, ax_t, bx_t)	\
-	do {	\
-		assert((top) > FILTER_STACK_EMPTY);	\
-		(ax) = (bx);	\
-		(ax_t) = (bx_t);	\
-		(bx) = (stack)->e[(top) - 2].u.v;	\
-		(bx_t) = (stack)->e[(top) - 2].type;	\
-		(top)--;	\
-	} while (0)
-
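/*
 * Usage sketch for the estack macros above (assumes the definitions in this
 * header plus <assert.h> are in scope): the two top-of-stack entries live in
 * the ax/bx "registers" (local variables) and only spill to the in-memory
 * stack on push. Simplified; the real interpreter wires these registers up
 * inside its instruction loop.
 */
static inline void demo_estack_usage(void)
{
	struct estack _stack;
	struct estack *stack = &_stack;
	register int64_t ax = 0, bx = 0;
	register enum entry_type ax_t = REG_S64, bx_t = REG_S64;
	register int top = FILTER_STACK_EMPTY;

	/* Push: the old ax spills into bx, the new value lands in ax. */
	estack_push(stack, top, ax, bx, ax_t, bx_t);
	estack_ax_v = 42;
	estack_ax_t = REG_S64;

	/* Pop: ax takes bx, and bx is reloaded from the in-memory stack. */
	estack_pop(stack, top, ax, bx, ax_t, bx_t);
}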
-enum lttng_interpreter_type {
-	LTTNG_INTERPRETER_TYPE_S64,
-	LTTNG_INTERPRETER_TYPE_U64,
-	LTTNG_INTERPRETER_TYPE_SIGNED_ENUM,
-	LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM,
-	LTTNG_INTERPRETER_TYPE_DOUBLE,
-	LTTNG_INTERPRETER_TYPE_STRING,
-	LTTNG_INTERPRETER_TYPE_SEQUENCE,
-};
-
-/*
- * Represents the output parameter of the lttng interpreter.
- * Currently capturable field classes are integer, double, string and sequence
- * of integer.
- */
-struct lttng_interpreter_output {
-	enum lttng_interpreter_type type;
-	union {
-		int64_t s;
-		uint64_t u;
-		double d;
-
-		struct {
-			const char *str;
-			size_t len;
-		} str;
-		struct {
-			const void *ptr;
-			size_t nr_elem;
-
-			/* Inner type. */
-			const struct lttng_type *nested_type;
-		} sequence;
-	} u;
-};
-
-const char *print_op(enum filter_op op);
-
-int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode);
-int lttng_filter_specialize_bytecode(const struct lttng_event_desc *event_desc,
-		struct bytecode_runtime *bytecode);
-
-uint64_t lttng_filter_interpret_bytecode_false(void *filter_data,
-		const char *filter_stack_data);
-uint64_t lttng_filter_interpret_bytecode(void *filter_data,
-		const char *filter_stack_data);
-
-#endif /* _LTTNG_FILTER_H */
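For context on how the two interpreter entry points declared above are consumed: a linked runtime's filter callback is invoked with the on-stack filter data, and the event is recorded only when the record bit is set in the returned mask (the callback is set to lttng_filter_interpret_bytecode on successful link, or to the _false variant when linking fails or the enabler is disabled). A minimal sketch of such a call site; the demo_* names and the flag value are stand-ins, not the real ust-events.h definitions:

#include <stdint.h>

#define DEMO_RECORD_FLAG	(1ULL << 0)

struct demo_bytecode_runtime {
	uint64_t (*filter)(void *filter_data, const char *filter_stack_data);
};

static int demo_should_record(struct demo_bytecode_runtime *rt,
		const char *stack_data)
{
	return (rt->filter(rt, stack_data) & DEMO_RECORD_FLAG) != 0;
}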