/* SPDX-License-Identifier: MIT
 *
 * lttng-bytecode.c
 *
 * LTTng modules bytecode code.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/list.h>
#include <linux/slab.h>

#include <lttng/lttng-bytecode.h>
#include <lttng/events-internal.h>
static const char *opnames[] = {
	[ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",

	[ BYTECODE_OP_RETURN ] = "RETURN",

	/* binary */
	[ BYTECODE_OP_MUL ] = "MUL",
	[ BYTECODE_OP_DIV ] = "DIV",
	[ BYTECODE_OP_MOD ] = "MOD",
	[ BYTECODE_OP_PLUS ] = "PLUS",
	[ BYTECODE_OP_MINUS ] = "MINUS",
	[ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
	[ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
	[ BYTECODE_OP_BIT_AND ] = "BIT_AND",
	[ BYTECODE_OP_BIT_OR ] = "BIT_OR",
	[ BYTECODE_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ BYTECODE_OP_EQ ] = "EQ",
	[ BYTECODE_OP_NE ] = "NE",
	[ BYTECODE_OP_GT ] = "GT",
	[ BYTECODE_OP_LT ] = "LT",
	[ BYTECODE_OP_GE ] = "GE",
	[ BYTECODE_OP_LE ] = "LE",

	/* string binary comparators */
	[ BYTECODE_OP_EQ_STRING ] = "EQ_STRING",
	[ BYTECODE_OP_NE_STRING ] = "NE_STRING",
	[ BYTECODE_OP_GT_STRING ] = "GT_STRING",
	[ BYTECODE_OP_LT_STRING ] = "LT_STRING",
	[ BYTECODE_OP_GE_STRING ] = "GE_STRING",
	[ BYTECODE_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ BYTECODE_OP_EQ_S64 ] = "EQ_S64",
	[ BYTECODE_OP_NE_S64 ] = "NE_S64",
	[ BYTECODE_OP_GT_S64 ] = "GT_S64",
	[ BYTECODE_OP_LT_S64 ] = "LT_S64",
	[ BYTECODE_OP_GE_S64 ] = "GE_S64",
	[ BYTECODE_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT",
	[ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ BYTECODE_OP_AND ] = "AND",
	[ BYTECODE_OP_OR ] = "OR",

	/* load field ref */
	[ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING",
	[ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64",
	[ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ BYTECODE_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",

	/*
	 * load star globbing pattern (literal string) from immediate
	 * operand.
	 */
	[ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/* star-glob pattern binary comparators */
	[ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",

	[ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",

	[ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64",
};

const char *lttng_bytecode_print_op(enum bytecode_op op)
{
	if (op >= NR_BYTECODE_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}

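/*
 * Patch a BYTECODE_OP_LOAD_FIELD_REF instruction that refers to an
 * event payload field by name: walk the event's fields to compute the
 * field's offset in the interpreter's field layout (integers and enums
 * take sizeof(int64_t), arrays and sequences a length plus a pointer,
 * strings a pointer), select the type-specialized load opcode, and
 * store the offset in the instruction's field_ref operand.
 */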
static
int apply_field_reloc(const struct lttng_kernel_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name,
		enum bytecode_op bytecode_op)
{
	const struct lttng_kernel_event_field * const *fields, *field = NULL;
	unsigned int nr_fields, i;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Look up the field by name in the event description. */
	if (!event_desc)
		return -EINVAL;
	fields = event_desc->tp_class->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = event_desc->tp_class->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		if (fields[i]->nofilter)
			continue;
		if (!strcmp(fields[i]->name, field_name)) {
			field = fields[i];
			break;
		}
		/* compute field offset */
		switch (fields[i]->type->type) {
		case lttng_kernel_type_integer:
		case lttng_kernel_type_enum:
			field_offset += sizeof(int64_t);
			break;
		case lttng_kernel_type_array:
			if (!lttng_kernel_type_is_bytewise_integer(lttng_kernel_get_type_array(fields[i]->type)->elem_type))
				return -EINVAL;
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case lttng_kernel_type_sequence:
			if (!lttng_kernel_type_is_bytewise_integer(lttng_kernel_get_type_sequence(fields[i]->type)->elem_type))
				return -EINVAL;
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case lttng_kernel_type_string:
			field_offset += sizeof(void *);
			break;
		case lttng_kernel_type_struct:	/* Unsupported. */
		case lttng_kernel_type_variant:	/* Unsupported. */
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > LTTNG_KERNEL_ABI_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (bytecode_op) {
	case BYTECODE_OP_LOAD_FIELD_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (field->type->type) {
		case lttng_kernel_type_integer:
		case lttng_kernel_type_enum:
			op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
			break;
		case lttng_kernel_type_array:
		{
			const struct lttng_kernel_type_array *array_type = lttng_kernel_get_type_array(field->type);
			const struct lttng_kernel_type_common *elem_type = array_type->elem_type;
			const struct lttng_kernel_type_integer *elem_integer_type;

			if (!lttng_kernel_type_is_bytewise_integer(elem_type) || array_type->encoding == lttng_kernel_string_encoding_none)
				return -EINVAL;
			elem_integer_type = container_of(elem_type, const struct lttng_kernel_type_integer, parent);
			if (elem_integer_type->user)
				op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE;
			else
				op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		}
		case lttng_kernel_type_sequence:
		{
			const struct lttng_kernel_type_sequence *sequence_type = lttng_kernel_get_type_sequence(field->type);
			const struct lttng_kernel_type_common *elem_type = sequence_type->elem_type;
			const struct lttng_kernel_type_integer *elem_integer_type;

			if (!lttng_kernel_type_is_bytewise_integer(elem_type) || sequence_type->encoding == lttng_kernel_string_encoding_none)
				return -EINVAL;
			elem_integer_type = container_of(elem_type, const struct lttng_kernel_type_integer, parent);
			if (elem_integer_type->user)
				op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE;
			else
				op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		}
		case lttng_kernel_type_string:
		{
			const struct lttng_kernel_type_string *string_type = lttng_kernel_get_type_string(field->type);

			if (string_type->user)
				op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_STRING;
			else
				op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
			break;
		}
		case lttng_kernel_type_struct:	/* Unsupported. */
		case lttng_kernel_type_variant:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset */
		field_ref->offset = (uint16_t) field_offset;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

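/*
 * Patch a BYTECODE_OP_GET_CONTEXT_REF instruction that refers to a
 * context field by name: resolve the name to an index in the static
 * context table, select the type-specialized opcode, and store the
 * index in the instruction's field_ref operand.
 */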
static
int apply_context_reloc(struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name,
		enum bytecode_op bytecode_op)
{
	struct load_op *op;
	struct lttng_kernel_ctx_field *ctx_field;
	int idx;

	dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_kernel_get_context_index(lttng_static_ctx, context_name);
	if (idx < 0)
		return -ENOENT;

	/* Check if idx is too large for 16-bit offset */
	if (idx > LTTNG_KERNEL_ABI_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &lttng_static_ctx->fields[idx];
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (bytecode_op) {
	case BYTECODE_OP_GET_CONTEXT_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (ctx_field->event_field->type->type) {
		case lttng_kernel_type_integer:
		case lttng_kernel_type_enum:
			op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
			break;
		/* Sequence and array supported as string */
		case lttng_kernel_type_string:
		{
			const struct lttng_kernel_type_string *string_type = lttng_kernel_get_type_string(ctx_field->event_field->type);

			BUG_ON(string_type->user);
			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
			break;
		}
		case lttng_kernel_type_array:
		{
			const struct lttng_kernel_type_array *array_type = lttng_kernel_get_type_array(ctx_field->event_field->type);
			const struct lttng_kernel_type_common *elem_type = array_type->elem_type;
			const struct lttng_kernel_type_integer *elem_integer_type;

			if (!lttng_kernel_type_is_bytewise_integer(elem_type) || array_type->encoding == lttng_kernel_string_encoding_none)
				return -EINVAL;
			elem_integer_type = container_of(elem_type, const struct lttng_kernel_type_integer, parent);
			BUG_ON(elem_integer_type->user);
			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
			break;
		}
		case lttng_kernel_type_sequence:
		{
			const struct lttng_kernel_type_sequence *sequence_type = lttng_kernel_get_type_sequence(ctx_field->event_field->type);
			const struct lttng_kernel_type_common *elem_type = sequence_type->elem_type;
			const struct lttng_kernel_type_integer *elem_integer_type;

			if (!lttng_kernel_type_is_bytewise_integer(elem_type) || sequence_type->encoding == lttng_kernel_string_encoding_none)
				return -EINVAL;
			elem_integer_type = container_of(elem_type, const struct lttng_kernel_type_integer, parent);
			BUG_ON(elem_integer_type->user);
			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
			break;
		}
		case lttng_kernel_type_struct:	/* Unsupported. */
		case lttng_kernel_type_variant:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset to context index within channel contexts */
		field_ref->offset = (uint16_t) idx;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

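/*
 * Apply a single relocation entry: bounds-check the target offset, then
 * dispatch on the opcode found there. Field and context references are
 * patched immediately; symbol lookups are left for the specialization
 * phase or the interpreter.
 */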
static
int apply_reloc(const struct lttng_kernel_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *name)
{
	struct load_op *op;

	dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	op = (struct load_op *) &runtime->code[reloc_offset];
	switch (op->op) {
	case BYTECODE_OP_LOAD_FIELD_REF:
		return apply_field_reloc(event_desc, runtime, runtime_len,
				reloc_offset, name, op->op);
	case BYTECODE_OP_GET_CONTEXT_REF:
		return apply_context_reloc(runtime, runtime_len,
				reloc_offset, name, op->op);
	case BYTECODE_OP_GET_SYMBOL:
	case BYTECODE_OP_GET_SYMBOL_FIELD:
		/*
		 * Will be handled by load specialize phase or
		 * dynamically by interpreter.
		 */
		return 0;
	default:
		printk(KERN_WARNING "LTTng: filter: Unknown reloc op type %u\n",
			op->op);
		return -EINVAL;
	}
	return 0;
}

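/*
 * Return 1 if the bytecode node already has a runtime on the given
 * runtime list, 0 otherwise.
 */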
static
int bytecode_is_linked(struct lttng_kernel_bytecode_node *bytecode,
		struct list_head *bytecode_runtime_head)
{
	struct lttng_kernel_bytecode_runtime *bc_runtime;

	list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
		if (bc_runtime->bc == bytecode)
			return 1;
	}
	return 0;
}

/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
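 *
 * Linking proceeds in stages: copy the instructions (without the reloc
 * table), validate the load instructions, apply the relocations,
 * validate the resulting bytecode, then specialize it for the event.
 * On failure the runtime is still inserted on the list, with its
 * interpreter function set to the error stub.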
 */
static
int link_bytecode(const struct lttng_kernel_event_desc *event_desc,
		struct lttng_kernel_ctx *ctx,
		struct lttng_kernel_bytecode_node *bytecode,
		struct list_head *bytecode_runtime_head,
		struct list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(bytecode, bytecode_runtime_head))
		return 0;

	dbg_printk("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
	runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.type = bytecode->type;
	runtime->p.bc = bytecode;
	runtime->p.ctx = ctx;
	runtime->len = bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->code, bytecode->bc.data, runtime->len);
	/* Validate bytecode load instructions before relocs. */
	ret = lttng_bytecode_validate_load(runtime);
	if (ret) {
		goto link_error;
	}
	/*
	 * Apply relocs. Those are a uint16_t (offset in bytecode)
	 * followed by a string (field name).
	 */
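	/*
	 * Reloc table layout (illustration only; entry format follows
	 * from the loop below):
	 *
	 *   bc.data:  [ instructions ... | reloc entries ... ]
	 *                                ^ bc.reloc_offset    ^ bc.len
	 *
	 * Each entry is a uint16_t (offset of the load op to patch)
	 * followed by a NUL-terminated name, e.g. { 0x0005, "next_comm\0" }
	 * (hypothetical field name).
	 */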
	for (offset = bytecode->bc.reloc_offset;
			offset < bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &bytecode->bc.data[offset];
		const char *name =
			(const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
		if (ret) {
			goto link_error;
		}
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_bytecode_validate(runtime);
	if (ret) {
		goto link_error;
	}
	/* Specialize bytecode */
	ret = lttng_bytecode_specialize(event_desc, runtime);
	if (ret) {
		goto link_error;
	}
	runtime->p.interpreter_func = lttng_bytecode_interpret;
	runtime->p.link_failed = 0;
	list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printk("Linking successful.\n");
	return 0;

link_error:
	runtime->p.interpreter_func = lttng_bytecode_interpret_error;
	runtime->p.link_failed = 1;
	list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printk("Linking failed.\n");
	return ret;
}

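/*
 * Re-evaluate which interpreter function a runtime should use: the
 * error stub when its enabler is disabled or linking failed, the real
 * interpreter otherwise.
 */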
void lttng_bytecode_sync_state(struct lttng_kernel_bytecode_runtime *runtime)
{
	struct lttng_kernel_bytecode_node *bc = runtime->bc;

	if (!bc->enabler->enabled || runtime->link_failed)
		runtime->interpreter_func = lttng_bytecode_interpret_error;
	else
		runtime->interpreter_func = lttng_bytecode_interpret;
}

/*
 * Given the lists of bytecode programs of an instance (event or event
 * notifier) and of a matching enabler, try to link all the enabler's bytecode
 * programs with the instance.
 *
 * This function is called after we have confirmed that the enabler and
 * the instance have matching names (or that the enabler's glob pattern
 * matches the instance name).
 */
void lttng_enabler_link_bytecode(const struct lttng_kernel_event_desc *event_desc,
		struct lttng_kernel_ctx *ctx,
		struct list_head *instance_bytecode_head,
		struct list_head *enabler_bytecode_head)
{
	struct lttng_kernel_bytecode_node *enabler_bc;
	struct lttng_kernel_bytecode_runtime *runtime;

	WARN_ON_ONCE(!event_desc);

	/* Go over all the bytecode programs of the enabler. */
	list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
		int found = 0, ret;
		struct list_head *insert_loc;

		/*
		 * Check if the current enabler bytecode program is already
		 * linked with the instance.
		 */
		list_for_each_entry(runtime, instance_bytecode_head, node) {
			if (runtime->bc == enabler_bc) {
				found = 1;
				break;
			}
		}

		/*
		 * Skip bytecode already linked, go to the next enabler
		 * bytecode program.
		 */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order. If there already is a bytecode of the same priority,
		 * insert the new bytecode right after it.
		 */
		list_for_each_entry_reverse(runtime,
				instance_bytecode_head, node) {
			if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}
		/* Add to head of list */
		insert_loc = instance_bytecode_head;
add_within:
		dbg_printk("linking bytecode\n");
		ret = link_bytecode(event_desc, ctx, enabler_bc, instance_bytecode_head, insert_loc);
		if (ret) {
			dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}

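/*
 * Free all filter bytecode runtimes attached to an event, along with
 * each runtime's auxiliary data buffer.
 */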
void lttng_free_event_filter_runtime(struct lttng_kernel_event_common *event)
{
	struct bytecode_runtime *runtime, *tmp;

	list_for_each_entry_safe(runtime, tmp,
			&event->priv->filter_bytecode_runtime_head, p.node) {
		kfree(runtime->data);
		kfree(runtime);
	}
}