4 * LTTng UST bytecode code.
6 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 #include <urcu/rculist.h>
33 #include "lttng-bytecode.h"
34 #include "ust-events-internal.h"
36 static const char *opnames
[] = {
37 [ BYTECODE_OP_UNKNOWN
] = "UNKNOWN",
39 [ BYTECODE_OP_RETURN
] = "RETURN",
42 [ BYTECODE_OP_MUL
] = "MUL",
43 [ BYTECODE_OP_DIV
] = "DIV",
44 [ BYTECODE_OP_MOD
] = "MOD",
45 [ BYTECODE_OP_PLUS
] = "PLUS",
46 [ BYTECODE_OP_MINUS
] = "MINUS",
47 [ BYTECODE_OP_BIT_RSHIFT
] = "BIT_RSHIFT",
48 [ BYTECODE_OP_BIT_LSHIFT
] = "BIT_LSHIFT",
49 [ BYTECODE_OP_BIT_AND
] = "BIT_AND",
50 [ BYTECODE_OP_BIT_OR
] = "BIT_OR",
51 [ BYTECODE_OP_BIT_XOR
] = "BIT_XOR",
53 /* binary comparators */
54 [ BYTECODE_OP_EQ
] = "EQ",
55 [ BYTECODE_OP_NE
] = "NE",
56 [ BYTECODE_OP_GT
] = "GT",
57 [ BYTECODE_OP_LT
] = "LT",
58 [ BYTECODE_OP_GE
] = "GE",
59 [ BYTECODE_OP_LE
] = "LE",
61 /* string binary comparators */
62 [ BYTECODE_OP_EQ_STRING
] = "EQ_STRING",
63 [ BYTECODE_OP_NE_STRING
] = "NE_STRING",
64 [ BYTECODE_OP_GT_STRING
] = "GT_STRING",
65 [ BYTECODE_OP_LT_STRING
] = "LT_STRING",
66 [ BYTECODE_OP_GE_STRING
] = "GE_STRING",
67 [ BYTECODE_OP_LE_STRING
] = "LE_STRING",
69 /* s64 binary comparators */
70 [ BYTECODE_OP_EQ_S64
] = "EQ_S64",
71 [ BYTECODE_OP_NE_S64
] = "NE_S64",
72 [ BYTECODE_OP_GT_S64
] = "GT_S64",
73 [ BYTECODE_OP_LT_S64
] = "LT_S64",
74 [ BYTECODE_OP_GE_S64
] = "GE_S64",
75 [ BYTECODE_OP_LE_S64
] = "LE_S64",
77 /* double binary comparators */
78 [ BYTECODE_OP_EQ_DOUBLE
] = "EQ_DOUBLE",
79 [ BYTECODE_OP_NE_DOUBLE
] = "NE_DOUBLE",
80 [ BYTECODE_OP_GT_DOUBLE
] = "GT_DOUBLE",
81 [ BYTECODE_OP_LT_DOUBLE
] = "LT_DOUBLE",
82 [ BYTECODE_OP_GE_DOUBLE
] = "GE_DOUBLE",
83 [ BYTECODE_OP_LE_DOUBLE
] = "LE_DOUBLE",
85 /* Mixed S64-double binary comparators */
86 [ BYTECODE_OP_EQ_DOUBLE_S64
] = "EQ_DOUBLE_S64",
87 [ BYTECODE_OP_NE_DOUBLE_S64
] = "NE_DOUBLE_S64",
88 [ BYTECODE_OP_GT_DOUBLE_S64
] = "GT_DOUBLE_S64",
89 [ BYTECODE_OP_LT_DOUBLE_S64
] = "LT_DOUBLE_S64",
90 [ BYTECODE_OP_GE_DOUBLE_S64
] = "GE_DOUBLE_S64",
91 [ BYTECODE_OP_LE_DOUBLE_S64
] = "LE_DOUBLE_S64",
93 [ BYTECODE_OP_EQ_S64_DOUBLE
] = "EQ_S64_DOUBLE",
94 [ BYTECODE_OP_NE_S64_DOUBLE
] = "NE_S64_DOUBLE",
95 [ BYTECODE_OP_GT_S64_DOUBLE
] = "GT_S64_DOUBLE",
96 [ BYTECODE_OP_LT_S64_DOUBLE
] = "LT_S64_DOUBLE",
97 [ BYTECODE_OP_GE_S64_DOUBLE
] = "GE_S64_DOUBLE",
98 [ BYTECODE_OP_LE_S64_DOUBLE
] = "LE_S64_DOUBLE",
101 [ BYTECODE_OP_UNARY_PLUS
] = "UNARY_PLUS",
102 [ BYTECODE_OP_UNARY_MINUS
] = "UNARY_MINUS",
103 [ BYTECODE_OP_UNARY_NOT
] = "UNARY_NOT",
104 [ BYTECODE_OP_UNARY_PLUS_S64
] = "UNARY_PLUS_S64",
105 [ BYTECODE_OP_UNARY_MINUS_S64
] = "UNARY_MINUS_S64",
106 [ BYTECODE_OP_UNARY_NOT_S64
] = "UNARY_NOT_S64",
107 [ BYTECODE_OP_UNARY_PLUS_DOUBLE
] = "UNARY_PLUS_DOUBLE",
108 [ BYTECODE_OP_UNARY_MINUS_DOUBLE
] = "UNARY_MINUS_DOUBLE",
109 [ BYTECODE_OP_UNARY_NOT_DOUBLE
] = "UNARY_NOT_DOUBLE",
112 [ BYTECODE_OP_AND
] = "AND",
113 [ BYTECODE_OP_OR
] = "OR",
116 [ BYTECODE_OP_LOAD_FIELD_REF
] = "LOAD_FIELD_REF",
117 [ BYTECODE_OP_LOAD_FIELD_REF_STRING
] = "LOAD_FIELD_REF_STRING",
118 [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE
] = "LOAD_FIELD_REF_SEQUENCE",
119 [ BYTECODE_OP_LOAD_FIELD_REF_S64
] = "LOAD_FIELD_REF_S64",
120 [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE
] = "LOAD_FIELD_REF_DOUBLE",
122 /* load from immediate operand */
123 [ BYTECODE_OP_LOAD_STRING
] = "LOAD_STRING",
124 [ BYTECODE_OP_LOAD_S64
] = "LOAD_S64",
125 [ BYTECODE_OP_LOAD_DOUBLE
] = "LOAD_DOUBLE",
128 [ BYTECODE_OP_CAST_TO_S64
] = "CAST_TO_S64",
129 [ BYTECODE_OP_CAST_DOUBLE_TO_S64
] = "CAST_DOUBLE_TO_S64",
130 [ BYTECODE_OP_CAST_NOP
] = "CAST_NOP",
132 /* get context ref */
133 [ BYTECODE_OP_GET_CONTEXT_REF
] = "GET_CONTEXT_REF",
134 [ BYTECODE_OP_GET_CONTEXT_REF_STRING
] = "GET_CONTEXT_REF_STRING",
135 [ BYTECODE_OP_GET_CONTEXT_REF_S64
] = "GET_CONTEXT_REF_S64",
136 [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE
] = "GET_CONTEXT_REF_DOUBLE",
138 /* load userspace field ref */
139 [ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING
] = "LOAD_FIELD_REF_USER_STRING",
140 [ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = "LOAD_FIELD_REF_USER_SEQUENCE",
143 * load immediate star globbing pattern (literal string)
146 [ BYTECODE_OP_LOAD_STAR_GLOB_STRING
] = "LOAD_STAR_GLOB_STRING",
148 /* globbing pattern binary operator: apply to */
149 [ BYTECODE_OP_EQ_STAR_GLOB_STRING
] = "EQ_STAR_GLOB_STRING",
150 [ BYTECODE_OP_NE_STAR_GLOB_STRING
] = "NE_STAR_GLOB_STRING",
153 * Instructions for recursive traversal through composed types.
155 [ BYTECODE_OP_GET_CONTEXT_ROOT
] = "GET_CONTEXT_ROOT",
156 [ BYTECODE_OP_GET_APP_CONTEXT_ROOT
] = "GET_APP_CONTEXT_ROOT",
157 [ BYTECODE_OP_GET_PAYLOAD_ROOT
] = "GET_PAYLOAD_ROOT",
159 [ BYTECODE_OP_GET_SYMBOL
] = "GET_SYMBOL",
160 [ BYTECODE_OP_GET_SYMBOL_FIELD
] = "GET_SYMBOL_FIELD",
161 [ BYTECODE_OP_GET_INDEX_U16
] = "GET_INDEX_U16",
162 [ BYTECODE_OP_GET_INDEX_U64
] = "GET_INDEX_U64",
164 [ BYTECODE_OP_LOAD_FIELD
] = "LOAD_FIELD",
165 [ BYTECODE_OP_LOAD_FIELD_S8
] = "LOAD_FIELD_S8",
166 [ BYTECODE_OP_LOAD_FIELD_S16
] = "LOAD_FIELD_S16",
167 [ BYTECODE_OP_LOAD_FIELD_S32
] = "LOAD_FIELD_S32",
168 [ BYTECODE_OP_LOAD_FIELD_S64
] = "LOAD_FIELD_S64",
169 [ BYTECODE_OP_LOAD_FIELD_U8
] = "LOAD_FIELD_U8",
170 [ BYTECODE_OP_LOAD_FIELD_U16
] = "LOAD_FIELD_U16",
171 [ BYTECODE_OP_LOAD_FIELD_U32
] = "LOAD_FIELD_U32",
172 [ BYTECODE_OP_LOAD_FIELD_U64
] = "LOAD_FIELD_U64",
173 [ BYTECODE_OP_LOAD_FIELD_STRING
] = "LOAD_FIELD_STRING",
174 [ BYTECODE_OP_LOAD_FIELD_SEQUENCE
] = "LOAD_FIELD_SEQUENCE",
175 [ BYTECODE_OP_LOAD_FIELD_DOUBLE
] = "LOAD_FIELD_DOUBLE",
177 [ BYTECODE_OP_UNARY_BIT_NOT
] = "UNARY_BIT_NOT",
179 [ BYTECODE_OP_RETURN_S64
] = "RETURN_S64",
182 const char *print_op(enum bytecode_op op
)
184 if (op
>= NR_BYTECODE_OPS
)
/*
 * apply_field_reloc(): patch a generic field-reference instruction in the
 * runtime bytecode. Looks the field up by name in the event descriptor,
 * accumulates the byte offset of preceding fields by their abstract type,
 * then rewrites op->op to the type-specialized opcode (S64 / SEQUENCE /
 * STRING / DOUBLE variants of LOAD_FIELD_REF) and stores the 16-bit
 * field offset into the instruction's field_ref operand.
 *
 * NOTE(review): this chunk was mechanically re-wrapped and several
 * original lines are absent (opening brace, `struct load_op *op;`
 * declaration, some `case` labels, all `break` and `return` statements,
 * closing braces) — it does not compile as-is. Restore the missing lines
 * from upstream lttng-ust lttng-bytecode.c before relying on it; the
 * fragments below are kept byte-identical.
 */
191 int apply_field_reloc(const struct lttng_event_desc
*event_desc
,
192 struct bytecode_runtime
*runtime
,
193 uint32_t runtime_len
,
194 uint32_t reloc_offset
,
195 const char *field_name
,
196 enum bytecode_op bytecode_op
)
198 const struct lttng_event_field
*fields
, *field
= NULL
;
199 unsigned int nr_fields
, i
;
201 uint32_t field_offset
= 0;
203 dbg_printf("Apply field reloc: %u %s\n", reloc_offset
, field_name
);
205 /* Lookup event by name */
208 fields
= event_desc
->fields
;
211 nr_fields
= event_desc
->nr_fields
;
/* Walk the event's fields; match by name, skipping nofilter fields. */
212 for (i
= 0; i
< nr_fields
; i
++) {
213 if (fields
[i
].u
.ext
.nofilter
) {
216 if (!strcmp(fields
[i
].name
, field_name
)) {
220 /* compute field offset */
/*
 * Accumulate the serialized size of each preceding field by its
 * abstract type. NOTE(review): non-nestable case labels (e.g. the
 * legacy integer/enum/array/sequence/string/float cases) appear to
 * have been dropped by the extraction — TODO confirm upstream.
 */
221 switch (fields
[i
].type
.atype
) {
224 case atype_enum_nestable
:
225 field_offset
+= sizeof(int64_t);
228 case atype_array_nestable
:
230 case atype_sequence_nestable
:
231 field_offset
+= sizeof(unsigned long);
232 field_offset
+= sizeof(void *);
235 field_offset
+= sizeof(void *);
238 field_offset
+= sizeof(double);
247 /* Check if field offset is too large for 16-bit offset */
248 if (field_offset
> FILTER_BYTECODE_MAX_LEN
- 1)
/* Point at the instruction to patch inside the runtime code buffer. */
252 op
= (struct load_op
*) &runtime
->code
[reloc_offset
];
254 switch (bytecode_op
) {
255 case BYTECODE_OP_LOAD_FIELD_REF
:
257 struct field_ref
*field_ref
;
259 field_ref
= (struct field_ref
*) op
->data
;
/* Specialize the opcode according to the matched field's type. */
260 switch (field
->type
.atype
) {
263 case atype_enum_nestable
:
264 op
->op
= BYTECODE_OP_LOAD_FIELD_REF_S64
;
267 case atype_array_nestable
:
269 case atype_sequence_nestable
:
270 op
->op
= BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE
;
273 op
->op
= BYTECODE_OP_LOAD_FIELD_REF_STRING
;
276 op
->op
= BYTECODE_OP_LOAD_FIELD_REF_DOUBLE
;
/* Store the computed offset in the instruction's operand. */
282 field_ref
->offset
= (uint16_t) field_offset
;
/*
 * apply_context_reloc(): patch a generic context-reference instruction.
 * Resolves the context field by name (registering an application context
 * via lttng_ust_add_app_context_to_ctx_rcu() when needed), rewrites
 * op->op to the type-specialized GET_CONTEXT_REF_* opcode based on the
 * context field's abstract type, and stores the context index as a
 * 16-bit offset in the instruction's field_ref operand.
 *
 * NOTE(review): this chunk was mechanically re-wrapped and several
 * original lines are absent (opening brace, `int idx, ret;` style
 * declarations, some `case` labels, `break`/`return` statements,
 * closing braces) — it does not compile as-is. Restore from upstream
 * lttng-ust lttng-bytecode.c; the fragments below are kept
 * byte-identical.
 */
292 int apply_context_reloc(struct bytecode_runtime
*runtime
,
293 uint32_t runtime_len
,
294 uint32_t reloc_offset
,
295 const char *context_name
,
296 enum bytecode_op bytecode_op
)
299 struct lttng_ctx_field
*ctx_field
;
301 struct lttng_ctx
*ctx
= *runtime
->p
.pctx
;
303 dbg_printf("Apply context reloc: %u %s\n", reloc_offset
, context_name
);
305 /* Get context index */
306 idx
= lttng_get_context_index(ctx
, context_name
);
/*
 * Application contexts may not exist yet: register them and retry
 * the index lookup.
 */
308 if (lttng_context_is_app(context_name
)) {
311 ret
= lttng_ust_add_app_context_to_ctx_rcu(context_name
,
315 idx
= lttng_get_context_index(ctx
, context_name
);
322 /* Check if idx is too large for 16-bit offset */
323 if (idx
> FILTER_BYTECODE_MAX_LEN
- 1)
326 /* Get context return type */
327 ctx_field
= &ctx
->fields
[idx
];
328 op
= (struct load_op
*) &runtime
->code
[reloc_offset
];
330 switch (bytecode_op
) {
331 case BYTECODE_OP_GET_CONTEXT_REF
:
333 struct field_ref
*field_ref
;
335 field_ref
= (struct field_ref
*) op
->data
;
/* Specialize the opcode according to the context field's type. */
336 switch (ctx_field
->event_field
.type
.atype
) {
339 case atype_enum_nestable
:
340 op
->op
= BYTECODE_OP_GET_CONTEXT_REF_S64
;
342 /* Sequence and array supported as string */
345 case atype_array_nestable
:
347 case atype_sequence_nestable
:
348 op
->op
= BYTECODE_OP_GET_CONTEXT_REF_STRING
;
351 op
->op
= BYTECODE_OP_GET_CONTEXT_REF_DOUBLE
;
354 op
->op
= BYTECODE_OP_GET_CONTEXT_REF
;
357 /* set offset to context index within channel contexts */
360 field_ref
->offset
= (uint16_t) idx
;
/*
 * apply_reloc(): dispatch one relocation entry to the appropriate
 * handler based on the opcode found at reloc_offset in the runtime
 * bytecode. LOAD_FIELD_REF relocations go to apply_field_reloc(),
 * GET_CONTEXT_REF to apply_context_reloc(); GET_SYMBOL[_FIELD] need no
 * patching here (handled later by specialization or the interpreter);
 * anything else is reported as an error.
 *
 * NOTE(review): this chunk was mechanically re-wrapped and several
 * original lines are absent (opening brace, the `switch (op->op)`
 * header, `return` statements, closing braces) — it does not compile
 * as-is. Restore from upstream lttng-ust lttng-bytecode.c; the
 * fragments below are kept byte-identical.
 */
370 int apply_reloc(const struct lttng_event_desc
*event_desc
,
371 struct bytecode_runtime
*runtime
,
372 uint32_t runtime_len
,
373 uint32_t reloc_offset
,
378 dbg_printf("Apply reloc: %u %s\n", reloc_offset
, name
);
380 /* Ensure that the reloc is within the code */
381 if (runtime_len
- reloc_offset
< sizeof(uint16_t))
384 op
= (struct load_op
*) &runtime
->code
[reloc_offset
];
386 case BYTECODE_OP_LOAD_FIELD_REF
:
387 return apply_field_reloc(event_desc
, runtime
, runtime_len
,
388 reloc_offset
, name
, op
->op
);
389 case BYTECODE_OP_GET_CONTEXT_REF
:
390 return apply_context_reloc(runtime
, runtime_len
,
391 reloc_offset
, name
, op
->op
);
392 case BYTECODE_OP_GET_SYMBOL
:
393 case BYTECODE_OP_GET_SYMBOL_FIELD
:
395 * Will be handled by load specialize phase or
396 * dynamically by interpreter.
400 ERR("Unknown reloc op type %u\n", op
->op
);
407 int bytecode_is_linked(struct lttng_ust_bytecode_node
*bytecode
,
408 struct cds_list_head
*bytecode_runtime_head
)
410 struct lttng_bytecode_runtime
*bc_runtime
;
412 cds_list_for_each_entry(bc_runtime
, bytecode_runtime_head
, node
) {
413 if (bc_runtime
->bc
== bytecode
)
/*
 * link_bytecode(): take a bytecode node with its relocation table and
 * link it against an event, producing a bytecode_runtime. Steps visible
 * below: skip if already linked; allocate the runtime (code sized to
 * bc.reloc_offset, i.e. without the reloc table); copy the original
 * bytecode; apply each relocation entry (uint16_t offset + NUL-
 * terminated field name); validate; specialize; install the interpreter
 * function and add the runtime to @insert_loc (RCU list). On failure,
 * the "false" interpreter is installed and link_failed is set so the
 * runtime stays on the list but always rejects.
 *
 * NOTE(review): this chunk was mechanically re-wrapped and many
 * original lines are absent (braces, allocation-failure check, error
 * gotos, `break`/`default` statements in both switches, the `ret`
 * checks after validate/specialize, `return` statements) — it does not
 * compile as-is. Restore from upstream lttng-ust lttng-bytecode.c; the
 * fragments below are kept byte-identical.
 */
420 * Take a bytecode with reloc table and link it to an event to create a
424 int link_bytecode(const struct lttng_event_desc
*event_desc
,
425 struct lttng_ctx
**ctx
,
426 struct lttng_ust_bytecode_node
*bytecode
,
427 struct cds_list_head
*insert_loc
)
429 int ret
, offset
, next_offset
;
430 struct bytecode_runtime
*runtime
= NULL
;
431 size_t runtime_alloc_len
;
435 /* Bytecode already linked */
436 if (bytecode_is_linked(bytecode
, insert_loc
))
439 dbg_printf("Linking...\n");
441 /* We don't need the reloc table in the runtime */
442 runtime_alloc_len
= sizeof(*runtime
) + bytecode
->bc
.reloc_offset
;
443 runtime
= zmalloc(runtime_alloc_len
);
448 runtime
->p
.bc
= bytecode
;
449 runtime
->p
.pctx
= ctx
;
450 runtime
->len
= bytecode
->bc
.reloc_offset
;
451 /* copy original bytecode */
452 memcpy(runtime
->code
, bytecode
->bc
.data
, runtime
->len
);
454 * apply relocs. Those are a uint16_t (offset in bytecode)
455 * followed by a string (field name).
457 for (offset
= bytecode
->bc
.reloc_offset
;
458 offset
< bytecode
->bc
.len
;
459 offset
= next_offset
) {
460 uint16_t reloc_offset
=
461 *(uint16_t *) &bytecode
->bc
.data
[offset
];
463 (const char *) &bytecode
->bc
.data
[offset
+ sizeof(uint16_t)];
465 ret
= apply_reloc(event_desc
, runtime
, runtime
->len
, reloc_offset
, name
);
469 next_offset
= offset
+ sizeof(uint16_t) + strlen(name
) + 1;
471 /* Validate bytecode */
472 ret
= lttng_bytecode_validate(runtime
);
476 /* Specialize bytecode */
477 ret
= lttng_bytecode_specialize(event_desc
, runtime
);
/* Success path: install the real interpreter and publish the runtime. */
482 switch (bytecode
->type
) {
483 case LTTNG_UST_BYTECODE_NODE_TYPE_FILTER
:
484 runtime
->p
.interpreter_funcs
.filter
= lttng_bytecode_filter_interpret
;
490 runtime
->p
.link_failed
= 0;
491 cds_list_add_rcu(&runtime
->p
.node
, insert_loc
);
492 dbg_printf("Linking successful.\n");
/*
 * Error path: keep the runtime on the list but wire the always-false
 * interpreter and flag the failure.
 */
496 switch (bytecode
->type
) {
497 case LTTNG_UST_BYTECODE_NODE_TYPE_FILTER
:
498 runtime
->p
.interpreter_funcs
.filter
= lttng_bytecode_filter_interpret_false
;
504 runtime
->p
.link_failed
= 1;
505 cds_list_add_rcu(&runtime
->p
.node
, insert_loc
);
507 dbg_printf("Linking failed.\n");
511 void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime
*runtime
)
513 struct lttng_ust_bytecode_node
*bc
= runtime
->bc
;
515 if (!bc
->enabler
->enabled
|| runtime
->link_failed
)
516 runtime
->interpreter_funcs
.filter
= lttng_bytecode_filter_interpret_false
;
518 runtime
->interpreter_funcs
.filter
= lttng_bytecode_filter_interpret
;
/*
 * lttng_enabler_link_bytecode(): for every bytecode program attached to
 * a matching enabler, link it with the instance (event or trigger)
 * unless already linked. The insertion point is chosen so runtimes stay
 * sorted by bytecode sequence number (seqnum), scanning the instance
 * list in reverse and inserting after the last runtime with seqnum <=
 * the new program's; if none matches, insert at the list head. Failures
 * from link_bytecode() are only logged.
 *
 * NOTE(review): this chunk was mechanically re-wrapped and several
 * original lines are absent (braces, the `int ret;` declaration, the
 * "already linked" flag/continue logic, `goto`/`break` statements and
 * the insertion-found label) — it does not compile as-is. Restore from
 * upstream lttng-ust lttng-bytecode.c; the fragments below are kept
 * byte-identical.
 */
522 * Given the lists of bytecode programs of an instance (trigger or event) and
523 * of a matching enabler, try to link all the enabler's bytecode programs with
526 * This function is called after we confirmed that name enabler and the
527 * instance are name matching (or glob pattern matching).
529 void lttng_enabler_link_bytecode(const struct lttng_event_desc
*event_desc
,
530 struct lttng_ctx
**ctx
,
531 struct cds_list_head
*instance_bytecode_head
,
532 struct cds_list_head
*enabler_bytecode_head
)
534 struct lttng_ust_bytecode_node
*enabler_bc
;
535 struct lttng_bytecode_runtime
*runtime
;
539 /* Go over all the bytecode programs of the enabler. */
540 cds_list_for_each_entry(enabler_bc
, enabler_bytecode_head
, node
) {
542 struct cds_list_head
*insert_loc
;
545 * Check if the current enabler bytecode program is already
546 * linked with the instance.
548 cds_list_for_each_entry(runtime
, instance_bytecode_head
, node
) {
549 if (runtime
->bc
== enabler_bc
) {
556 * Skip bytecode already linked, go to the next enabler
563 * Insert at specified priority (seqnum) in increasing
564 * order. If there already is a bytecode of the same priority,
565 * insert the new bytecode right after it.
567 cds_list_for_each_entry_reverse(runtime
,
568 instance_bytecode_head
, node
) {
569 if (runtime
->bc
->bc
.seqnum
<= enabler_bc
->bc
.seqnum
) {
571 insert_loc
= &runtime
->node
;
576 /* Add to head to list */
577 insert_loc
= instance_bytecode_head
;
579 dbg_printf("linking bytecode\n");
580 ret
= link_bytecode(event_desc
, ctx
, enabler_bc
, insert_loc
);
581 /* link_bytecode() failure is non-fatal: log and continue. */
582 dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
588 * We own the bytecode if we return success.
590 int lttng_filter_enabler_attach_bytecode(struct lttng_enabler
*enabler
,
591 struct lttng_ust_bytecode_node
*bytecode
)
593 cds_list_add(&bytecode
->node
, &enabler
->filter_bytecode_head
);
/*
 * free_filter_runtime(): release every bytecode_runtime on the given
 * list, using the _safe list iterator so entries can be freed while
 * iterating.
 *
 * NOTE(review): the extraction dropped the remainder of this function —
 * the iterator's member argument (presumably p.node) and the loop body
 * that frees each runtime are missing, so it does not compile as-is.
 * Restore from upstream lttng-ust lttng-bytecode.c; the fragments below
 * are kept byte-identical.
 */
598 void free_filter_runtime(struct cds_list_head
*bytecode_runtime_head
)
600 struct bytecode_runtime
*runtime
, *tmp
;
602 cds_list_for_each_entry_safe(runtime
, tmp
, bytecode_runtime_head
,
609 void lttng_free_event_filter_runtime(struct lttng_event
*event
)
611 free_filter_runtime(&event
->filter_bytecode_runtime_head
);
614 void lttng_free_event_notifier_filter_runtime(
615 struct lttng_event_notifier
*event_notifier
)
617 free_filter_runtime(&event_notifier
->filter_bytecode_runtime_head
);
620 /* For backward compatibility. Leave those exported symbols in place. */
621 void lttng_filter_sync_state(struct lttng_bytecode_runtime
*runtime
)