/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <common/align.h>
#include <common/macros.h>

#include "filter-ast.h"
#include "filter-bytecode.h"
#include "filter-ir.h"
/*
 * max_t: type-cast maximum. NOTE: evaluates its arguments more than
 * once; only pass side-effect-free expressions.
 */
#define max_t(type, a, b)	((type) ((a) > (b) ? (a) : (b)))

/* Initial bytecode buffer payload size (small on purpose to exercise growth). */
//#define INIT_ALLOC_SIZE	PAGE_SIZE
#define INIT_ALLOC_SIZE		4

/* Forward declaration: mutually recursive with the visit_node_* helpers. */
static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node);
/*
 * fls - find last (most-significant) set bit.
 * @x: word to search.
 *
 * Returns the 1-based position of the highest set bit (fls(1) == 1,
 * fls(0x80000000U) == 32), or 0 if @x is 0. Binary-search style:
 * each test halves the remaining candidate range.
 */
static inline int fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
/*
 * get_count_order - order (log2, rounded up) of an allocation count.
 * @count: number of bytes/elements; must be non-zero (fls(0) - 1 would
 *         yield -1).
 *
 * Returns ceil(log2(count)): exact log2 for powers of two, one more
 * otherwise (the `count & (count - 1)` test is non-zero exactly when
 * @count is not a power of two).
 */
static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
84 int bytecode_init(struct lttng_filter_bytecode_alloc
**fb
)
88 alloc_len
= sizeof(struct lttng_filter_bytecode_alloc
) + INIT_ALLOC_SIZE
;
89 *fb
= calloc(alloc_len
, 1);
93 (*fb
)->alloc_len
= alloc_len
;
99 int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc
**fb
, uint32_t align
, uint32_t len
)
102 uint32_t padding
= offset_align((*fb
)->b
.len
, align
);
103 uint32_t new_len
= (*fb
)->b
.len
+ padding
+ len
;
104 uint32_t new_alloc_len
= sizeof(struct lttng_filter_bytecode_alloc
) + new_len
;
105 uint32_t old_alloc_len
= (*fb
)->alloc_len
;
107 if (new_len
> LTTNG_FILTER_MAX_LEN
)
110 if (new_alloc_len
> old_alloc_len
) {
111 struct lttng_filter_bytecode_alloc
*newptr
;
114 max_t(uint32_t, 1U << get_count_order(new_alloc_len
), old_alloc_len
<< 1);
115 newptr
= realloc(*fb
, new_alloc_len
);
119 /* We zero directly the memory from start of allocation. */
120 memset(&((char *) *fb
)[old_alloc_len
], 0, new_alloc_len
- old_alloc_len
);
121 (*fb
)->alloc_len
= new_alloc_len
;
123 (*fb
)->b
.len
+= padding
;
130 int bytecode_push(struct lttng_filter_bytecode_alloc
**fb
, const void *data
,
131 uint32_t align
, uint32_t len
)
135 offset
= bytecode_reserve(fb
, align
, len
);
138 memcpy(&(*fb
)->b
.data
[offset
], data
, len
);
143 int bytecode_push_logical(struct lttng_filter_bytecode_alloc
**fb
,
144 struct logical_op
*data
,
145 uint32_t align
, uint32_t len
,
146 uint16_t *skip_offset
)
150 offset
= bytecode_reserve(fb
, align
, len
);
153 memcpy(&(*fb
)->b
.data
[offset
], data
, len
);
155 (void *) &((struct logical_op
*) &(*fb
)->b
.data
[offset
])->skip_offset
156 - (void *) &(*fb
)->b
.data
[0];
161 int bytecode_patch(struct lttng_filter_bytecode_alloc
**fb
,
166 if (offset
>= (*fb
)->b
.len
) {
169 memcpy(&(*fb
)->b
.data
[offset
], data
, len
);
174 int visit_node_root(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
177 struct return_op insn
;
180 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.root
.child
);
184 /* Generate end of bytecode instruction */
185 insn
.op
= FILTER_OP_RETURN
;
186 return bytecode_push(&ctx
->bytecode
, &insn
, 1, sizeof(insn
));
190 int visit_node_load(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
194 switch (node
->data_type
) {
195 case IR_DATA_UNKNOWN
:
197 fprintf(stderr
, "[error] Unknown data type in %s\n",
203 struct load_op
*insn
;
204 uint32_t insn_len
= sizeof(struct load_op
)
205 + strlen(node
->u
.load
.u
.string
) + 1;
207 insn
= calloc(insn_len
, 1);
210 insn
->op
= FILTER_OP_LOAD_STRING
;
211 strcpy(insn
->data
, node
->u
.load
.u
.string
);
212 ret
= bytecode_push(&ctx
->bytecode
, insn
, 1, insn_len
);
216 case IR_DATA_NUMERIC
:
218 struct load_op
*insn
;
219 uint32_t insn_len
= sizeof(struct load_op
)
220 + sizeof(struct literal_numeric
);
222 insn
= calloc(insn_len
, 1);
225 insn
->op
= FILTER_OP_LOAD_S64
;
226 memcpy(insn
->data
, &node
->u
.load
.u
.num
, sizeof(int64_t));
227 ret
= bytecode_push(&ctx
->bytecode
, insn
, 1, insn_len
);
233 struct load_op
*insn
;
234 uint32_t insn_len
= sizeof(struct load_op
)
235 + sizeof(struct literal_double
);
237 insn
= calloc(insn_len
, 1);
240 insn
->op
= FILTER_OP_LOAD_DOUBLE
;
241 memcpy(insn
->data
, &node
->u
.load
.u
.flt
, sizeof(double));
242 ret
= bytecode_push(&ctx
->bytecode
, insn
, 1, insn_len
);
246 case IR_DATA_FIELD_REF
: /* fall-through */
247 case IR_DATA_GET_CONTEXT_REF
:
249 struct load_op
*insn
;
250 uint32_t insn_len
= sizeof(struct load_op
)
251 + sizeof(struct field_ref
);
252 struct field_ref ref_offset
;
253 uint32_t reloc_offset_u32
;
254 uint16_t reloc_offset
;
256 insn
= calloc(insn_len
, 1);
259 switch(node
->data_type
) {
260 case IR_DATA_FIELD_REF
:
261 insn
->op
= FILTER_OP_LOAD_FIELD_REF
;
263 case IR_DATA_GET_CONTEXT_REF
:
264 insn
->op
= FILTER_OP_GET_CONTEXT_REF
;
270 ref_offset
.offset
= (uint16_t) -1U;
271 memcpy(insn
->data
, &ref_offset
, sizeof(ref_offset
));
272 /* reloc_offset points to struct load_op */
273 reloc_offset_u32
= bytecode_get_len(&ctx
->bytecode
->b
);
274 if (reloc_offset_u32
> LTTNG_FILTER_MAX_LEN
- 1) {
278 reloc_offset
= (uint16_t) reloc_offset_u32
;
279 ret
= bytecode_push(&ctx
->bytecode
, insn
, 1, insn_len
);
285 ret
= bytecode_push(&ctx
->bytecode_reloc
, &reloc_offset
,
286 1, sizeof(reloc_offset
));
291 ret
= bytecode_push(&ctx
->bytecode_reloc
, node
->u
.load
.u
.ref
,
292 1, strlen(node
->u
.load
.u
.ref
) + 1);
300 int visit_node_unary(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
303 struct unary_op insn
;
306 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.unary
.child
);
310 /* Generate end of bytecode instruction */
311 switch (node
->u
.unary
.type
) {
312 case AST_UNARY_UNKNOWN
:
314 fprintf(stderr
, "[error] Unknown unary node type in %s\n",
320 case AST_UNARY_MINUS
:
321 insn
.op
= FILTER_OP_UNARY_MINUS
;
322 return bytecode_push(&ctx
->bytecode
, &insn
, 1, sizeof(insn
));
324 insn
.op
= FILTER_OP_UNARY_NOT
;
325 return bytecode_push(&ctx
->bytecode
, &insn
, 1, sizeof(insn
));
330 * Binary comparator nesting is disallowed. This allows fitting into
334 int visit_node_binary(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
337 struct binary_op insn
;
340 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.binary
.left
);
343 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.binary
.right
);
347 switch (node
->u
.binary
.type
) {
350 fprintf(stderr
, "[error] Unknown unary node type in %s\n",
356 fprintf(stderr
, "[error] Unexpected logical node type in %s\n",
361 insn
.op
= FILTER_OP_MUL
;
364 insn
.op
= FILTER_OP_DIV
;
367 insn
.op
= FILTER_OP_MOD
;
370 insn
.op
= FILTER_OP_PLUS
;
373 insn
.op
= FILTER_OP_MINUS
;
376 insn
.op
= FILTER_OP_RSHIFT
;
379 insn
.op
= FILTER_OP_LSHIFT
;
382 insn
.op
= FILTER_OP_BIN_AND
;
385 insn
.op
= FILTER_OP_BIN_OR
;
388 insn
.op
= FILTER_OP_BIN_XOR
;
392 insn
.op
= FILTER_OP_EQ
;
395 insn
.op
= FILTER_OP_NE
;
398 insn
.op
= FILTER_OP_GT
;
401 insn
.op
= FILTER_OP_LT
;
404 insn
.op
= FILTER_OP_GE
;
407 insn
.op
= FILTER_OP_LE
;
410 return bytecode_push(&ctx
->bytecode
, &insn
, 1, sizeof(insn
));
414 * A logical op always return a s64 (1 or 0).
417 int visit_node_logical(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
420 struct logical_op insn
;
421 uint16_t skip_offset_loc
;
424 /* Visit left child */
425 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.binary
.left
);
428 /* Cast to s64 if float or field ref */
429 if ((node
->u
.binary
.left
->data_type
== IR_DATA_FIELD_REF
430 || node
->u
.binary
.left
->data_type
== IR_DATA_GET_CONTEXT_REF
)
431 || node
->u
.binary
.left
->data_type
== IR_DATA_FLOAT
) {
432 struct cast_op cast_insn
;
434 if (node
->u
.binary
.left
->data_type
== IR_DATA_FIELD_REF
435 || node
->u
.binary
.left
->data_type
== IR_DATA_GET_CONTEXT_REF
) {
436 cast_insn
.op
= FILTER_OP_CAST_TO_S64
;
438 cast_insn
.op
= FILTER_OP_CAST_DOUBLE_TO_S64
;
440 ret
= bytecode_push(&ctx
->bytecode
, &cast_insn
,
441 1, sizeof(cast_insn
));
445 switch (node
->u
.logical
.type
) {
447 fprintf(stderr
, "[error] Unknown node type in %s\n",
452 insn
.op
= FILTER_OP_AND
;
455 insn
.op
= FILTER_OP_OR
;
458 insn
.skip_offset
= (uint16_t) -1UL; /* Temporary */
459 ret
= bytecode_push_logical(&ctx
->bytecode
, &insn
, 1, sizeof(insn
),
463 /* Visit right child */
464 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.binary
.right
);
467 /* Cast to s64 if float or field ref */
468 if ((node
->u
.binary
.right
->data_type
== IR_DATA_FIELD_REF
469 || node
->u
.binary
.right
->data_type
== IR_DATA_GET_CONTEXT_REF
)
470 || node
->u
.binary
.right
->data_type
== IR_DATA_FLOAT
) {
471 struct cast_op cast_insn
;
473 if (node
->u
.binary
.right
->data_type
== IR_DATA_FIELD_REF
474 || node
->u
.binary
.right
->data_type
== IR_DATA_GET_CONTEXT_REF
) {
475 cast_insn
.op
= FILTER_OP_CAST_TO_S64
;
477 cast_insn
.op
= FILTER_OP_CAST_DOUBLE_TO_S64
;
479 ret
= bytecode_push(&ctx
->bytecode
, &cast_insn
,
480 1, sizeof(cast_insn
));
484 /* We now know where the logical op can skip. */
485 target_loc
= (uint16_t) bytecode_get_len(&ctx
->bytecode
->b
);
486 ret
= bytecode_patch(&ctx
->bytecode
,
487 &target_loc
, /* Offset to jump to */
488 skip_offset_loc
, /* Where to patch */
494 * Postorder traversal of the tree. We need the children result before
495 * we can evaluate the parent.
498 int recursive_visit_gen_bytecode(struct filter_parser_ctx
*ctx
,
504 fprintf(stderr
, "[error] Unknown node type in %s\n",
509 return visit_node_root(ctx
, node
);
511 return visit_node_load(ctx
, node
);
513 return visit_node_unary(ctx
, node
);
515 return visit_node_binary(ctx
, node
);
517 return visit_node_logical(ctx
, node
);
522 void filter_bytecode_free(struct filter_parser_ctx
*ctx
)
530 ctx
->bytecode
= NULL
;
533 if (ctx
->bytecode_reloc
) {
534 free(ctx
->bytecode_reloc
);
535 ctx
->bytecode_reloc
= NULL
;
540 int filter_visitor_bytecode_generate(struct filter_parser_ctx
*ctx
)
544 ret
= bytecode_init(&ctx
->bytecode
);
547 ret
= bytecode_init(&ctx
->bytecode_reloc
);
550 ret
= recursive_visit_gen_bytecode(ctx
, ctx
->ir_root
);
554 /* Finally, append symbol table to bytecode */
555 ctx
->bytecode
->b
.reloc_table_offset
= bytecode_get_len(&ctx
->bytecode
->b
);
556 return bytecode_push(&ctx
->bytecode
, ctx
->bytecode_reloc
->b
.data
,
557 1, bytecode_get_len(&ctx
->bytecode_reloc
->b
));
560 filter_bytecode_free(ctx
);