};
struct lttng_interpreter_output;
+struct lttng_ust_bytecode_runtime_private;
/*
- * This structure is used in the probes. More specifically, the `filter` and
- * `node` fields are explicity used in the probes. When modifying this
- * structure we must not change the layout of these two fields as it is
- * considered ABI.
+ * This structure is used in the probes. More specifically, the
+ * `interpreter_funcs` and `node` fields are explicitly used in the
+ * probes. When modifying this structure we must not change the layout
+ * of these two fields as it is considered ABI.
*/
struct lttng_bytecode_runtime {
+ struct lttng_ust_bytecode_runtime_private *priv;
+
/* Associated bytecode */
- struct lttng_ust_bytecode_node *bc;
union {
uint64_t (*filter)(void *interpreter_data,
const char *interpreter_stack_data);
uint64_t (*capture)(void *interpreter_data,
const char *interpreter_stack_data,
struct lttng_interpreter_output *interpreter_output);
} interpreter_funcs;
- int link_failed;
struct cds_list_head node; /* list of bytecode runtime in event */
- /*
- * Pointer to a URCU-protected pointer owned by an `struct
- * lttng_session`or `struct lttng_event_notifier_group`.
- */
- struct lttng_ctx **pctx;
};
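Aside (not part of this patch): a minimal probe-side sketch of how this layout is consumed. Probes only touch `interpreter_funcs` and `node`, which is why the layout of those two fields is ABI; the helper name and the stack-data argument below are illustrative, the real call sites live in the generated probe code.

/* Illustrative only: how a probe runs one filter bytecode runtime. */
static uint64_t example_run_filter(struct lttng_bytecode_runtime *bc_runtime,
		const char *filter_stack_data)
{
	/* The runtime itself is handed back to the interpreter as interpreter_data. */
	return bc_runtime->interpreter_funcs.filter(bc_runtime,
			filter_stack_data);
}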
/*
#include <lttng/urcu/pointer.h>
#include <lttng/ust-endian.h>
#include <lttng/ust-events.h>
+#include "ust-events-internal.h"
#include "lttng-bytecode.h"
#include "string-utils.h"
struct lttng_interpreter_output *output)
{
struct bytecode_runtime *bytecode = interpreter_data;
- struct lttng_ctx *ctx = lttng_ust_rcu_dereference(*bytecode->p.pctx);
+ struct lttng_ctx *ctx = lttng_ust_rcu_dereference(*bytecode->p.priv->pctx);
void *pc, *next_pc, *start_pc;
int ret = -EINVAL;
uint64_t retval = 0;
const char *name;
offset = ((struct get_symbol *) insn->data)->offset;
- name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
+ name = bytecode->p.priv->bc->bc.data + bytecode->p.priv->bc->bc.reloc_offset + offset;
return lttng_get_context_index(ctx, name);
}
ssize_t data_offset;
offset = ((struct get_symbol *) insn->data)->offset;
- orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+ orig_name = runtime->p.priv->bc->bc.data + runtime->p.priv->bc->bc.reloc_offset + offset;
name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
if (!name) {
ret = -ENOMEM;
nr_fields = event_desc->nr_fields;
offset = ((struct get_symbol *) insn->data)->offset;
- name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+ name = runtime->p.priv->bc->bc.data + runtime->p.priv->bc->bc.reloc_offset + offset;
for (i = 0; i < nr_fields; i++) {
field = &event_desc->fields[i];
if (field->u.ext.nofilter) {
int ret = -EINVAL;
struct vstack _stack;
struct vstack *stack = &_stack;
- struct lttng_ctx **pctx = bytecode->p.pctx;
+ struct lttng_ctx **pctx = bytecode->p.priv->pctx;
vstack_init(stack);
const char *str, *str_limit;
size_t len_limit;
- if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
+ if (sym->offset >= bytecode->p.priv->bc->bc.len - bytecode->p.priv->bc->bc.reloc_offset)
return -EINVAL;
- str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
- str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
+ str = bytecode->p.priv->bc->bc.data + bytecode->p.priv->bc->bc.reloc_offset + sym->offset;
+ str_limit = bytecode->p.priv->bc->bc.data + bytecode->p.priv->bc->bc.len;
len_limit = str_limit - str;
if (strnlen(str, len_limit) == len_limit)
return -EINVAL;
struct load_op *op;
struct lttng_ctx_field *ctx_field;
int idx;
- struct lttng_ctx **pctx = runtime->p.pctx;
+ struct lttng_ctx **pctx = runtime->p.priv->pctx;
dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name);
struct lttng_bytecode_runtime *bc_runtime;
cds_list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
- if (bc_runtime->bc == bytecode)
+ if (bc_runtime->priv->bc == bytecode)
return 1;
}
return 0;
{
int ret, offset, next_offset;
struct bytecode_runtime *runtime = NULL;
+ struct lttng_ust_bytecode_runtime_private *runtime_priv = NULL;
size_t runtime_alloc_len;
if (!bytecode)
ret = -ENOMEM;
goto alloc_error;
}
- runtime->p.bc = bytecode;
- runtime->p.pctx = ctx;
+ runtime_priv = zmalloc(sizeof(struct lttng_ust_bytecode_runtime_private));
+ if (!runtime_priv) {
+ free(runtime);
+ runtime = NULL;
+ ret = -ENOMEM;
+ goto alloc_error;
+ }
+ runtime->p.priv = runtime_priv;
+ runtime_priv->pub = runtime;
+ runtime_priv->bc = bytecode;
+ runtime_priv->pctx = ctx;
runtime->len = bytecode->bc.reloc_offset;
/* copy original bytecode */
memcpy(runtime->code, bytecode->bc.data, runtime->len);
abort();
}
- runtime->p.link_failed = 0;
+ runtime->p.priv->link_failed = 0;
cds_list_add_rcu(&runtime->p.node, insert_loc);
dbg_printf("Linking successful.\n");
return 0;
abort();
}
- runtime->p.link_failed = 1;
+ runtime_priv->link_failed = 1;
cds_list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
dbg_printf("Linking failed.\n");
void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime)
{
- struct lttng_ust_bytecode_node *bc = runtime->bc;
+ struct lttng_ust_bytecode_node *bc = runtime->priv->bc;
- if (!bc->enabler->enabled || runtime->link_failed)
+ if (!bc->enabler->enabled || runtime->priv->link_failed)
runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
else
runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret;
void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime)
{
- struct lttng_ust_bytecode_node *bc = runtime->bc;
+ struct lttng_ust_bytecode_node *bc = runtime->priv->bc;
- if (!bc->enabler->enabled || runtime->link_failed)
+ if (!bc->enabler->enabled || runtime->priv->link_failed)
runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
else
runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret;
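For context (not part of this patch): a hedged sketch of the expected caller pattern for the sync-state helpers above, which repoint `interpreter_funcs` at either the real interpreter or the "false" stub whenever enabler or link state changes. The helper name and the list head are assumptions for illustration; the urcu list helpers are assumed available as elsewhere in this file.

/* Illustrative only: resync every filter runtime attached to one instance. */
static void example_sync_filter_runtimes(struct cds_list_head *runtime_head)
{
	struct lttng_bytecode_runtime *runtime;

	cds_list_for_each_entry(runtime, runtime_head, node)
		lttng_bytecode_filter_sync_state(runtime);
}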
* linked with the instance.
*/
cds_list_for_each_entry(runtime, instance_bytecode_head, node) {
- if (runtime->bc == enabler_bc) {
+ if (runtime->priv->bc == enabler_bc) {
found = 1;
break;
}
*/
cds_list_for_each_entry_reverse(runtime,
instance_bytecode_head, node) {
- if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
+ if (runtime->priv->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
/* insert here */
insert_loc = &runtime->node;
goto add_within;
cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head,
p.node) {
free(runtime->data);
+ free(runtime->p.priv);
free(runtime);
}
}
int registered; /* has reg'd tracepoint probe */
};
+struct lttng_ust_bytecode_runtime_private {
+ struct bytecode_runtime *pub; /* Public bytecode runtime interface */
+
+ struct lttng_ust_bytecode_node *bc;
+ int link_failed;
+ /*
+ * Pointer to a URCU-protected pointer owned by a `struct
+ * lttng_session` or `struct lttng_event_notifier_group`.
+ */
+ struct lttng_ctx **pctx;
+};
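A hedged read-side sketch of the `pctx` double indirection described in the comment above, mirroring the interpreter hunk earlier in this patch; the helper name is illustrative and not part of the change.

/* Illustrative only: the owner republishes *pctx, readers dereference under RCU. */
static inline
int example_bytecode_has_ctx(struct lttng_bytecode_runtime *runtime)
{
	struct lttng_ctx *ctx;

	ctx = lttng_ust_rcu_dereference(*runtime->priv->pctx);
	return ctx != NULL;
}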
+
static inline
struct lttng_enabler *lttng_event_enabler_as_enabler(
struct lttng_event_enabler *event_enabler)