--- /dev/null
+/*
+ * ltt-context.c
+ *
+ * Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng trace/channel/event context management.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
+#include "ltt-events.h"
+#include "ltt-tracer.h"
+
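+/*
+ * lttng_append_context - reserve space for one more context field.
+ *
+ * Lazily allocates *ctx_p on first use and doubles the field array when
+ * it is full. Returns a pointer to the new zeroed field, or NULL on
+ * allocation failure. The caller fills in the field name, type and
+ * destroy callback.
+ */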
+struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx_p)
+{
+ struct lttng_ctx_field *field;
+ struct lttng_ctx *ctx;
+
+ if (!*ctx_p) {
+ *ctx_p = kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL);
+ if (!*ctx_p)
+ return NULL;
+ }
+ ctx = *ctx_p;
+ if (ctx->nr_fields + 1 > ctx->allocated_fields) {
+ struct lttng_ctx_field *new_fields;
+
+ ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
+ new_fields = kzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
+ if (!new_fields)
+ return NULL;
+ if (ctx->fields)
+ memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
+ kfree(ctx->fields);
+ ctx->fields = new_fields;
+ }
+ field = &ctx->fields[ctx->nr_fields];
+ ctx->nr_fields++;
+ return field;
+}
+EXPORT_SYMBOL_GPL(lttng_append_context);
+
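+/*
+ * lttng_destroy_context - release a context and all of its fields.
+ *
+ * Accepts a NULL context. Invokes each field's destroy callback (when
+ * one was set), then frees the field array and the context itself.
+ */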
+void lttng_destroy_context(struct lttng_ctx *ctx)
+{
+ int i;
+
+ if (!ctx)
+ return;
+ for (i = 0; i < ctx->nr_fields; i++) {
+ if (ctx->fields[i].destroy)
+ ctx->fields[i].destroy(&ctx->fields[i]);
+ }
+ kfree(ctx->fields);
+ kfree(ctx);
+}
struct perf_event_attr *attr;
} perf_counter;
} u;
+ void (*destroy)(struct lttng_ctx_field *field);
};
struct lttng_ctx {
void ltt_event_put(const struct lttng_event_desc *desc);
int ltt_probes_init(void);
void ltt_probes_exit(void);
+struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx);
+void lttng_destroy_context(struct lttng_ctx *ctx);
#ifdef CONFIG_KPROBES
int lttng_kprobes_register(const char *name,
{
}
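+/*
+ * Destroy callback for a perf counter context field: unlink the field
+ * from the global perf counter list, release the per-cpu events under
+ * perf_counter_mutex, then free the attribute and the event array.
+ */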
+static
+void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
+{
+ struct perf_event **events = field->u.perf_counter.e;
+ int cpu;
+
+ mutex_lock(&perf_counter_mutex);
+ list_del(&field->u.perf_counter.head);
+ for_each_online_cpu(cpu)
+ perf_event_release_kernel(events[cpu]);
+ mutex_unlock(&perf_counter_mutex);
+ kfree(field->u.perf_counter.attr);
+ kfree(events);
+}
+
int lttng_add_perf_counter_to_ctx(uint32_t type,
uint64_t config,
- struct lttng_ctx *ctx)
+ struct lttng_ctx **ctx)
{
struct lttng_ctx_field *field;
struct perf_event **events;
}
}
- ctx->nr_fields++;
- if (ctx->nr_fields > ctx->allocated_fields) {
- struct lttng_ctx_field *new_fields;
-
- ctx->allocated_fields = min_t(size_t, 1, 2 * ctx->allocated_fields);
- new_fields = kzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
- if (!new_fields) {
- ret = -ENOMEM;
- goto error;
- }
- if (ctx->fields)
- memcpy(new_fields, ctx->fields, ctx->nr_fields - 1);
- kfree(ctx->fields);
- ctx->fields = new_fields;
+ field = lttng_append_context(ctx);
+ if (!field) {
+ ret = -ENOMEM;
+ goto error;
}
- field = &ctx->fields[ctx->nr_fields - 1];
+ field->destroy = lttng_destroy_perf_counter_field;
field->name = "dummyname";//TODO: lookup_counter_name(type, config);
field->type.atype = atype_integer;
return ret;
}
-struct lttng_ctx *lttng_create_perf_counter_ctx(void)
-{
- return kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL);
-}
-
-void lttng_destroy_perf_counter_ctx(struct lttng_ctx *ctx)
-{
- int i;
-
- for (i = 0; i < ctx->nr_fields; i++) {
- struct perf_event **events = ctx->fields[i].u.perf_counter.e;
- int cpu;
-
- mutex_lock(&perf_counter_mutex);
- list_del(&ctx->fields[i].u.perf_counter.head);
- for_each_online_cpu(cpu)
- perf_event_release_kernel(events[cpu]);
- mutex_unlock(&perf_counter_mutex);
- kfree(ctx->fields[i].u.perf_counter.attr);
- kfree(events);
- }
- kfree(ctx->fields);
- kfree(ctx);
-}
-
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");