From: Mathieu Desnoyers
Date: Sat, 21 May 2011 01:33:54 +0000 (-0400)
Subject: LTTng performance monitoring counters integration (work in progress)
X-Git-Tag: v2.0-pre1~131
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=833ad6a0c344849269df6875f40c3d7cb21529d3;p=lttng-modules.git

LTTng performance monitoring counters integration (work in progress)

Integrate with the perf counters infrastructure: this adds context fields to
the events recorded. Currently, CPU hotplug is still not supported, and the
callbacks are not yet called from the tracing code.

It gives a rough idea of how to add context fields to events at any of the
following levels:

- per trace session
- per stream class
- per event type

A usage sketch for the new API and a possible shape for the missing CPU
hotplug support are appended after the diff.

Signed-off-by: Mathieu Desnoyers
---

diff --git a/ltt-events.h b/ltt-events.h
index 6bdacf6e..0b463897 100644
--- a/ltt-events.h
+++ b/ltt-events.h
@@ -18,6 +18,7 @@ struct ltt_channel;
 struct ltt_session;
 struct lib_ring_buffer_ctx;
 struct perf_event;
+struct perf_event_attr;
 
 /* Type description */
 
@@ -116,17 +117,18 @@ struct lttng_event_field {
 struct lttng_ctx_field {
 	const char *name;
 	struct lttng_type type;
-	void *ctx_field_callback;
+	void *callback;
 	union {
 		struct {
 			struct perf_event **e;	/* per-cpu array */
-			struct list_head *head;
+			struct list_head head;
+			struct perf_event_attr *attr;
 		} perf_counter;
 	} u;
 };
 
 struct lttng_ctx {
-	const struct lttng_ctx_field *fields;
+	struct lttng_ctx_field *fields;
 	unsigned int nr_fields;
 	unsigned int allocated_fields;
 };
diff --git a/probes/Makefile b/probes/Makefile
index 7129e979..554e6ffa 100644
--- a/probes/Makefile
+++ b/probes/Makefile
@@ -24,6 +24,10 @@ ifneq ($(CONFIG_DYNAMIC_FTRACE),)
 obj-m += lttng-ftrace.o
 endif
 
+ifneq ($(CONFIG_PERF_EVENTS),)
+obj-m += lttng-perf-counters.o
+endif
+
 endif
 else
diff --git a/probes/lttng-perf-counters.c b/probes/lttng-perf-counters.c
new file mode 100644
index 00000000..cdda4833
--- /dev/null
+++ b/probes/lttng-perf-counters.c
@@ -0,0 +1,160 @@
+/*
+ * (C) Copyright 2009-2011 -
+ *		Mathieu Desnoyers
+ *
+ * LTTng performance monitoring counters (perf-counters) integration module.
+ *
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/perf_event.h>
+#include <linux/list.h>
+#include "../ltt-events.h"
+#include "../wrapper/ringbuffer/frontend_types.h"
+#include "../wrapper/vmalloc.h"
+#include "../ltt-tracer.h"
+
+/*
+ * TODO: Add CPU hotplug support.
+ */
+
+static DEFINE_MUTEX(perf_counter_mutex);
+static LIST_HEAD(perf_counter_contexts);
+
+static
+void perf_counter_record(struct lttng_ctx_field *field,
+			 struct lib_ring_buffer_ctx *ctx,
+			 struct ltt_channel *chan)
+{
+	struct perf_event *event;
+	uint64_t value;
+
+	event = field->u.perf_counter.e[ctx->cpu];
+	event->pmu->read(event);
+	value = local64_read(&event->count);
+	lib_ring_buffer_align_ctx(ctx,
+		ltt_alignof(field->type.u.basic.integer.alignment / CHAR_BIT));
+	chan->ops->event_write(ctx, &value, sizeof(value));
+}
+
+static
+void overflow_callback(struct perf_event *event, int nmi,
+		       struct perf_sample_data *data,
+		       struct pt_regs *regs)
+{
+}
+
+int lttng_add_perf_counter_to_ctx(uint32_t type,
+				  uint64_t config,
+				  struct lttng_ctx *ctx)
+{
+	struct lttng_ctx_field *field;
+	struct perf_event **events;
+	struct perf_event_attr *attr;
+	int ret;
+	int cpu;
+
+	events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
+	if (!events)
+		return -ENOMEM;
+
+	attr = kzalloc(sizeof(*field->u.perf_counter.attr), GFP_KERNEL);
+	if (!attr) {
+		ret = -ENOMEM;
+		goto error_attr;
+	}
+
+	attr->type = type;
+	attr->config = config;
+	attr->size = sizeof(struct perf_event_attr);
+	attr->pinned = 1;
+	attr->disabled = 0;
+
+	mutex_lock(&perf_counter_mutex);
+
+	for_each_online_cpu(cpu) {
+		events[cpu] = perf_event_create_kernel_counter(attr,
+				cpu, NULL, overflow_callback);
+		if (IS_ERR(events[cpu])) {
+			ret = PTR_ERR(events[cpu]);
+			events[cpu] = NULL;
+			goto error;
+		}
+	}
+
+	ctx->nr_fields++;
+	if (ctx->nr_fields > ctx->allocated_fields) {
+		struct lttng_ctx_field *new_fields;
+
+		ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
+		new_fields = kzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field),
+				     GFP_KERNEL);
+		if (!new_fields) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		if (ctx->fields)
+			memcpy(new_fields, ctx->fields,
+			       (ctx->nr_fields - 1) * sizeof(struct lttng_ctx_field));
+		kfree(ctx->fields);
+		ctx->fields = new_fields;
+	}
+	field = &ctx->fields[ctx->nr_fields - 1];
+
+	field->name = "dummyname";	/* TODO: lookup_counter_name(type, config) */
+	field->type.atype = atype_integer;
+	field->type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
+	field->type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
+	field->type.u.basic.integer.signedness = 0;
+	field->type.u.basic.integer.reverse_byte_order = 0;
+	field->type.u.basic.integer.base = 10;
+	field->type.u.basic.integer.encoding = lttng_encode_none;
+	field->callback = perf_counter_record;
+	field->u.perf_counter.e = events;
+	field->u.perf_counter.attr = attr;
+
+	list_add(&field->u.perf_counter.head, &perf_counter_contexts);
+	mutex_unlock(&perf_counter_mutex);
+
+	wrapper_vmalloc_sync_all();
+	return 0;
+
+error:
+	for_each_online_cpu(cpu) {
+		if (events[cpu])
+			perf_event_release_kernel(events[cpu]);
+	}
+	mutex_unlock(&perf_counter_mutex);
+	kfree(attr);
+error_attr:
+	kfree(events);
+	return ret;
+}
+
+struct lttng_ctx *lttng_create_perf_counter_ctx(void)
+{
+	return kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL);
+}
+
+void lttng_destroy_perf_counter_ctx(struct lttng_ctx *ctx)
+{
+	unsigned int i;
+
+	for (i = 0; i < ctx->nr_fields; i++) {
+		struct perf_event **events = ctx->fields[i].u.perf_counter.e;
+		int cpu;
+
+		mutex_lock(&perf_counter_mutex);
+		list_del(&ctx->fields[i].u.perf_counter.head);
+		for_each_online_cpu(cpu)
+			perf_event_release_kernel(events[cpu]);
+		mutex_unlock(&perf_counter_mutex);
+		kfree(ctx->fields[i].u.perf_counter.attr);
+		kfree(events);
+	}
+	kfree(ctx->fields);
+	kfree(ctx);
+}
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers");
+MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");