/*
 * (C) Copyright 2009-2011 -
 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
9 | ||
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include "../ltt-events.h"
#include "../wrapper/ringbuffer/frontend_types.h"
#include "../wrapper/vmalloc.h"
#include "../ltt-tracer.h"
18 | ||
19 | /* | |
20 | * TODO: Add CPU hotplug support. | |
21 | */ | |
22 | ||
23 | static DEFINE_MUTEX(perf_counter_mutex); | |
24 | static LIST_HEAD(perf_counter_contexts); | |
25 | ||
26 | static | |
27 | void perf_counter_record(struct lttng_ctx_field *field, | |
28 | struct lib_ring_buffer_ctx *ctx, | |
29 | struct ltt_channel *chan) | |
30 | { | |
31 | struct perf_event *event; | |
32 | uint64_t value; | |
33 | ||
34 | event = field->u.perf_counter.e[ctx->cpu]; | |
35 | event->pmu->read(event); | |
36 | value = local64_read(&event->count); | |
37 | lib_ring_buffer_align_ctx(ctx, | |
38 | ltt_alignof(field->type.u.basic.integer.alignment / CHAR_BIT)); | |
39 | chan->ops->event_write(ctx, &value, sizeof(value)); | |
40 | } | |
41 | ||
42 | static | |
43 | void overflow_callback(struct perf_event *event, int nmi, | |
44 | struct perf_sample_data *data, | |
45 | struct pt_regs *regs) | |
46 | { | |
47 | } | |
48 | ||
49 | int lttng_add_perf_counter_to_ctx(uint32_t type, | |
50 | uint64_t config, | |
51 | struct lttng_ctx *ctx) | |
52 | { | |
53 | struct lttng_ctx_field *field; | |
54 | struct perf_event **events; | |
55 | struct perf_event_attr *attr; | |
56 | int ret; | |
57 | int cpu; | |
58 | ||
59 | events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL); | |
60 | if (!events) | |
61 | return -ENOMEM; | |
62 | ||
63 | attr = kzalloc(sizeof(*field->u.perf_counter.attr), GFP_KERNEL); | |
64 | if (!attr) { | |
65 | ret = -ENOMEM; | |
66 | goto error_attr; | |
67 | } | |
68 | ||
69 | attr->type = type; | |
70 | attr->config = config; | |
71 | attr->size = sizeof(struct perf_event_attr); | |
72 | attr->pinned = 1; | |
73 | attr->disabled = 0; | |
74 | ||
75 | mutex_lock(&perf_counter_mutex); | |
76 | ||
77 | for_each_online_cpu(cpu) { | |
78 | events[cpu] = perf_event_create_kernel_counter(attr, | |
79 | cpu, NULL, overflow_callback); | |
80 | if (!events[cpu]) { | |
81 | ret = -EINVAL; | |
82 | goto error; | |
83 | } | |
84 | } | |
85 | ||
86 | ctx->nr_fields++; | |
87 | if (ctx->nr_fields > ctx->allocated_fields) { | |
88 | struct lttng_ctx_field *new_fields; | |
89 | ||
90 | ctx->allocated_fields = min_t(size_t, 1, 2 * ctx->allocated_fields); | |
91 | new_fields = kzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL); | |
92 | if (!new_fields) { | |
93 | ret = -ENOMEM; | |
94 | goto error; | |
95 | } | |
96 | if (ctx->fields) | |
97 | memcpy(new_fields, ctx->fields, ctx->nr_fields - 1); | |
98 | kfree(ctx->fields); | |
99 | ctx->fields = new_fields; | |
100 | } | |
101 | field = &ctx->fields[ctx->nr_fields - 1]; | |
102 | ||
103 | field->name = "dummyname";//TODO: lookup_counter_name(type, config); | |
104 | field->type.atype = atype_integer; | |
105 | field->type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT; | |
106 | field->type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT; | |
107 | field->type.u.basic.integer.signedness = 0; | |
108 | field->type.u.basic.integer.reverse_byte_order = 0; | |
109 | field->type.u.basic.integer.base = 10; | |
110 | field->type.u.basic.integer.encoding = lttng_encode_none; | |
111 | field->callback = perf_counter_record; | |
112 | field->u.perf_counter.e = events; | |
113 | field->u.perf_counter.attr = attr; | |
114 | ||
115 | list_add(&field->u.perf_counter.head, &perf_counter_contexts); | |
116 | mutex_unlock(&perf_counter_mutex); | |
117 | ||
118 | wrapper_vmalloc_sync_all(); | |
119 | return 0; | |
120 | ||
121 | error: | |
122 | for_each_online_cpu(cpu) { | |
123 | if (events[cpu]) | |
124 | perf_event_release_kernel(events[cpu]); | |
125 | } | |
126 | mutex_unlock(&perf_counter_mutex); | |
127 | kfree(attr); | |
128 | error_attr: | |
129 | kfree(events); | |
130 | return ret; | |
131 | } | |
132 | ||
133 | struct lttng_ctx *lttng_create_perf_counter_ctx(void) | |
134 | { | |
135 | return kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL); | |
136 | } | |
137 | ||
138 | void lttng_destroy_perf_counter_ctx(struct lttng_ctx *ctx) | |
139 | { | |
140 | int i; | |
141 | ||
142 | for (i = 0; i < ctx->nr_fields; i++) { | |
143 | struct perf_event **events = ctx->fields[i].u.perf_counter.e; | |
144 | int cpu; | |
145 | ||
146 | mutex_lock(&perf_counter_mutex); | |
147 | list_del(&ctx->fields[i].u.perf_counter.head); | |
148 | for_each_online_cpu(cpu) | |
149 | perf_event_release_kernel(events[cpu]); | |
150 | mutex_unlock(&perf_counter_mutex); | |
151 | kfree(ctx->fields[i].u.perf_counter.attr); | |
152 | kfree(events); | |
153 | } | |
154 | kfree(ctx->fields); | |
155 | kfree(ctx); | |
156 | } | |
157 | ||
158 | MODULE_LICENSE("GPL and additional rights"); | |
159 | MODULE_AUTHOR("Mathieu Desnoyers"); | |
160 | MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support"); |