Commit | Line | Data |
---|---|---|
40652b65 | 1 | #include <lttng.h> |
299338c8 | 2 | #include <lttng-types.h> |
d0dd2ecb | 3 | #include <linux/debugfs.h> |
e763dbf5 MD |
4 | #include <linux/ringbuffer/frontend_types.h> |
5 | #include "../ltt-events.h" | |
6db3d13b | 6 | #include "../ltt-tracer-core.h" |
40652b65 | 7 | |
299338c8 MD |
/*
 * Describes one event payload field: its name and its type descriptor.
 * The type is stored by value and is immutable after static initialization.
 */
struct lttng_event_field {
	const char *name;
	const struct lttng_type type;
};
12 | ||
/*
 * Describes one event: its name and the array of its payload fields
 * (generated per event class in stage 1 below).
 */
struct lttng_event_desc {
	const struct lttng_event_field *fields;	/* points into __event_fields___<class>[] */
	const char *name;
	unsigned int nr_fields;			/* number of entries in fields[] */
};
40652b65 MD |
18 | |
19 | /* | |
6db3d13b | 20 | * Macro declarations used for all stages. |
40652b65 MD |
21 | */ |
22 | ||
23 | /* | |
 * DECLARE_EVENT_CLASS can be used to add generic function
25 | * handlers for events. That is, if all events have the same | |
26 | * parameters and just have distinct trace points. | |
27 | * Each tracepoint can be defined with DEFINE_EVENT and that | |
28 | * will map the DECLARE_EVENT_CLASS to the tracepoint. | |
29 | * | |
30 | * TRACE_EVENT is a one to one mapping between tracepoint and template. | |
31 | */ | |
6db3d13b | 32 | |
40652b65 MD |
/*
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template:
 * expand it as an event class plus a single event instance of that class.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			    PARAMS(proto),			\
			    PARAMS(args),			\
			    PARAMS(tstruct),			\
			    PARAMS(assign),			\
			    PARAMS(print))			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args))

/* The print format is irrelevant to LTTng; fall back to a plain event. */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to LTTng. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))
53 | ||
54 | /* | |
55 | * Stage 1 of the trace events. | |
56 | * | |
57 | * Create event field type metadata section. | |
 * Each event produces an array of fields.
59 | */ | |
60 | ||
#include "lttng-events-reset.h"	/* Reset all macros within TRACE_EVENT */

/* Named field types must be defined in lttng-types.h */

/*
 * Stage 1 mappings: each field declaration expands to one
 * struct lttng_event_field initializer describing its name and type.
 */

#undef __field
#define __field(_type, _item)					\
	{ .name = #_item, .type = { .atype = atype_integer, .name = #_type} },

/* The filter type only matters to ftrace; reuse the plain field mapping. */
#undef __field_ext
#define __field_ext(_type, _item, _filter_type)	__field(_type, _item)

/* Fixed-size array: element type plus compile-time length. */
#undef __array
#define __array(_type, _item, _length)			\
	{						\
		.name = #_item,				\
		.type = {				\
			.atype = atype_array,		\
			.name = NULL,			\
			.u.array.elem_type = #_type,	\
			.u.array.length = _length,	\
		},					\
	},

/* Variable-size array (sequence): length is recorded at runtime as a u32. */
#undef __dynamic_array
#define __dynamic_array(_type, _item, _length)		\
	{						\
		.name = #_item,				\
		.type = {				\
			.atype = atype_sequence,	\
			.name = NULL,			\
			.u.sequence.elem_type = #_type,	\
			.u.sequence.length_type = "u32",\
		},					\
	},

/* NUL-terminated string, declared as UTF-8 encoded. */
#undef __string
#define __string(_item, _src)					\
	{							\
		.name = #_item,					\
		.type = {					\
			.atype = atype_string,			\
			.name = NULL,				\
			.u.string.encoding = lttng_encode_UTF8,	\
		},						\
	},

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args /* Only one used in this phase */

/*
 * Expand each event class into a static array of field descriptors;
 * the DEFINE_EVENT expansion in stage 2 references it by class name.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
	static const struct lttng_event_field __event_fields___##_name[] = { \
		_tstruct						     \
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
117 | ||
118 | /* | |
119 | * Stage 2 of the trace events. | |
120 | * | |
121 | * Create an array of events. | |
122 | */ | |
123 | ||
299338c8 MD |
124 | /* Named field types must be defined in lttng-types.h */ |
125 | ||
#include "lttng-events-reset.h"	/* Reset all macros within TRACE_EVENT */

/*
 * Each event instance becomes one descriptor entry, pointing at the
 * field array generated for its class in stage 1.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(_template, _name, _proto, _args)			\
	{								\
		.fields = __event_fields___##_template,			\
		.name = #_name,						\
		.nr_fields = ARRAY_SIZE(__event_fields___##_template),	\
	},

/* Two-level expansion so TRACE_SYSTEM is expanded before token pasting. */
#define TP_ID1(_token, _system)	_token##_system
#define TP_ID(_token, _system)	TP_ID1(_token, _system)

/* One descriptor array per subsystem, filled by re-including the TP file. */
static const struct lttng_event_desc TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
};

#undef TP_ID1
#undef TP_ID
145 | ||
146 | /* | |
147 | * Stage 3 of the trace events. | |
148 | * | |
149 | * Create seq file metadata output. | |
150 | */ | |
151 | ||
d0dd2ecb MD |
152 | #define TP_ID1(_token, _system) _token##_system |
153 | #define TP_ID(_token, _system) TP_ID1(_token, _system) | |
d0dd2ecb MD |
154 | |
155 | static void *TP_ID(__lttng_seq_start__, TRACE_SYSTEM)(struct seq_file *m, | |
156 | loff_t *pos) | |
157 | { | |
6db3d13b MD |
158 | const struct lttng_event_desc *desc = |
159 | &TP_ID(__event_desc___, TRACE_SYSTEM)[*pos]; | |
d0dd2ecb | 160 | |
6db3d13b MD |
161 | if (desc > &TP_ID(__event_desc___, TRACE_SYSTEM) |
162 | [ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)) - 1]) | |
d0dd2ecb MD |
163 | return NULL; |
164 | return (void *) desc; | |
165 | } | |
166 | ||
167 | static void *TP_ID(__lttng_seq_next__, TRACE_SYSTEM)(struct seq_file *m, | |
168 | void *p, loff_t *ppos) | |
169 | { | |
6db3d13b MD |
170 | const struct lttng_event_desc *desc = |
171 | &TP_ID(__event_desc___, TRACE_SYSTEM)[++(*ppos)]; | |
d0dd2ecb | 172 | |
6db3d13b MD |
173 | if (desc > &TP_ID(__event_desc___, TRACE_SYSTEM) |
174 | [ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)) - 1]) | |
d0dd2ecb MD |
175 | return NULL; |
176 | return (void *) desc; | |
177 | } | |
178 | ||
/* seq_file stop operation: no iteration state to release. */
static void TP_ID(__lttng_seq_stop__, TRACE_SYSTEM)(struct seq_file *m,
		void *p)
{
}
183 | ||
184 | static int TP_ID(__lttng_seq_show__, TRACE_SYSTEM)(struct seq_file *m, | |
185 | void *p) | |
186 | { | |
187 | const struct lttng_event_desc *desc = p; | |
188 | int i; | |
189 | ||
190 | seq_printf(m, "event {\n" | |
191 | "\tname = %s;\n" | |
192 | "\tid = UNKNOWN;\n" | |
193 | "\tstream = UNKNOWN;\n" | |
194 | "\tfields = {\n", | |
195 | desc->name); | |
196 | for (i = 0; i < desc->nr_fields; i++) { | |
197 | if (desc->fields[i].type.name) /* Named type */ | |
198 | seq_printf(m, "\t\t%s", | |
199 | desc->fields[i].type.name); | |
200 | else /* Nameless type */ | |
201 | lttng_print_event_type(m, 2, &desc->fields[i].type); | |
202 | seq_printf(m, " %s;\n", desc->fields[i].name); | |
203 | } | |
204 | seq_printf(m, "\t};\n"); | |
205 | seq_printf(m, "};\n"); | |
206 | return 0; | |
207 | } | |
208 | ||
/* seq_file iterator over this subsystem's event descriptor array. */
static const
struct seq_operations TP_ID(__lttng_types_seq_ops__, TRACE_SYSTEM) = {
	.start = TP_ID(__lttng_seq_start__, TRACE_SYSTEM),
	.next = TP_ID(__lttng_seq_next__, TRACE_SYSTEM),
	.stop = TP_ID(__lttng_seq_stop__, TRACE_SYSTEM),
	.show = TP_ID(__lttng_seq_show__, TRACE_SYSTEM),
};
216 | ||
/* Bind the descriptor seq_file iterator to the opened debugfs file. */
static int
TP_ID(__lttng_types_open__, TRACE_SYSTEM)(struct inode *inode, struct file *file)
{
	return seq_open(file, &TP_ID(__lttng_types_seq_ops__, TRACE_SYSTEM));
}
222 | ||
6db3d13b MD |
223 | static const |
224 | struct file_operations TP_ID(__lttng_types_fops__, TRACE_SYSTEM) = { | |
d0dd2ecb MD |
225 | .open = TP_ID(__lttng_types_open__, TRACE_SYSTEM), |
226 | .read = seq_read, | |
227 | .llseek = seq_lseek, | |
228 | .release = seq_release_private, | |
229 | }; | |
230 | ||
231 | static struct dentry *TP_ID(__lttng_types_dentry__, TRACE_SYSTEM); | |
232 | ||
233 | static int TP_ID(__lttng_types_init__, TRACE_SYSTEM)(void) | |
234 | { | |
235 | int ret = 0; | |
236 | ||
237 | TP_ID(__lttng_types_dentry__, TRACE_SYSTEM) = | |
6db3d13b MD |
238 | debugfs_create_file("lttng-events-" __stringify(TRACE_SYSTEM), |
239 | S_IWUSR, NULL, NULL, | |
240 | &TP_ID(__lttng_types_fops__, TRACE_SYSTEM)); | |
d0dd2ecb MD |
241 | if (IS_ERR(TP_ID(__lttng_types_dentry__, TRACE_SYSTEM)) |
242 | || !TP_ID(__lttng_types_dentry__, TRACE_SYSTEM)) { | |
243 | printk(KERN_ERR "Error creating LTTng type export file\n"); | |
244 | ret = -ENOMEM; | |
245 | goto error; | |
246 | } | |
247 | error: | |
248 | return ret; | |
249 | } | |
250 | ||
d0dd2ecb MD |
/* Remove the debugfs metadata export file for this subsystem. */
static void TP_ID(__lttng_types_exit__, TRACE_SYSTEM)(void)
{
	debugfs_remove(TP_ID(__lttng_types_dentry__, TRACE_SYSTEM));
}

#undef TP_ID1
#undef TP_ID
258 | ||
40652b65 | 259 | /* |
6db3d13b | 260 | * Stage 4 of the trace events. |
40652b65 MD |
261 | * |
262 | * Create static inline function that calculates event size. | |
263 | */ | |
264 | ||
6db3d13b | 265 | #include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */ |
40652b65 | 266 | |
6db3d13b MD |
267 | /* Named field types must be defined in lttng-types.h */ |
268 | ||
269 | #undef __field | |
85a80742 MD |
270 | #define __field(_type, _item) \ |
271 | __event_len += lib_ring_buffer_align(__event_len, __alignof__(_type)); \ | |
0d1d4002 | 272 | __event_len += sizeof(_type); |
6db3d13b MD |
273 | |
274 | #undef __field_ext | |
275 | #define __field_ext(_type, _item, _filter_type) __field(_type, _item) | |
276 | ||
277 | #undef __array | |
85a80742 MD |
278 | #define __array(_type, _item, _length) \ |
279 | __event_len += lib_ring_buffer_align(__event_len, __alignof__(_type)); \ | |
0d1d4002 | 280 | __event_len += sizeof(_type) * (_length); |
6db3d13b MD |
281 | |
282 | #undef __dynamic_array | |
85a80742 MD |
283 | #define __dynamic_array(_type, _item, _length) \ |
284 | __event_len += lib_ring_buffer_align(__event_len, __alignof__(u32)); \ | |
285 | __event_len += sizeof(u32); \ | |
286 | __event_len += lib_ring_buffer_align(__event_len, __alignof__(_type)); \ | |
0d1d4002 | 287 | __event_len += sizeof(_type) * (_length); |
6db3d13b MD |
288 | |
289 | #undef __string | |
85a80742 | 290 | #define __string(_item, _src) \ |
0d1d4002 MD |
291 | __event_len += __dynamic_len[__dynamic_len_idx++] = strlen(_src) + 1; |
292 | ||
293 | #undef TP_PROTO | |
294 | #define TP_PROTO(args...) args | |
6db3d13b MD |
295 | |
296 | #undef TP_STRUCT__entry | |
0d1d4002 | 297 | #define TP_STRUCT__entry(args...) args |
6db3d13b MD |
298 | |
299 | #undef DECLARE_EVENT_CLASS | |
0d1d4002 MD |
300 | #define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \ |
301 | static inline size_t __event_get_size__##_name(size_t *__dynamic_len, _proto) \ | |
302 | { \ | |
303 | size_t __event_len = 0; \ | |
304 | unsigned int __dynamic_len_idx = 0; \ | |
305 | \ | |
306 | if (0) \ | |
307 | (void) __dynamic_len_idx; /* don't warn if unused */ \ | |
308 | _tstruct \ | |
309 | return __event_len; \ | |
6db3d13b | 310 | } |
40652b65 MD |
311 | |
312 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
313 | ||
6db3d13b | 314 | |
6db3d13b | 315 | |
40652b65 | 316 | /* |
e763dbf5 MD |
317 | * Stage 5 of the trace events. |
318 | * | |
319 | * Create static inline function that calculates event payload alignment. | |
320 | */ | |
321 | ||
#include "lttng-events-reset.h"	/* Reset all macros within TRACE_EVENT */

/* Named field types must be defined in lttng-types.h */

/* Each field contributes its natural alignment to the event alignment. */
#undef __field
#define __field(_type, _item)						  \
	__event_align = max_t(size_t, __event_align, __alignof__(_type));

#undef __field_ext
#define __field_ext(_type, _item, _filter_type)	__field(_type, _item)

#undef __array
#define __array(_type, _item, _length)					  \
	__event_align = max_t(size_t, __event_align, __alignof__(_type));

/* Account for both the u32 length header and the element type. */
#undef __dynamic_array
#define __dynamic_array(_type, _item, _length)				  \
	__event_align = max_t(size_t, __event_align, __alignof__(u32));	  \
	__event_align = max_t(size_t, __event_align, __alignof__(_type));

/* Strings are byte-aligned: no alignment contribution. */
#undef __string
#define __string(_item, _src)

#undef TP_PROTO
#define TP_PROTO(args...) args

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

/*
 * Generate a per-class inline helper returning the largest alignment
 * required by any payload field (at least 1).
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
static inline size_t __event_get_align__##_name(_proto)			      \
{									      \
	size_t __event_align = 1;					      \
	_tstruct							      \
	return __event_align;						      \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
361 | ||
362 | ||
e763dbf5 MD |
363 | /* |
364 | * Stage 6 of the trace events. | |
40652b65 | 365 | * |
3c4ffab9 MD |
366 | * Create structure declaration that allows the "assign" macros to access the |
367 | * field types. | |
368 | */ | |
369 | ||
#include "lttng-events-reset.h"	/* Reset all macros within TRACE_EVENT */

/* Named field types must be defined in lttng-types.h */

#undef __field
#define __field(_type, _item)		_type	_item;

#undef __field_ext
#define __field_ext(_type, _item, _filter_type)	__field(_type, _item)

/* Only the element type matters for the typemap; drop the array length. */
#undef __array
#define __array(_type, _item, _length)	_type	_item;

#undef __dynamic_array
#define __dynamic_array(_type, _item, _length)	_type	_item;

/* A single char member suffices: the typemap is only queried via typeof. */
#undef __string
#define __string(_item, _src)		char	_item;

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

/*
 * Generate a struct mirroring the event fields so that the stage 7
 * assignment macros can recover each destination field's type.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
	struct __event_typemap__##_name {				     \
		_tstruct						     \
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
399 | ||
400 | ||
401 | /* | |
402 | * Stage 7 of the trace events. | |
403 | * | |
40652b65 MD |
 * Create the probe function: call event size calculation and write event data
405 | * into the buffer. | |
e763dbf5 | 406 | * |
67e5e60c MD |
407 | * We use both the field and assignment macros to write the fields in the order |
408 | * defined in the field declaration. The field declarations control the | |
409 | * execution order, jumping to the appropriate assignment block. | |
40652b65 MD |
410 | */ |
411 | ||
e763dbf5 MD |
#include "lttng-events-reset.h"	/* Reset all macros within TRACE_EVENT */

/*
 * Field declarations drive execution order: each expands to a jump into
 * the matching assignment block (from _assign), which jumps back to the
 * __end_field label so the next field is emitted in declaration order.
 */

#undef __field
#define __field(_type, _item)		\
	goto __assign_##_item;		\
__end_field_##_item:

#undef __field_ext
#define __field_ext(_type, _item, _filter_type)	__field(_type, _item)

#undef __array
#define __array(_type, _item, _length)	\
	goto __assign_##_item;		\
__end_field_##_item:

/* Two hops: one for the u32 length header, one for the payload. */
#undef __dynamic_array
#define __dynamic_array(_type, _item, _length)	\
	goto __assign_##_item##_1;		\
__end_field_##_item##_1:			\
	goto __assign_##_item##_2;		\
__end_field_##_item##_2:

#undef __string
#define __string(_item, _src)		\
	goto __assign_##_item;		\
__end_field_##_item:
438 | ||
/*
 * Macros mapping tp_assign() to "=", tp_memcpy() to memcpy() and tp_strcpy() to
 * strcpy(). Each block is entered by a goto from the field-control code
 * and jumps back to the corresponding __end_field label when done.
 */
#undef tp_assign
#define tp_assign(dest, src)						\
__assign_##dest:							\
	{								\
		/* Copy through a temp typed like the destination field. */ \
		__typeof__(__typemap.dest) __tmp = (src);		\
		lib_ring_buffer_align_ctx(&ctx, __alignof__(__tmp));	\
		__chan->ops->event_write(&ctx, &__tmp, sizeof(__tmp));	\
	}								\
	goto __end_field_##dest;

#undef tp_memcpy
#define tp_memcpy(dest, src, len)					\
__assign_##dest:							\
	lib_ring_buffer_align_ctx(&ctx, __alignof__(__typemap.dest));	\
	__chan->ops->event_write(&ctx, src, len);			\
	goto __end_field_##dest;

/* Dynamic arrays: write the u32 length header, then the payload. */
#undef tp_memcpy_dyn
#define tp_memcpy_dyn(dest, src, len)					\
__assign_##dest##_1:							\
	{								\
		u32 __tmpl = (len);					\
		lib_ring_buffer_align_ctx(&ctx, __alignof__(u32));	\
		__chan->ops->event_write(&ctx, &__tmpl, sizeof(u32));	\
	}								\
	goto __end_field_##dest##_1;					\
__assign_##dest##_2:							\
	lib_ring_buffer_align_ctx(&ctx, __alignof__(__typemap.dest));	\
	__chan->ops->event_write(&ctx, src, len);			\
	goto __end_field_##dest##_2;

/* Strings reuse tp_memcpy with the length precomputed in stage 4. */
#undef tp_strcpy
#define tp_strcpy(dest, src)						\
	tp_memcpy(dest, src, __get_dynamic_array_len(dest))
40652b65 | 477 | |
e763dbf5 MD |
/* Named field types must be defined in lttng-types.h */

/* In the probe body, the accessors are identity mappings on the field. */
#undef __get_str
#define __get_str(field)		field

#undef __get_dynamic_array
#define __get_dynamic_array(field)	field

/* Beware: this get len actually consumes the len value */
#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	__dynamic_len[__dynamic_len_idx++]

#undef TP_PROTO
#define TP_PROTO(args...) args

#undef TP_ARGS
#define TP_ARGS(args...) args

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TP_fast_assign
#define TP_fast_assign(args...) args
501 | ||
/*
 * Generate the tracepoint probe: compute payload size and alignment,
 * reserve ring buffer space, then emit the fields. _tstruct expands to
 * the control code (field ordering) and _assign to the copy code the
 * control code jumps into; see the goto macros above.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
static void __event_probe__##_name(void *__data, _proto)		      \
{									      \
	struct ltt_event *__event = __data;				      \
	struct ltt_channel *__chan = __event->chan;			      \
	struct lib_ring_buffer_ctx ctx;					      \
	size_t __event_len, __event_align;				      \
	size_t __dynamic_len_idx = 0;					      \
	size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)];	      \
	struct __event_typemap__##_name __typemap;			      \
	int __ret;							      \
									      \
	if (0)								      \
		(void) __dynamic_len_idx;	/* don't warn if unused */    \
	__event_len = __event_get_size__##_name(__dynamic_len, _args);	      \
	__event_align = __event_get_align__##_name(_args);		      \
	lib_ring_buffer_ctx_init(&ctx, __chan->chan, NULL, __event_len,	      \
				 __event_align, -1);			      \
	__ret = __chan->ops->event_reserve(&ctx);			      \
	if (__ret < 0)							      \
		return;							      \
	/* Control code (field ordering) */				      \
	_tstruct							      \
	__chan->ops->event_commit(&ctx);				      \
	return;								      \
	/* Copy code, steered by control code */			      \
	_assign								      \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
533 | ||
534 | ||
3afe7aac MD |
/*
 * Stage 8 of the trace events.
 *
 * Register/unregister probes at module load/unload.
 */

#include "lttng-events-reset.h"	/* Reset all macros within TRACE_EVENT */

/* Two-level expansions so TRACE_SYSTEM is expanded before token pasting. */
#define TP_ID1(_token, _system)	_token##_system
#define TP_ID(_token, _system)	TP_ID1(_token, _system)
#define module_init_eval1(_token, _system)	module_init(_token##_system)
#define module_init_eval(_token, _system)	module_init_eval1(_token, _system)
#define module_exit_eval1(_token, _system)	module_exit(_token##_system)
#define module_exit_eval(_token, _system)	module_exit_eval1(_token, _system)

/* Each event expands to one probe registration statement below. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(_template, _name, _proto, _args)			       \
	ret = ltt_probe_register(#_name, (void *) __event_probe__##_template); \
	WARN_ON_ONCE(ret);

static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
{
	int ret = 0;

	ret = TP_ID(__lttng_types_init__, TRACE_SYSTEM)();
	if (ret)
		return ret;
	/*
	 * NOTE(review): the include expands one registration per event, so
	 * only the last registration's status reaches the return value;
	 * earlier failures are only warned about — confirm this is intended.
	 */
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
	return ret;
}

module_init_eval(__lttng_events_init__, TRACE_SYSTEM);

/* Redefine DEFINE_EVENT so the same include now unregisters each probe. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(_template, _name, _proto, _args)	\
	ltt_probe_unregister(#_name);

static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
{
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
	TP_ID(__lttng_types_exit__, TRACE_SYSTEM)();
}

module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);

#undef module_init_eval
#undef module_exit_eval
#undef TP_ID1
#undef TP_ID
584 | ||
e763dbf5 | 585 | #if 0 |
40652b65 MD |
586 | |
587 | #include <linux/ftrace_event.h> | |
588 | ||
589 | /* | |
590 | * DECLARE_EVENT_CLASS can be used to add a generic function | |
591 | * handlers for events. That is, if all events have the same | |
592 | * parameters and just have distinct trace points. | |
593 | * Each tracepoint can be defined with DEFINE_EVENT and that | |
594 | * will map the DECLARE_EVENT_CLASS to the tracepoint. | |
595 | * | |
596 | * TRACE_EVENT is a one to one mapping between tracepoint and template. | |
597 | */ | |
598 | #undef TRACE_EVENT | |
599 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | |
600 | DECLARE_EVENT_CLASS(name, \ | |
601 | PARAMS(proto), \ | |
602 | PARAMS(args), \ | |
603 | PARAMS(tstruct), \ | |
604 | PARAMS(assign), \ | |
605 | PARAMS(print)); \ | |
606 | DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); | |
607 | ||
608 | ||
609 | #undef __field | |
610 | #define __field(type, item) type item; | |
611 | ||
612 | #undef __field_ext | |
613 | #define __field_ext(type, item, filter_type) type item; | |
614 | ||
615 | #undef __array | |
616 | #define __array(type, item, len) type item[len]; | |
617 | ||
618 | #undef __dynamic_array | |
619 | #define __dynamic_array(type, item, len) u32 __data_loc_##item; | |
620 | ||
621 | #undef __string | |
622 | #define __string(item, src) __dynamic_array(char, item, -1) | |
623 | ||
624 | #undef TP_STRUCT__entry | |
625 | #define TP_STRUCT__entry(args...) args | |
626 | ||
627 | #undef DECLARE_EVENT_CLASS | |
628 | #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ | |
629 | struct ftrace_raw_##name { \ | |
630 | struct trace_entry ent; \ | |
631 | tstruct \ | |
632 | char __data[0]; \ | |
633 | }; \ | |
634 | \ | |
635 | static struct ftrace_event_class event_class_##name; | |
636 | ||
637 | #undef DEFINE_EVENT | |
638 | #define DEFINE_EVENT(template, name, proto, args) \ | |
639 | static struct ftrace_event_call __used \ | |
640 | __attribute__((__aligned__(4))) event_##name | |
641 | ||
642 | #undef DEFINE_EVENT_PRINT | |
643 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
644 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
645 | ||
646 | /* Callbacks are meaningless to ftrace. */ | |
647 | #undef TRACE_EVENT_FN | |
648 | #define TRACE_EVENT_FN(name, proto, args, tstruct, \ | |
649 | assign, print, reg, unreg) \ | |
650 | TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ | |
651 | PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ | |
652 | ||
653 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
654 | ||
655 | ||
656 | /* | |
657 | * Stage 2 of the trace events. | |
658 | * | |
659 | * Create static inline function that calculates event size. | |
660 | */ | |
661 | ||
662 | #undef __field | |
663 | #define __field(type, item) | |
664 | ||
665 | #undef __field_ext | |
666 | #define __field_ext(type, item, filter_type) | |
667 | ||
668 | #undef __array | |
669 | #define __array(type, item, len) | |
670 | ||
671 | #undef __dynamic_array | |
672 | #define __dynamic_array(type, item, len) u32 item; | |
673 | ||
674 | #undef __string | |
675 | #define __string(item, src) __dynamic_array(char, item, -1) | |
676 | ||
677 | #undef DECLARE_EVENT_CLASS | |
678 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
679 | struct ftrace_data_offsets_##call { \ | |
680 | tstruct; \ | |
681 | }; | |
682 | ||
683 | #undef DEFINE_EVENT | |
684 | #define DEFINE_EVENT(template, name, proto, args) | |
685 | ||
686 | #undef DEFINE_EVENT_PRINT | |
687 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
688 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
689 | ||
690 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
691 | ||
692 | /* | |
693 | * Stage 3 of the trace events. | |
694 | * | |
695 | * Create the probe function : call even size calculation and write event data | |
696 | * into the buffer. | |
697 | */ | |
698 | ||
699 | #undef __entry | |
700 | #define __entry field | |
701 | ||
702 | #undef TP_printk | |
703 | #define TP_printk(fmt, args...) fmt "\n", args | |
704 | ||
705 | #undef __get_dynamic_array | |
706 | #define __get_dynamic_array(field) \ | |
707 | ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) | |
708 | ||
709 | #undef __get_str | |
710 | #define __get_str(field) (char *)__get_dynamic_array(field) | |
711 | ||
712 | #undef __print_flags | |
713 | #define __print_flags(flag, delim, flag_array...) \ | |
714 | ({ \ | |
715 | static const struct trace_print_flags __flags[] = \ | |
716 | { flag_array, { -1, NULL }}; \ | |
717 | ftrace_print_flags_seq(p, delim, flag, __flags); \ | |
718 | }) | |
719 | ||
720 | #undef __print_symbolic | |
721 | #define __print_symbolic(value, symbol_array...) \ | |
722 | ({ \ | |
723 | static const struct trace_print_flags symbols[] = \ | |
724 | { symbol_array, { -1, NULL }}; \ | |
725 | ftrace_print_symbols_seq(p, value, symbols); \ | |
726 | }) | |
727 | ||
728 | #undef __print_hex | |
729 | #define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len) | |
730 | ||
731 | #undef DECLARE_EVENT_CLASS | |
732 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
733 | static notrace enum print_line_t \ | |
734 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ | |
735 | struct trace_event *trace_event) \ | |
736 | { \ | |
737 | struct ftrace_event_call *event; \ | |
738 | struct trace_seq *s = &iter->seq; \ | |
739 | struct ftrace_raw_##call *field; \ | |
740 | struct trace_entry *entry; \ | |
741 | struct trace_seq *p = &iter->tmp_seq; \ | |
742 | int ret; \ | |
743 | \ | |
744 | event = container_of(trace_event, struct ftrace_event_call, \ | |
745 | event); \ | |
746 | \ | |
747 | entry = iter->ent; \ | |
748 | \ | |
749 | if (entry->type != event->event.type) { \ | |
750 | WARN_ON_ONCE(1); \ | |
751 | return TRACE_TYPE_UNHANDLED; \ | |
752 | } \ | |
753 | \ | |
754 | field = (typeof(field))entry; \ | |
755 | \ | |
756 | trace_seq_init(p); \ | |
757 | ret = trace_seq_printf(s, "%s: ", event->name); \ | |
758 | if (ret) \ | |
759 | ret = trace_seq_printf(s, print); \ | |
760 | if (!ret) \ | |
761 | return TRACE_TYPE_PARTIAL_LINE; \ | |
762 | \ | |
763 | return TRACE_TYPE_HANDLED; \ | |
764 | } \ | |
765 | static struct trace_event_functions ftrace_event_type_funcs_##call = { \ | |
766 | .trace = ftrace_raw_output_##call, \ | |
767 | }; | |
768 | ||
769 | #undef DEFINE_EVENT_PRINT | |
770 | #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ | |
771 | static notrace enum print_line_t \ | |
772 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ | |
773 | struct trace_event *event) \ | |
774 | { \ | |
775 | struct trace_seq *s = &iter->seq; \ | |
776 | struct ftrace_raw_##template *field; \ | |
777 | struct trace_entry *entry; \ | |
778 | struct trace_seq *p = &iter->tmp_seq; \ | |
779 | int ret; \ | |
780 | \ | |
781 | entry = iter->ent; \ | |
782 | \ | |
783 | if (entry->type != event_##call.event.type) { \ | |
784 | WARN_ON_ONCE(1); \ | |
785 | return TRACE_TYPE_UNHANDLED; \ | |
786 | } \ | |
787 | \ | |
788 | field = (typeof(field))entry; \ | |
789 | \ | |
790 | trace_seq_init(p); \ | |
791 | ret = trace_seq_printf(s, "%s: ", #call); \ | |
792 | if (ret) \ | |
793 | ret = trace_seq_printf(s, print); \ | |
794 | if (!ret) \ | |
795 | return TRACE_TYPE_PARTIAL_LINE; \ | |
796 | \ | |
797 | return TRACE_TYPE_HANDLED; \ | |
798 | } \ | |
799 | static struct trace_event_functions ftrace_event_type_funcs_##call = { \ | |
800 | .trace = ftrace_raw_output_##call, \ | |
801 | }; | |
802 | ||
803 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
804 | ||
805 | #undef __field_ext | |
806 | #define __field_ext(type, item, filter_type) \ | |
807 | ret = trace_define_field(event_call, #type, #item, \ | |
808 | offsetof(typeof(field), item), \ | |
809 | sizeof(field.item), \ | |
810 | is_signed_type(type), filter_type); \ | |
811 | if (ret) \ | |
812 | return ret; | |
813 | ||
814 | #undef __field | |
815 | #define __field(type, item) __field_ext(type, item, FILTER_OTHER) | |
816 | ||
817 | #undef __array | |
818 | #define __array(type, item, len) \ | |
819 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | |
820 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | |
821 | offsetof(typeof(field), item), \ | |
822 | sizeof(field.item), \ | |
823 | is_signed_type(type), FILTER_OTHER); \ | |
824 | if (ret) \ | |
825 | return ret; | |
826 | ||
827 | #undef __dynamic_array | |
828 | #define __dynamic_array(type, item, len) \ | |
829 | ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \ | |
830 | offsetof(typeof(field), __data_loc_##item), \ | |
831 | sizeof(field.__data_loc_##item), \ | |
832 | is_signed_type(type), FILTER_OTHER); | |
833 | ||
834 | #undef __string | |
835 | #define __string(item, src) __dynamic_array(char, item, -1) | |
836 | ||
837 | #undef DECLARE_EVENT_CLASS | |
838 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ | |
839 | static int notrace \ | |
840 | ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ | |
841 | { \ | |
842 | struct ftrace_raw_##call field; \ | |
843 | int ret; \ | |
844 | \ | |
845 | tstruct; \ | |
846 | \ | |
847 | return ret; \ | |
848 | } | |
849 | ||
850 | #undef DEFINE_EVENT | |
851 | #define DEFINE_EVENT(template, name, proto, args) | |
852 | ||
853 | #undef DEFINE_EVENT_PRINT | |
854 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
855 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
856 | ||
857 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
858 | ||
859 | /* | |
860 | * remember the offset of each array from the beginning of the event. | |
861 | */ | |
862 | ||
863 | #undef __entry | |
864 | #define __entry entry | |
865 | ||
866 | #undef __field | |
867 | #define __field(type, item) | |
868 | ||
869 | #undef __field_ext | |
870 | #define __field_ext(type, item, filter_type) | |
871 | ||
872 | #undef __array | |
873 | #define __array(type, item, len) | |
874 | ||
875 | #undef __dynamic_array | |
876 | #define __dynamic_array(type, item, len) \ | |
877 | __data_offsets->item = __data_size + \ | |
878 | offsetof(typeof(*entry), __data); \ | |
879 | __data_offsets->item |= (len * sizeof(type)) << 16; \ | |
880 | __data_size += (len) * sizeof(type); | |
881 | ||
882 | #undef __string | |
883 | #define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) | |
884 | ||
885 | #undef DECLARE_EVENT_CLASS | |
886 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
887 | static inline notrace int ftrace_get_offsets_##call( \ | |
888 | struct ftrace_data_offsets_##call *__data_offsets, proto) \ | |
889 | { \ | |
890 | int __data_size = 0; \ | |
891 | struct ftrace_raw_##call __maybe_unused *entry; \ | |
892 | \ | |
893 | tstruct; \ | |
894 | \ | |
895 | return __data_size; \ | |
896 | } | |
897 | ||
898 | #undef DEFINE_EVENT | |
899 | #define DEFINE_EVENT(template, name, proto, args) | |
900 | ||
901 | #undef DEFINE_EVENT_PRINT | |
902 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
903 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
904 | ||
905 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
906 | ||
907 | /* | |
908 | * Stage 4 of the trace events. | |
909 | * | |
910 | * Override the macros in <trace/trace_events.h> to include the following: | |
911 | * | |
912 | * For those macros defined with TRACE_EVENT: | |
913 | * | |
914 | * static struct ftrace_event_call event_<call>; | |
915 | * | |
916 | * static void ftrace_raw_event_<call>(void *__data, proto) | |
917 | * { | |
918 | * struct ftrace_event_call *event_call = __data; | |
919 | * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; | |
920 | * struct ring_buffer_event *event; | |
921 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | |
922 | * struct ring_buffer *buffer; | |
923 | * unsigned long irq_flags; | |
924 | * int __data_size; | |
925 | * int pc; | |
926 | * | |
927 | * local_save_flags(irq_flags); | |
928 | * pc = preempt_count(); | |
929 | * | |
930 | * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); | |
931 | * | |
932 | * event = trace_current_buffer_lock_reserve(&buffer, | |
933 | * event_<call>->event.type, | |
934 | * sizeof(*entry) + __data_size, | |
935 | * irq_flags, pc); | |
936 | * if (!event) | |
937 | * return; | |
938 | * entry = ring_buffer_event_data(event); | |
939 | * | |
940 | * { <assign>; } <-- Here we assign the entries by the __field and | |
941 | * __array macros. | |
942 | * | |
943 | * if (!filter_current_check_discard(buffer, event_call, entry, event)) | |
944 | * trace_current_buffer_unlock_commit(buffer, | |
945 | * event, irq_flags, pc); | |
946 | * } | |
947 | * | |
948 | * static struct trace_event ftrace_event_type_<call> = { | |
949 | * .trace = ftrace_raw_output_<call>, <-- stage 2 | |
950 | * }; | |
951 | * | |
952 | * static const char print_fmt_<call>[] = <TP_printk>; | |
953 | * | |
954 | * static struct ftrace_event_class __used event_class_<template> = { | |
955 | * .system = "<system>", | |
956 | * .define_fields = ftrace_define_fields_<call>, | |
957 | * .fields = LIST_HEAD_INIT(event_class_##call.fields), | |
958 | * .raw_init = trace_event_raw_init, | |
959 | * .probe = ftrace_raw_event_##call, | |
960 | * .reg = ftrace_event_reg, | |
961 | * }; | |
962 | * | |
963 | * static struct ftrace_event_call __used | |
964 | * __attribute__((__aligned__(4))) | |
965 | * __attribute__((section("_ftrace_events"))) event_<call> = { | |
966 | * .name = "<call>", | |
967 | * .class = event_class_<template>, | |
968 | * .event = &ftrace_event_type_<call>, | |
969 | * .print_fmt = print_fmt_<call>, | |
970 | * }; | |
971 | * | |
972 | */ | |
973 | ||
974 | #ifdef CONFIG_PERF_EVENTS | |
975 | ||
976 | #define _TRACE_PERF_PROTO(call, proto) \ | |
977 | static notrace void \ | |
978 | perf_trace_##call(void *__data, proto); | |
979 | ||
980 | #define _TRACE_PERF_INIT(call) \ | |
981 | .perf_probe = perf_trace_##call, | |
982 | ||
983 | #else | |
984 | #define _TRACE_PERF_PROTO(call, proto) | |
985 | #define _TRACE_PERF_INIT(call) | |
986 | #endif /* CONFIG_PERF_EVENTS */ | |
987 | ||
988 | #undef __entry | |
989 | #define __entry entry | |
990 | ||
991 | #undef __field | |
992 | #define __field(type, item) | |
993 | ||
994 | #undef __array | |
995 | #define __array(type, item, len) | |
996 | ||
997 | #undef __dynamic_array | |
998 | #define __dynamic_array(type, item, len) \ | |
999 | __entry->__data_loc_##item = __data_offsets.item; | |
1000 | ||
1001 | #undef __string | |
1002 | #define __string(item, src) __dynamic_array(char, item, -1) \ | |
1003 | ||
1004 | #undef __assign_str | |
1005 | #define __assign_str(dst, src) \ | |
1006 | strcpy(__get_str(dst), src); | |
1007 | ||
1008 | #undef TP_fast_assign | |
1009 | #define TP_fast_assign(args...) args | |
1010 | ||
1011 | #undef TP_perf_assign | |
1012 | #define TP_perf_assign(args...) | |
1013 | ||
1014 | #undef DECLARE_EVENT_CLASS | |
1015 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
1016 | \ | |
1017 | static notrace void \ | |
1018 | ftrace_raw_event_##call(void *__data, proto) \ | |
1019 | { \ | |
1020 | struct ftrace_event_call *event_call = __data; \ | |
1021 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | |
1022 | struct ring_buffer_event *event; \ | |
1023 | struct ftrace_raw_##call *entry; \ | |
1024 | struct ring_buffer *buffer; \ | |
1025 | unsigned long irq_flags; \ | |
1026 | int __data_size; \ | |
1027 | int pc; \ | |
1028 | \ | |
1029 | local_save_flags(irq_flags); \ | |
1030 | pc = preempt_count(); \ | |
1031 | \ | |
1032 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | |
1033 | \ | |
1034 | event = trace_current_buffer_lock_reserve(&buffer, \ | |
1035 | event_call->event.type, \ | |
1036 | sizeof(*entry) + __data_size, \ | |
1037 | irq_flags, pc); \ | |
1038 | if (!event) \ | |
1039 | return; \ | |
1040 | entry = ring_buffer_event_data(event); \ | |
1041 | \ | |
1042 | tstruct \ | |
1043 | \ | |
1044 | { assign; } \ | |
1045 | \ | |
1046 | if (!filter_current_check_discard(buffer, event_call, entry, event)) \ | |
1047 | trace_nowake_buffer_unlock_commit(buffer, \ | |
1048 | event, irq_flags, pc); \ | |
1049 | } | |
1050 | /* | |
1051 | * The ftrace_test_probe is compiled out, it is only here as a build time check | |
1052 | * to make sure that if the tracepoint handling changes, the ftrace probe will | |
1053 | * fail to compile unless it too is updated. | |
1054 | */ | |
1055 | ||
1056 | #undef DEFINE_EVENT | |
1057 | #define DEFINE_EVENT(template, call, proto, args) \ | |
1058 | static inline void ftrace_test_probe_##call(void) \ | |
1059 | { \ | |
1060 | check_trace_callback_type_##call(ftrace_raw_event_##template); \ | |
1061 | } | |
1062 | ||
1063 | #undef DEFINE_EVENT_PRINT | |
1064 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) | |
1065 | ||
1066 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
1067 | ||
1068 | #undef __entry | |
1069 | #define __entry REC | |
1070 | ||
1071 | #undef __print_flags | |
1072 | #undef __print_symbolic | |
1073 | #undef __get_dynamic_array | |
1074 | #undef __get_str | |
1075 | ||
1076 | #undef TP_printk | |
1077 | #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args) | |
1078 | ||
1079 | #undef DECLARE_EVENT_CLASS | |
1080 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
1081 | _TRACE_PERF_PROTO(call, PARAMS(proto)); \ | |
1082 | static const char print_fmt_##call[] = print; \ | |
1083 | static struct ftrace_event_class __used event_class_##call = { \ | |
1084 | .system = __stringify(TRACE_SYSTEM), \ | |
1085 | .define_fields = ftrace_define_fields_##call, \ | |
1086 | .fields = LIST_HEAD_INIT(event_class_##call.fields),\ | |
1087 | .raw_init = trace_event_raw_init, \ | |
1088 | .probe = ftrace_raw_event_##call, \ | |
1089 | .reg = ftrace_event_reg, \ | |
1090 | _TRACE_PERF_INIT(call) \ | |
1091 | }; | |
1092 | ||
1093 | #undef DEFINE_EVENT | |
1094 | #define DEFINE_EVENT(template, call, proto, args) \ | |
1095 | \ | |
1096 | static struct ftrace_event_call __used \ | |
1097 | __attribute__((__aligned__(4))) \ | |
1098 | __attribute__((section("_ftrace_events"))) event_##call = { \ | |
1099 | .name = #call, \ | |
1100 | .class = &event_class_##template, \ | |
1101 | .event.funcs = &ftrace_event_type_funcs_##template, \ | |
1102 | .print_fmt = print_fmt_##template, \ | |
1103 | }; | |
1104 | ||
1105 | #undef DEFINE_EVENT_PRINT | |
1106 | #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ | |
1107 | \ | |
1108 | static const char print_fmt_##call[] = print; \ | |
1109 | \ | |
1110 | static struct ftrace_event_call __used \ | |
1111 | __attribute__((__aligned__(4))) \ | |
1112 | __attribute__((section("_ftrace_events"))) event_##call = { \ | |
1113 | .name = #call, \ | |
1114 | .class = &event_class_##template, \ | |
1115 | .event.funcs = &ftrace_event_type_funcs_##call, \ | |
1116 | .print_fmt = print_fmt_##call, \ | |
1117 | } | |
1118 | ||
1119 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
1120 | ||
1121 | /* | |
1122 | * Define the insertion callback to perf events | |
1123 | * | |
1124 | * The job is very similar to ftrace_raw_event_<call> except that we don't | |
1125 | * insert in the ring buffer but in a perf counter. | |
1126 | * | |
1127 | * static void ftrace_perf_<call>(proto) | |
1128 | * { | |
1129 | * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; | |
1130 | * struct ftrace_event_call *event_call = &event_<call>; | |
1131 | * extern void perf_tp_event(int, u64, u64, void *, int); | |
1132 | * struct ftrace_raw_##call *entry; | |
1133 | * struct perf_trace_buf *trace_buf; | |
1134 | * u64 __addr = 0, __count = 1; | |
1135 | * unsigned long irq_flags; | |
1136 | * struct trace_entry *ent; | |
1137 | * int __entry_size; | |
1138 | * int __data_size; | |
1139 | * int __cpu | |
1140 | * int pc; | |
1141 | * | |
1142 | * pc = preempt_count(); | |
1143 | * | |
1144 | * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); | |
1145 | * | |
1146 | * // Below we want to get the aligned size by taking into account | |
1147 | * // the u32 field that will later store the buffer size | |
1148 | * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), | |
1149 | * sizeof(u64)); | |
1150 | * __entry_size -= sizeof(u32); | |
1151 | * | |
1152 | * // Protect the non nmi buffer | |
1153 | * // This also protects the rcu read side | |
1154 | * local_irq_save(irq_flags); | |
1155 | * __cpu = smp_processor_id(); | |
1156 | * | |
1157 | * if (in_nmi()) | |
1158 | * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi); | |
1159 | * else | |
1160 | * trace_buf = rcu_dereference_sched(perf_trace_buf); | |
1161 | * | |
1162 | * if (!trace_buf) | |
1163 | * goto end; | |
1164 | * | |
1165 | * trace_buf = per_cpu_ptr(trace_buf, __cpu); | |
1166 | * | |
1167 | * // Avoid recursion from perf that could mess up the buffer | |
1168 | * if (trace_buf->recursion++) | |
1169 | * goto end_recursion; | |
1170 | * | |
1171 | * raw_data = trace_buf->buf; | |
1172 | * | |
1173 | * // Make recursion update visible before entering perf_tp_event | |
1174 | * // so that we protect from perf recursions. | |
1175 | * | |
1176 | * barrier(); | |
1177 | * | |
1178 | * //zero dead bytes from alignment to avoid stack leak to userspace: | |
1179 | * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; | |
1180 | * entry = (struct ftrace_raw_<call> *)raw_data; | |
1181 | * ent = &entry->ent; | |
1182 | * tracing_generic_entry_update(ent, irq_flags, pc); | |
1183 | * ent->type = event_call->id; | |
1184 | * | |
1185 | * <tstruct> <- do some jobs with dynamic arrays | |
1186 | * | |
1187 | * <assign> <- affect our values | |
1188 | * | |
1189 | * perf_tp_event(event_call->id, __addr, __count, entry, | |
1190 | * __entry_size); <- submit them to perf counter | |
1191 | * | |
1192 | * } | |
1193 | */ | |
1194 | ||
1195 | #ifdef CONFIG_PERF_EVENTS | |
1196 | ||
1197 | #undef __entry | |
1198 | #define __entry entry | |
1199 | ||
1200 | #undef __get_dynamic_array | |
1201 | #define __get_dynamic_array(field) \ | |
1202 | ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) | |
1203 | ||
1204 | #undef __get_str | |
1205 | #define __get_str(field) (char *)__get_dynamic_array(field) | |
1206 | ||
1207 | #undef __perf_addr | |
1208 | #define __perf_addr(a) __addr = (a) | |
1209 | ||
1210 | #undef __perf_count | |
1211 | #define __perf_count(c) __count = (c) | |
1212 | ||
1213 | #undef DECLARE_EVENT_CLASS | |
1214 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
1215 | static notrace void \ | |
1216 | perf_trace_##call(void *__data, proto) \ | |
1217 | { \ | |
1218 | struct ftrace_event_call *event_call = __data; \ | |
1219 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | |
1220 | struct ftrace_raw_##call *entry; \ | |
1221 | struct pt_regs __regs; \ | |
1222 | u64 __addr = 0, __count = 1; \ | |
1223 | struct hlist_head *head; \ | |
1224 | int __entry_size; \ | |
1225 | int __data_size; \ | |
1226 | int rctx; \ | |
1227 | \ | |
1228 | perf_fetch_caller_regs(&__regs); \ | |
1229 | \ | |
1230 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | |
1231 | __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ | |
1232 | sizeof(u64)); \ | |
1233 | __entry_size -= sizeof(u32); \ | |
1234 | \ | |
1235 | if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ | |
1236 | "profile buffer not large enough")) \ | |
1237 | return; \ | |
1238 | \ | |
1239 | entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ | |
1240 | __entry_size, event_call->event.type, &__regs, &rctx); \ | |
1241 | if (!entry) \ | |
1242 | return; \ | |
1243 | \ | |
1244 | tstruct \ | |
1245 | \ | |
1246 | { assign; } \ | |
1247 | \ | |
1248 | head = this_cpu_ptr(event_call->perf_events); \ | |
1249 | perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ | |
1250 | __count, &__regs, head); \ | |
1251 | } | |
1252 | ||
1253 | /* | |
1254 | * This part is compiled out, it is only here as a build time check | |
1255 | * to make sure that if the tracepoint handling changes, the | |
1256 | * perf probe will fail to compile unless it too is updated. | |
1257 | */ | |
1258 | #undef DEFINE_EVENT | |
1259 | #define DEFINE_EVENT(template, call, proto, args) \ | |
1260 | static inline void perf_test_probe_##call(void) \ | |
1261 | { \ | |
1262 | check_trace_callback_type_##call(perf_trace_##template); \ | |
1263 | } | |
1264 | ||
1265 | ||
1266 | #undef DEFINE_EVENT_PRINT | |
1267 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
1268 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
1269 | ||
1270 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
1271 | #endif /* CONFIG_PERF_EVENTS */ | |
1272 | ||
1273 | #undef _TRACE_PROFILE_INIT | |
1d12cebd | 1274 | #endif //0 |