c5eeac28cbf8aea5fba345dbabcc3c73195cd2e3
[lttng-modules.git] / probes / lttng-events.h
1 #include <lttng.h>
2 #include <lttng-types.h>
3 #include <linux/debugfs.h>
4 #include "../ltt-tracer-core.h"
5
6 #if 0
7
8 /* keep for a later stage (copy stage) */
9 /*
10 * Macros mapping tp_assign() to "=", tp_memcpy() to memcpy() and tp_strcpy() to
11 * strcpy().
12 */
13 #undef tp_assign
14 #define tp_assign(dest, src) \
15 lib_ring_buffer_align_ctx(config, &ctx, sizeof(src)); \
16 lib_ring_buffer_write(config, &ctx, &src, sizeof(src));
17
18 #undef tp_memcpy
19 #define tp_memcpy(dest, src, len) \
20 lib_ring_buffer_align_ctx(config, &ctx, sizeof(*(src))); \
21 lib_ring_buffer_write(config, &ctx, &src, len);
22
23 /* TODO */
24 #undef tp_strcpy
25 #define tp_strcpy(dest, src) __assign_str(dest, src);
26
27 #endif //0
28
29 /* TODO: deal with DEFINE_EVENT vs. event class */
30
31 struct lttng_event_field {
32 const char *name;
33 const struct lttng_type type;
34 };
35
36 struct lttng_event_desc {
37 const struct lttng_event_field *fields;
38 const char *name;
39 unsigned int nr_fields;
40 };
41
42 /*
43 * Macro declarations used for all stages.
44 */
45
46 /*
47 * DECLARE_EVENT_CLASS can be used to add generic function
48 * handlers for events. That is, if all events have the same
49 * parameters and just have distinct trace points.
50 * Each tracepoint can be defined with DEFINE_EVENT and that
51 * will map the DECLARE_EVENT_CLASS to the tracepoint.
52 *
53 * TRACE_EVENT is a one to one mapping between tracepoint and template.
54 */
55
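/*
 * Illustration only: a hypothetical probe definition (event "sample", not
 * part of this file) using these macros could look roughly like the block
 * below. TP_ARGS and TP_fast_assign come from the standard tracepoint
 * headers; tp_assign()/tp_strcpy() are the copy-stage helpers sketched in
 * the #if 0 block above.
 *
 *	TRACE_EVENT(sample,
 *		TP_PROTO(int foo, const char *msg),
 *		TP_ARGS(foo, msg),
 *		TP_STRUCT__entry(
 *			__field(int, foo)
 *			__string(msg, msg)
 *		),
 *		TP_fast_assign(
 *			tp_assign(foo, foo)
 *			tp_strcpy(msg, msg)
 *		),
 *		TP_printk("foo %d msg %s", __entry->foo, __get_str(msg))
 *	)
 *
 * Each stage below redefines the field macros and re-includes the
 * TRACE_INCLUDE file to generate a different piece of support code from the
 * same definition.
 */
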
56 #undef TRACE_EVENT
57 #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
58 DECLARE_EVENT_CLASS(name, \
59 PARAMS(proto), \
60 PARAMS(args), \
61 PARAMS(tstruct), \
62 PARAMS(assign), \
63 PARAMS(print)) \
64 DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args))
65
66 #undef DEFINE_EVENT_PRINT
67 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
68 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
69
70 /* Callbacks are meaningless to LTTng. */
71 #undef TRACE_EVENT_FN
72 #define TRACE_EVENT_FN(name, proto, args, tstruct, \
73 assign, print, reg, unreg) \
74 TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
75 PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
76
77 /*
78 * Stage 1 of the trace events.
79 *
80 * Create event field type metadata section.
81 * Each event produces an array of fields.
82 */
83
84 #include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
85
86 /* Named field types must be defined in lttng-types.h */
87
88 #undef __field
89 #define __field(_type, _item) \
90 { .name = #_item, .type = { .atype = atype_integer, .name = #_type} },
91
92 #undef __field_ext
93 #define __field_ext(_type, _item, _filter_type) __field(_type, _item)
94
95 #undef __array
96 #define __array(_type, _item, _length) \
97 { \
98 .name = #_item, \
99 .type = { \
100 .atype = atype_array, \
101 .name = NULL, \
102 .u.array.elem_type = #_type, \
103 .u.array.length = _length, \
104 }, \
105 },
106
107 #undef __dynamic_array
108 #define __dynamic_array(_type, _item, _length) \
109 { \
110 .name = #_item, \
111 .type = { \
112 .atype = atype_sequence, \
113 .name = NULL, \
114 .u.sequence.elem_type = #_type, \
115 .u.sequence.length_type = "u32", \
116 }, \
117 },
118
119 #undef __string
120 #define __string(_item, _src) \
121 { \
122 .name = #_item, \
123 .type = { \
124 .atype = atype_string, \
125 .name = NULL, \
126 .u.string.encoding = lttng_encode_UTF8, \
127 }, \
128 },
129
130 #undef TP_STRUCT__entry
131 #define TP_STRUCT__entry(args...) args /* Only one used in this phase */
132
133 #undef DECLARE_EVENT_CLASS
134 #define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
135 static const struct lttng_event_field __event_fields___##_name[] = { \
136 _tstruct \
137 };
138
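/*
 * Sketch of the expansion performed by this stage for the hypothetical
 * "sample" event declaring __field(int, foo) and __string(msg, msg):
 *
 *	static const struct lttng_event_field __event_fields___sample[] = {
 *		{ .name = "foo", .type = { .atype = atype_integer, .name = "int" } },
 *		{ .name = "msg", .type = { .atype = atype_string, .name = NULL,
 *					   .u.string.encoding = lttng_encode_UTF8 } },
 *	};
 */
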
139 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
140
141 /*
142 * Stage 2 of the trace events.
143 *
144 * Create an array of event descriptions.
145 */
146
147 /* Named field types must be defined in lttng-types.h */
148
149 #include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
150
151 #undef DECLARE_EVENT_CLASS
152 #define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
153 { \
154 .fields = __event_fields___##_name, \
155 .name = #_name, \
156 .nr_fields = ARRAY_SIZE(__event_fields___##_name), \
157 },
158
159 #define TP_ID1(_token, _system) _token##_system
160 #define TP_ID(_token, _system) TP_ID1(_token, _system)
161
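/*
 * TP_ID() goes through TP_ID1() so that TRACE_SYSTEM is expanded before the
 * tokens are pasted. For example, with a (hypothetical) TRACE_SYSTEM of
 * sample_system, TP_ID(__event_desc___, TRACE_SYSTEM) produces the
 * identifier __event_desc___sample_system.
 */
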
162 static const struct lttng_event_desc TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
163 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
164 };
165
166 #undef TP_ID1
167 #undef TP_ID
168
169 /*
170 * Stage 3 of the trace events.
171 *
172 * Create seq file metadata output.
173 */
174
175 #define TP_ID1(_token, _system) _token##_system
176 #define TP_ID(_token, _system) TP_ID1(_token, _system)
177 #define module_init_eval1(_token, _system) module_init(_token##_system)
178 #define module_init_eval(_token, _system) module_init_eval1(_token, _system)
179 #define module_exit_eval1(_token, _system) module_exit(_token##_system)
180 #define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)
181
182 static void *TP_ID(__lttng_seq_start__, TRACE_SYSTEM)(struct seq_file *m,
183 loff_t *pos)
184 {
185 const struct lttng_event_desc *desc =
186 &TP_ID(__event_desc___, TRACE_SYSTEM)[*pos];
187
188 if (desc > &TP_ID(__event_desc___, TRACE_SYSTEM)
189 [ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)) - 1])
190 return NULL;
191 return (void *) desc;
192 }
193
194 static void *TP_ID(__lttng_seq_next__, TRACE_SYSTEM)(struct seq_file *m,
195 void *p, loff_t *ppos)
196 {
197 const struct lttng_event_desc *desc =
198 &TP_ID(__event_desc___, TRACE_SYSTEM)[++(*ppos)];
199
200 if (desc > &TP_ID(__event_desc___, TRACE_SYSTEM)
201 [ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)) - 1])
202 return NULL;
203 return (void *) desc;
204 }
205
206 static void TP_ID(__lttng_seq_stop__, TRACE_SYSTEM)(struct seq_file *m,
207 void *p)
208 {
209 }
210
211 static int TP_ID(__lttng_seq_show__, TRACE_SYSTEM)(struct seq_file *m,
212 void *p)
213 {
214 const struct lttng_event_desc *desc = p;
215 int i;
216
217 seq_printf(m, "event {\n"
218 "\tname = %s;\n"
219 "\tid = UNKNOWN;\n"
220 "\tstream = UNKNOWN;\n"
221 "\tfields = {\n",
222 desc->name);
223 for (i = 0; i < desc->nr_fields; i++) {
224 if (desc->fields[i].type.name) /* Named type */
225 seq_printf(m, "\t\t%s",
226 desc->fields[i].type.name);
227 else /* Nameless type */
228 lttng_print_event_type(m, 2, &desc->fields[i].type);
229 seq_printf(m, " %s;\n", desc->fields[i].name);
230 }
231 seq_printf(m, "\t};\n");
232 seq_printf(m, "};\n");
233 return 0;
234 }
235
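/*
 * For the hypothetical "sample" event with a single __field(int, foo), the
 * text emitted by this show operation would look roughly like:
 *
 *	event {
 *		name = sample;
 *		id = UNKNOWN;
 *		stream = UNKNOWN;
 *		fields = {
 *			int foo;
 *		};
 *	};
 *
 * Nameless types (arrays, sequences, strings) are printed through
 * lttng_print_event_type() rather than by their type name.
 */
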
236 static const
237 struct seq_operations TP_ID(__lttng_types_seq_ops__, TRACE_SYSTEM) = {
238 .start = TP_ID(__lttng_seq_start__, TRACE_SYSTEM),
239 .next = TP_ID(__lttng_seq_next__, TRACE_SYSTEM),
240 .stop = TP_ID(__lttng_seq_stop__, TRACE_SYSTEM),
241 .show = TP_ID(__lttng_seq_show__, TRACE_SYSTEM),
242 };
243
244 static int
245 TP_ID(__lttng_types_open__, TRACE_SYSTEM)(struct inode *inode, struct file *file)
246 {
247 return seq_open(file, &TP_ID(__lttng_types_seq_ops__, TRACE_SYSTEM));
248 }
249
250 static const
251 struct file_operations TP_ID(__lttng_types_fops__, TRACE_SYSTEM) = {
252 .open = TP_ID(__lttng_types_open__, TRACE_SYSTEM),
253 .read = seq_read,
254 .llseek = seq_lseek,
255 .release = seq_release,
256 };
257
258 static struct dentry *TP_ID(__lttng_types_dentry__, TRACE_SYSTEM);
259
260 static int TP_ID(__lttng_types_init__, TRACE_SYSTEM)(void)
261 {
262 int ret = 0;
263
264 TP_ID(__lttng_types_dentry__, TRACE_SYSTEM) =
265 debugfs_create_file("lttng-events-" __stringify(TRACE_SYSTEM),
266 S_IWUSR, NULL, NULL,
267 &TP_ID(__lttng_types_fops__, TRACE_SYSTEM));
268 if (IS_ERR(TP_ID(__lttng_types_dentry__, TRACE_SYSTEM))
269 || !TP_ID(__lttng_types_dentry__, TRACE_SYSTEM)) {
270 printk(KERN_ERR "Error creating LTTng type export file\n");
271 ret = -ENOMEM;
272 goto error;
273 }
274 error:
275 return ret;
276 }
277
278 module_init_eval(__lttng_types_init__, TRACE_SYSTEM);
279
280 static void TP_ID(__lttng_types_exit__, TRACE_SYSTEM)(void)
281 {
282 debugfs_remove(TP_ID(__lttng_types_dentry__, TRACE_SYSTEM));
283 }
284
285 module_exit_eval(__lttng_types_exit__, TRACE_SYSTEM);
286
287 #undef module_init_eval
288 #undef module_exit_eval
289 #undef TP_ID1
290 #undef TP_ID
291
292
293 /*
294 * Stage 4 of the trace events.
295 *
296 * Create static inline function that calculates event size.
297 */
298
299 #include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
300
301 /* Named field types must be defined in lttng-types.h */
302
303 #undef __field
304 #define __field(_type, _item) \
305 __event_len += lib_ring_buffer_align(__event_len, sizeof(_type)); \
306 __event_len += sizeof(_type);
307
308 #undef __field_ext
309 #define __field_ext(_type, _item, _filter_type) __field(_type, _item)
310
311 #undef __array
312 #define __array(_type, _item, _length) \
313 __event_len += lib_ring_buffer_align(__event_len, sizeof(_type)); \
314 __event_len += sizeof(_type) * (_length);
315
316 #undef __dynamic_array
317 #define __dynamic_array(_type, _item, _length) \
318 __event_len += lib_ring_buffer_align(__event_len, sizeof(u32)); \
319 __event_len += sizeof(u32); \
320 __event_len += lib_ring_buffer_align(__event_len, sizeof(_type)); \
321 __event_len += sizeof(_type) * (_length);
322
323 #undef __string
324 #define __string(_item, _src) \
325 __event_len += __dynamic_len[__dynamic_len_idx++] = strlen(_src) + 1;
326
327 #undef TP_PROTO
328 #define TP_PROTO(args...) args
329
330 #undef TP_STRUCT__entry
331 #define TP_STRUCT__entry(args...) args
332
333 #undef DECLARE_EVENT_CLASS
334 #define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
335 static inline size_t __event_get_size__##_name(size_t *__dynamic_len, _proto) \
336 { \
337 size_t __event_len = 0; \
338 unsigned int __dynamic_len_idx = 0; \
339 \
340 if (0) \
341 (void) __dynamic_len_idx; /* don't warn if unused */ \
342 _tstruct \
343 return __event_len; \
344 }
345
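/*
 * Rough sketch of what this stage generates for the hypothetical "sample"
 * event (__field(int, foo) followed by __string(msg, msg)):
 *
 *	static inline size_t __event_get_size__sample(size_t *__dynamic_len,
 *			int foo, const char *msg)
 *	{
 *		size_t __event_len = 0;
 *		unsigned int __dynamic_len_idx = 0;
 *
 *		__event_len += lib_ring_buffer_align(__event_len, sizeof(int));
 *		__event_len += sizeof(int);
 *		__event_len += __dynamic_len[__dynamic_len_idx++] = strlen(msg) + 1;
 *		return __event_len;
 *	}
 */
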
346 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
347
348
349 #if 0
350
351 /*
352 * Stage 5 of the trace events.
353 *
354 * Create the probe function: call event size calculation and write event data
355 * into the buffer.
356 */
357
358
359
360 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
361
362
363
364
365 #include <linux/ftrace_event.h>
366
367 /*
368 * DECLARE_EVENT_CLASS can be used to add generic function
369 * handlers for events. That is, if all events have the same
370 * parameters and just have distinct trace points.
371 * Each tracepoint can be defined with DEFINE_EVENT and that
372 * will map the DECLARE_EVENT_CLASS to the tracepoint.
373 *
374 * TRACE_EVENT is a one to one mapping between tracepoint and template.
375 */
376 #undef TRACE_EVENT
377 #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
378 DECLARE_EVENT_CLASS(name, \
379 PARAMS(proto), \
380 PARAMS(args), \
381 PARAMS(tstruct), \
382 PARAMS(assign), \
383 PARAMS(print)); \
384 DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
385
386
387 #undef __field
388 #define __field(type, item) type item;
389
390 #undef __field_ext
391 #define __field_ext(type, item, filter_type) type item;
392
393 #undef __array
394 #define __array(type, item, len) type item[len];
395
396 #undef __dynamic_array
397 #define __dynamic_array(type, item, len) u32 __data_loc_##item;
398
399 #undef __string
400 #define __string(item, src) __dynamic_array(char, item, -1)
401
402 #undef TP_STRUCT__entry
403 #define TP_STRUCT__entry(args...) args
404
405 #undef DECLARE_EVENT_CLASS
406 #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
407 struct ftrace_raw_##name { \
408 struct trace_entry ent; \
409 tstruct \
410 char __data[0]; \
411 }; \
412 \
413 static struct ftrace_event_class event_class_##name;
414
415 #undef DEFINE_EVENT
416 #define DEFINE_EVENT(template, name, proto, args) \
417 static struct ftrace_event_call __used \
418 __attribute__((__aligned__(4))) event_##name
419
420 #undef DEFINE_EVENT_PRINT
421 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
422 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
423
424 /* Callbacks are meaningless to ftrace. */
425 #undef TRACE_EVENT_FN
426 #define TRACE_EVENT_FN(name, proto, args, tstruct, \
427 assign, print, reg, unreg) \
428 TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
429 PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
430
431 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
432
433
434 /*
435 * Stage 2 of the trace events.
436 *
437 * Create static inline function that calculates event size.
438 */
439
440 #undef __field
441 #define __field(type, item)
442
443 #undef __field_ext
444 #define __field_ext(type, item, filter_type)
445
446 #undef __array
447 #define __array(type, item, len)
448
449 #undef __dynamic_array
450 #define __dynamic_array(type, item, len) u32 item;
451
452 #undef __string
453 #define __string(item, src) __dynamic_array(char, item, -1)
454
455 #undef DECLARE_EVENT_CLASS
456 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
457 struct ftrace_data_offsets_##call { \
458 tstruct; \
459 };
460
461 #undef DEFINE_EVENT
462 #define DEFINE_EVENT(template, name, proto, args)
463
464 #undef DEFINE_EVENT_PRINT
465 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
466 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
467
468 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
469
470 /*
471 * Stage 3 of the trace events.
472 *
473 * Create the probe function: call event size calculation and write event data
474 * into the buffer.
475 */
476
477 #undef __entry
478 #define __entry field
479
480 #undef TP_printk
481 #define TP_printk(fmt, args...) fmt "\n", args
482
483 #undef __get_dynamic_array
484 #define __get_dynamic_array(field) \
485 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
486
487 #undef __get_str
488 #define __get_str(field) (char *)__get_dynamic_array(field)
489
490 #undef __print_flags
491 #define __print_flags(flag, delim, flag_array...) \
492 ({ \
493 static const struct trace_print_flags __flags[] = \
494 { flag_array, { -1, NULL }}; \
495 ftrace_print_flags_seq(p, delim, flag, __flags); \
496 })
497
498 #undef __print_symbolic
499 #define __print_symbolic(value, symbol_array...) \
500 ({ \
501 static const struct trace_print_flags symbols[] = \
502 { symbol_array, { -1, NULL }}; \
503 ftrace_print_symbols_seq(p, value, symbols); \
504 })
505
506 #undef __print_hex
507 #define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
508
509 #undef DECLARE_EVENT_CLASS
510 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
511 static notrace enum print_line_t \
512 ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
513 struct trace_event *trace_event) \
514 { \
515 struct ftrace_event_call *event; \
516 struct trace_seq *s = &iter->seq; \
517 struct ftrace_raw_##call *field; \
518 struct trace_entry *entry; \
519 struct trace_seq *p = &iter->tmp_seq; \
520 int ret; \
521 \
522 event = container_of(trace_event, struct ftrace_event_call, \
523 event); \
524 \
525 entry = iter->ent; \
526 \
527 if (entry->type != event->event.type) { \
528 WARN_ON_ONCE(1); \
529 return TRACE_TYPE_UNHANDLED; \
530 } \
531 \
532 field = (typeof(field))entry; \
533 \
534 trace_seq_init(p); \
535 ret = trace_seq_printf(s, "%s: ", event->name); \
536 if (ret) \
537 ret = trace_seq_printf(s, print); \
538 if (!ret) \
539 return TRACE_TYPE_PARTIAL_LINE; \
540 \
541 return TRACE_TYPE_HANDLED; \
542 } \
543 static struct trace_event_functions ftrace_event_type_funcs_##call = { \
544 .trace = ftrace_raw_output_##call, \
545 };
546
547 #undef DEFINE_EVENT_PRINT
548 #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
549 static notrace enum print_line_t \
550 ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
551 struct trace_event *event) \
552 { \
553 struct trace_seq *s = &iter->seq; \
554 struct ftrace_raw_##template *field; \
555 struct trace_entry *entry; \
556 struct trace_seq *p = &iter->tmp_seq; \
557 int ret; \
558 \
559 entry = iter->ent; \
560 \
561 if (entry->type != event_##call.event.type) { \
562 WARN_ON_ONCE(1); \
563 return TRACE_TYPE_UNHANDLED; \
564 } \
565 \
566 field = (typeof(field))entry; \
567 \
568 trace_seq_init(p); \
569 ret = trace_seq_printf(s, "%s: ", #call); \
570 if (ret) \
571 ret = trace_seq_printf(s, print); \
572 if (!ret) \
573 return TRACE_TYPE_PARTIAL_LINE; \
574 \
575 return TRACE_TYPE_HANDLED; \
576 } \
577 static struct trace_event_functions ftrace_event_type_funcs_##call = { \
578 .trace = ftrace_raw_output_##call, \
579 };
580
581 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
582
583 #undef __field_ext
584 #define __field_ext(type, item, filter_type) \
585 ret = trace_define_field(event_call, #type, #item, \
586 offsetof(typeof(field), item), \
587 sizeof(field.item), \
588 is_signed_type(type), filter_type); \
589 if (ret) \
590 return ret;
591
592 #undef __field
593 #define __field(type, item) __field_ext(type, item, FILTER_OTHER)
594
595 #undef __array
596 #define __array(type, item, len) \
597 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
598 ret = trace_define_field(event_call, #type "[" #len "]", #item, \
599 offsetof(typeof(field), item), \
600 sizeof(field.item), \
601 is_signed_type(type), FILTER_OTHER); \
602 if (ret) \
603 return ret;
604
605 #undef __dynamic_array
606 #define __dynamic_array(type, item, len) \
607 ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
608 offsetof(typeof(field), __data_loc_##item), \
609 sizeof(field.__data_loc_##item), \
610 is_signed_type(type), FILTER_OTHER);
611
612 #undef __string
613 #define __string(item, src) __dynamic_array(char, item, -1)
614
615 #undef DECLARE_EVENT_CLASS
616 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
617 static int notrace \
618 ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
619 { \
620 struct ftrace_raw_##call field; \
621 int ret; \
622 \
623 tstruct; \
624 \
625 return ret; \
626 }
627
628 #undef DEFINE_EVENT
629 #define DEFINE_EVENT(template, name, proto, args)
630
631 #undef DEFINE_EVENT_PRINT
632 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
633 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
634
635 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
636
637 /*
638 * remember the offset of each array from the beginning of the event.
639 */
640
641 #undef __entry
642 #define __entry entry
643
644 #undef __field
645 #define __field(type, item)
646
647 #undef __field_ext
648 #define __field_ext(type, item, filter_type)
649
650 #undef __array
651 #define __array(type, item, len)
652
653 #undef __dynamic_array
654 #define __dynamic_array(type, item, len) \
655 __data_offsets->item = __data_size + \
656 offsetof(typeof(*entry), __data); \
657 __data_offsets->item |= (len * sizeof(type)) << 16; \
658 __data_size += (len) * sizeof(type);
659
660 #undef __string
661 #define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
662
663 #undef DECLARE_EVENT_CLASS
664 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
665 static inline notrace int ftrace_get_offsets_##call( \
666 struct ftrace_data_offsets_##call *__data_offsets, proto) \
667 { \
668 int __data_size = 0; \
669 struct ftrace_raw_##call __maybe_unused *entry; \
670 \
671 tstruct; \
672 \
673 return __data_size; \
674 }
675
676 #undef DEFINE_EVENT
677 #define DEFINE_EVENT(template, name, proto, args)
678
679 #undef DEFINE_EVENT_PRINT
680 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
681 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
682
683 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
684
685 /*
686 * Stage 4 of the trace events.
687 *
688 * Override the macros in <trace/trace_events.h> to include the following:
689 *
690 * For those macros defined with TRACE_EVENT:
691 *
692 * static struct ftrace_event_call event_<call>;
693 *
694 * static void ftrace_raw_event_<call>(void *__data, proto)
695 * {
696 * struct ftrace_event_call *event_call = __data;
697 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
698 * struct ring_buffer_event *event;
699 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
700 * struct ring_buffer *buffer;
701 * unsigned long irq_flags;
702 * int __data_size;
703 * int pc;
704 *
705 * local_save_flags(irq_flags);
706 * pc = preempt_count();
707 *
708 * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
709 *
710 * event = trace_current_buffer_lock_reserve(&buffer,
711 * event_<call>->event.type,
712 * sizeof(*entry) + __data_size,
713 * irq_flags, pc);
714 * if (!event)
715 * return;
716 * entry = ring_buffer_event_data(event);
717 *
718 * { <assign>; } <-- Here we assign the entries by the __field and
719 * __array macros.
720 *
721 * if (!filter_current_check_discard(buffer, event_call, entry, event))
722 * trace_current_buffer_unlock_commit(buffer,
723 * event, irq_flags, pc);
724 * }
725 *
726 * static struct trace_event ftrace_event_type_<call> = {
727 * .trace = ftrace_raw_output_<call>, <-- stage 2
728 * };
729 *
730 * static const char print_fmt_<call>[] = <TP_printk>;
731 *
732 * static struct ftrace_event_class __used event_class_<template> = {
733 * .system = "<system>",
734 * .define_fields = ftrace_define_fields_<call>,
735 * .fields = LIST_HEAD_INIT(event_class_##call.fields),
736 * .raw_init = trace_event_raw_init,
737 * .probe = ftrace_raw_event_##call,
738 * .reg = ftrace_event_reg,
739 * };
740 *
741 * static struct ftrace_event_call __used
742 * __attribute__((__aligned__(4)))
743 * __attribute__((section("_ftrace_events"))) event_<call> = {
744 * .name = "<call>",
745 * .class = event_class_<template>,
746 * .event = &ftrace_event_type_<call>,
747 * .print_fmt = print_fmt_<call>,
748 * };
749 *
750 */
751
752 #ifdef CONFIG_PERF_EVENTS
753
754 #define _TRACE_PERF_PROTO(call, proto) \
755 static notrace void \
756 perf_trace_##call(void *__data, proto);
757
758 #define _TRACE_PERF_INIT(call) \
759 .perf_probe = perf_trace_##call,
760
761 #else
762 #define _TRACE_PERF_PROTO(call, proto)
763 #define _TRACE_PERF_INIT(call)
764 #endif /* CONFIG_PERF_EVENTS */
765
766 #undef __entry
767 #define __entry entry
768
769 #undef __field
770 #define __field(type, item)
771
772 #undef __array
773 #define __array(type, item, len)
774
775 #undef __dynamic_array
776 #define __dynamic_array(type, item, len) \
777 __entry->__data_loc_##item = __data_offsets.item;
778
779 #undef __string
780 #define __string(item, src) __dynamic_array(char, item, -1) \
781
782 #undef __assign_str
783 #define __assign_str(dst, src) \
784 strcpy(__get_str(dst), src);
785
786 #undef TP_fast_assign
787 #define TP_fast_assign(args...) args
788
789 #undef TP_perf_assign
790 #define TP_perf_assign(args...)
791
792 #undef DECLARE_EVENT_CLASS
793 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
794 \
795 static notrace void \
796 ftrace_raw_event_##call(void *__data, proto) \
797 { \
798 struct ftrace_event_call *event_call = __data; \
799 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
800 struct ring_buffer_event *event; \
801 struct ftrace_raw_##call *entry; \
802 struct ring_buffer *buffer; \
803 unsigned long irq_flags; \
804 int __data_size; \
805 int pc; \
806 \
807 local_save_flags(irq_flags); \
808 pc = preempt_count(); \
809 \
810 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
811 \
812 event = trace_current_buffer_lock_reserve(&buffer, \
813 event_call->event.type, \
814 sizeof(*entry) + __data_size, \
815 irq_flags, pc); \
816 if (!event) \
817 return; \
818 entry = ring_buffer_event_data(event); \
819 \
820 tstruct \
821 \
822 { assign; } \
823 \
824 if (!filter_current_check_discard(buffer, event_call, entry, event)) \
825 trace_nowake_buffer_unlock_commit(buffer, \
826 event, irq_flags, pc); \
827 }
828 /*
829 * The ftrace_test_probe is compiled out; it is only here as a build-time check
830 * to make sure that if the tracepoint handling changes, the ftrace probe will
831 * fail to compile unless it too is updated.
832 */
833
834 #undef DEFINE_EVENT
835 #define DEFINE_EVENT(template, call, proto, args) \
836 static inline void ftrace_test_probe_##call(void) \
837 { \
838 check_trace_callback_type_##call(ftrace_raw_event_##template); \
839 }
840
841 #undef DEFINE_EVENT_PRINT
842 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)
843
844 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
845
846 #undef __entry
847 #define __entry REC
848
849 #undef __print_flags
850 #undef __print_symbolic
851 #undef __get_dynamic_array
852 #undef __get_str
853
854 #undef TP_printk
855 #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
856
857 #undef DECLARE_EVENT_CLASS
858 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
859 _TRACE_PERF_PROTO(call, PARAMS(proto)); \
860 static const char print_fmt_##call[] = print; \
861 static struct ftrace_event_class __used event_class_##call = { \
862 .system = __stringify(TRACE_SYSTEM), \
863 .define_fields = ftrace_define_fields_##call, \
864 .fields = LIST_HEAD_INIT(event_class_##call.fields),\
865 .raw_init = trace_event_raw_init, \
866 .probe = ftrace_raw_event_##call, \
867 .reg = ftrace_event_reg, \
868 _TRACE_PERF_INIT(call) \
869 };
870
871 #undef DEFINE_EVENT
872 #define DEFINE_EVENT(template, call, proto, args) \
873 \
874 static struct ftrace_event_call __used \
875 __attribute__((__aligned__(4))) \
876 __attribute__((section("_ftrace_events"))) event_##call = { \
877 .name = #call, \
878 .class = &event_class_##template, \
879 .event.funcs = &ftrace_event_type_funcs_##template, \
880 .print_fmt = print_fmt_##template, \
881 };
882
883 #undef DEFINE_EVENT_PRINT
884 #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
885 \
886 static const char print_fmt_##call[] = print; \
887 \
888 static struct ftrace_event_call __used \
889 __attribute__((__aligned__(4))) \
890 __attribute__((section("_ftrace_events"))) event_##call = { \
891 .name = #call, \
892 .class = &event_class_##template, \
893 .event.funcs = &ftrace_event_type_funcs_##call, \
894 .print_fmt = print_fmt_##call, \
895 }
896
897 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
898
899 /*
900 * Define the insertion callback to perf events
901 *
902 * The job is very similar to ftrace_raw_event_<call> except that we don't
903 * insert in the ring buffer but in a perf counter.
904 *
905 * static void ftrace_perf_<call>(proto)
906 * {
907 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
908 * struct ftrace_event_call *event_call = &event_<call>;
909 * extern void perf_tp_event(int, u64, u64, void *, int);
910 * struct ftrace_raw_##call *entry;
911 * struct perf_trace_buf *trace_buf;
912 * u64 __addr = 0, __count = 1;
913 * unsigned long irq_flags;
914 * struct trace_entry *ent;
915 * int __entry_size;
916 * int __data_size;
917 * int __cpu;
918 * int pc;
919 *
920 * pc = preempt_count();
921 *
922 * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
923 *
924 * // Below we want to get the aligned size by taking into account
925 * // the u32 field that will later store the buffer size
926 * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
927 * sizeof(u64));
928 * __entry_size -= sizeof(u32);
929 *
930 * // Protect the non nmi buffer
931 * // This also protects the rcu read side
932 * local_irq_save(irq_flags);
933 * __cpu = smp_processor_id();
934 *
935 * if (in_nmi())
936 * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
937 * else
938 * trace_buf = rcu_dereference_sched(perf_trace_buf);
939 *
940 * if (!trace_buf)
941 * goto end;
942 *
943 * trace_buf = per_cpu_ptr(trace_buf, __cpu);
944 *
945 * // Avoid recursion from perf that could mess up the buffer
946 * if (trace_buf->recursion++)
947 * goto end_recursion;
948 *
949 * raw_data = trace_buf->buf;
950 *
951 * // Make recursion update visible before entering perf_tp_event
952 * // so that we protect from perf recursions.
953 *
954 * barrier();
955 *
956 * //zero dead bytes from alignment to avoid stack leak to userspace:
957 * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
958 * entry = (struct ftrace_raw_<call> *)raw_data;
959 * ent = &entry->ent;
960 * tracing_generic_entry_update(ent, irq_flags, pc);
961 * ent->type = event_call->id;
962 *
963 * <tstruct> <- do some jobs with dynamic arrays
964 *
965 * <assign> <- affect our values
966 *
967 * perf_tp_event(event_call->id, __addr, __count, entry,
968 * __entry_size); <- submit them to perf counter
969 *
970 * }
971 */
972
973 #ifdef CONFIG_PERF_EVENTS
974
975 #undef __entry
976 #define __entry entry
977
978 #undef __get_dynamic_array
979 #define __get_dynamic_array(field) \
980 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
981
982 #undef __get_str
983 #define __get_str(field) (char *)__get_dynamic_array(field)
984
985 #undef __perf_addr
986 #define __perf_addr(a) __addr = (a)
987
988 #undef __perf_count
989 #define __perf_count(c) __count = (c)
990
991 #undef DECLARE_EVENT_CLASS
992 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
993 static notrace void \
994 perf_trace_##call(void *__data, proto) \
995 { \
996 struct ftrace_event_call *event_call = __data; \
997 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
998 struct ftrace_raw_##call *entry; \
999 struct pt_regs __regs; \
1000 u64 __addr = 0, __count = 1; \
1001 struct hlist_head *head; \
1002 int __entry_size; \
1003 int __data_size; \
1004 int rctx; \
1005 \
1006 perf_fetch_caller_regs(&__regs); \
1007 \
1008 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
1009 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
1010 sizeof(u64)); \
1011 __entry_size -= sizeof(u32); \
1012 \
1013 if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
1014 "profile buffer not large enough")) \
1015 return; \
1016 \
1017 entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
1018 __entry_size, event_call->event.type, &__regs, &rctx); \
1019 if (!entry) \
1020 return; \
1021 \
1022 tstruct \
1023 \
1024 { assign; } \
1025 \
1026 head = this_cpu_ptr(event_call->perf_events); \
1027 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
1028 __count, &__regs, head); \
1029 }
1030
1031 /*
1032 * This part is compiled out, it is only here as a build time check
1033 * to make sure that if the tracepoint handling changes, the
1034 * perf probe will fail to compile unless it too is updated.
1035 */
1036 #undef DEFINE_EVENT
1037 #define DEFINE_EVENT(template, call, proto, args) \
1038 static inline void perf_test_probe_##call(void) \
1039 { \
1040 check_trace_callback_type_##call(perf_trace_##template); \
1041 }
1042
1043
1044 #undef DEFINE_EVENT_PRINT
1045 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
1046 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
1047
1048 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1049 #endif /* CONFIG_PERF_EVENTS */
1050
1051 #undef _TRACE_PROFILE_INIT
1052 #endif //0