2 * LTTng serializing code.
4 * Copyright Mathieu Desnoyers, March 2007.
6 * Dual LGPL v2.1/GPL v2 license.
8 * See this discussion about weirdness about passing va_list and then va_list to
9 * functions. (related to array argument passing). va_list seems to be
10 * implemented as an array on x86_64, but not on i386... This is why we pass a
11 * va_list * to ltt_vtrace.
15 #include <linux/ctype.h>
16 #include <linux/string.h>
17 #include <linux/module.h>
19 #include "ltt-tracer.h"
23 LTT_TYPE_UNSIGNED_INT
,
28 #define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
31 * Stack used to keep track of string length at size calculation, passed to
32 * string copy to handle racy input string updates.
33 * Can be used by any context; this is ensured by putting the stack position
34 * back to its original position after using it.
36 #define TRACER_STACK_LEN (PAGE_SIZE / sizeof(unsigned long))
37 static DEFINE_PER_CPU(unsigned long [TRACER_STACK_LEN
],
39 static DEFINE_PER_CPU(unsigned int, tracer_stack_pos
);
42 * Inspired from vsnprintf
44 * The serialization format string supports the basic printf format strings.
45 * In addition, it defines new formats that can be used to serialize more
46 * complex/non portable data structures.
51 * field_name #tracetype %ctype
52 * field_name #tracetype %ctype1 %ctype2 ...
54 * A conversion is performed between format string types supported by GCC and
55 * the trace type requested. GCC type is used to perform type checking on format
56 * strings. Trace type is used to specify the exact binary representation
57 * in the trace. A mapping is done between one or more GCC types to one trace
58 * type. Sign extension, if required by the conversion, is performed following
61 * If a gcc format is not declared with a trace format, the gcc format is
62 * also used as binary representation in the trace.
64 * Strings are supported with %s.
65 * A single tracetype (sequence) can take multiple c types as parameter.
71 * Note: to write a uint32_t in a trace, the following expression is recommended
72 * so it can be portable:
74 * ("#4u%lu", (unsigned long)var)
78 * Serialization specific formats :
90 * #1u%lu #2u%lu #4d%lu #8d%lu #llu%hu #d%lu
94 * n: (for network byte order)
96 * is written in the trace in network byte order.
98 * i.e.: #bn4u%lu, #n%lu, #b%u
101 * Variable length sequence
102 * #a #tracetype1 #tracetype2 %array_ptr %elem_size %num_elems
104 * #a specifies that this is a sequence
105 * #tracetype1 is the type of elements in the sequence
106 * #tracetype2 is the type of the element count
108 * array_ptr is a pointer to an array that contains members of size
110 * num_elems is the number of elements in the array.
111 * i.e.: #a #lu #lu %p %lu %u
114 * #k callback (taken from the probe data)
115 * The following % arguments are expected by the callback
117 * i.e.: #a #lu #lu #k %p
119 * Note: No conversion is done from floats to integers, nor from integers to
120 * floats between c types and trace types. float conversion from double to float
121 * or from float to double is also not supported.
124 * %*b expects sizeof(data), data
125 * where sizeof(data) is 1, 2, 4 or 8
127 * Fixed length struct, union or array.
128 * FIXME: unable to extract those sizes statically.
129 * %*r expects sizeof(*ptr), ptr
130 * %*.*r expects sizeof(*ptr), __alignof__(*ptr), ptr
131 * struct and unions removed.
132 * Fixed length array:
133 * [%p]#a[len #tracetype]
134 * i.e.: [%p]#a[12 #lu]
136 * Variable length sequence
137 * %*.*:*v expects sizeof(*ptr), __alignof__(*ptr), elem_num, ptr
138 * where elem_num is the number of elements in the sequence
141 const char *parse_trace_type(const char *fmt
, char *trace_size
,
142 enum ltt_type
*trace_type
,
143 unsigned long *attributes
)
145 int qualifier
; /* 'h', 'l', or 'L' for integer fields */
146 /* 'z' support added 23/7/1999 S.H. */
147 /* 'z' changed to 'Z' --davidm 1/25/99 */
148 /* 't' added for ptrdiff_t */
150 /* parse attributes. */
154 *attributes
|= LTT_ATTRIBUTE_NETWORK_BYTE_ORDER
;
159 /* get the conversion qualifier */
161 if (*fmt
== 'h' || *fmt
== 'l' || *fmt
== 'L' ||
162 *fmt
== 'Z' || *fmt
== 'z' || *fmt
== 't' ||
163 *fmt
== 'S' || *fmt
== '1' || *fmt
== '2' ||
164 *fmt
== '4' || *fmt
== 8) {
167 if (qualifier
== 'l' && *fmt
== 'l') {
175 *trace_type
= LTT_TYPE_UNSIGNED_INT
;
176 *trace_size
= sizeof(unsigned char);
179 *trace_type
= LTT_TYPE_STRING
;
182 *trace_type
= LTT_TYPE_UNSIGNED_INT
;
183 *trace_size
= sizeof(void *);
187 *trace_type
= LTT_TYPE_SIGNED_INT
;
193 *trace_type
= LTT_TYPE_UNSIGNED_INT
;
202 *trace_size
= sizeof(long long);
205 *trace_size
= sizeof(long);
209 *trace_size
= sizeof(size_t);
212 *trace_size
= sizeof(ptrdiff_t);
215 *trace_size
= sizeof(short);
218 *trace_size
= sizeof(uint8_t);
221 *trace_size
= sizeof(uint16_t);
224 *trace_size
= sizeof(uint32_t);
227 *trace_size
= sizeof(uint64_t);
230 *trace_size
= sizeof(int);
239 * Field width and precision are *not* supported.
243 const char *parse_c_type(const char *fmt
, char *c_size
, enum ltt_type
*c_type
,
246 int qualifier
; /* 'h', 'l', or 'L' for integer fields */
247 /* 'z' support added 23/7/1999 S.H. */
248 /* 'z' changed to 'Z' --davidm 1/25/99 */
249 /* 't' added for ptrdiff_t */
251 /* process flags : ignore standard print formats for now. */
263 /* get the conversion qualifier */
265 if (*fmt
== 'h' || *fmt
== 'l' || *fmt
== 'L' ||
266 *fmt
== 'Z' || *fmt
== 'z' || *fmt
== 't' ||
270 if (qualifier
== 'l' && *fmt
== 'l') {
278 *outfmt
++ = (char)qualifier
;
285 *c_type
= LTT_TYPE_UNSIGNED_INT
;
286 *c_size
= sizeof(unsigned char);
289 *c_type
= LTT_TYPE_STRING
;
292 *c_type
= LTT_TYPE_UNSIGNED_INT
;
293 *c_size
= sizeof(void *);
297 *c_type
= LTT_TYPE_SIGNED_INT
;
303 *c_type
= LTT_TYPE_UNSIGNED_INT
;
312 *c_size
= sizeof(long long);
315 *c_size
= sizeof(long);
319 *c_size
= sizeof(size_t);
322 *c_size
= sizeof(ptrdiff_t);
325 *c_size
= sizeof(short);
328 *c_size
= sizeof(int);
336 size_t serialize_trace_data(struct ltt_chanbuf
*buf
, size_t buf_offset
,
337 char trace_size
, enum ltt_type trace_type
,
338 char c_size
, enum ltt_type c_type
,
339 unsigned int *stack_pos_ctx
,
344 unsigned long v_ulong
;
353 * Be careful about sign extension here.
354 * Sign extension is done with the destination (trace) type.
356 switch (trace_type
) {
357 case LTT_TYPE_SIGNED_INT
:
360 tmp
.v_ulong
= (long)(int8_t)va_arg(*args
, int);
363 tmp
.v_ulong
= (long)(int16_t)va_arg(*args
, int);
366 tmp
.v_ulong
= (long)(int32_t)va_arg(*args
, int);
369 tmp
.v_uint64
= va_arg(*args
, int64_t);
375 case LTT_TYPE_UNSIGNED_INT
:
378 tmp
.v_ulong
= (unsigned long)(uint8_t)va_arg(*args
, unsigned int);
381 tmp
.v_ulong
= (unsigned long)(uint16_t)va_arg(*args
, unsigned int);
384 tmp
.v_ulong
= (unsigned long)(uint32_t)va_arg(*args
, unsigned int);
387 tmp
.v_uint64
= va_arg(*args
, uint64_t);
393 case LTT_TYPE_STRING
:
394 tmp
.v_string
.s
= va_arg(*args
, const char *);
395 if ((unsigned long)tmp
.v_string
.s
< PAGE_SIZE
)
396 tmp
.v_string
.s
= "<NULL>";
399 * Reserve tracer stack entry.
401 __get_cpu_var(tracer_stack_pos
)++;
402 WARN_ON_ONCE(__get_cpu_var(tracer_stack_pos
)
405 __get_cpu_var(tracer_stack
)[*stack_pos_ctx
] =
406 strlen(tmp
.v_string
.s
) + 1;
408 tmp
.v_string
.len
= __get_cpu_var(tracer_stack
)
409 [(*stack_pos_ctx
)++];
411 ltt_relay_strncpy(&buf
->a
, buf
->a
.chan
, buf_offset
,
412 tmp
.v_string
.s
, tmp
.v_string
.len
);
413 buf_offset
+= tmp
.v_string
.len
;
420 * If trace_size is lower or equal to 4 bytes, there is no sign
421 * extension to do because we are already encoded in a long. Therefore,
422 * we can combine signed and unsigned ops. 4 bytes float also works
423 * with this, because we do a simple copy of 4 bytes into 4 bytes
424 * without manipulation (and we do not support conversion from integers
426 * It is also the case if c_size is 8 bytes, which is the largest
429 if (ltt_get_alignment()) {
430 buf_offset
+= ltt_align(buf_offset
, trace_size
);
432 *largest_align
= max_t(int, *largest_align
, trace_size
);
434 if (trace_size
<= 4 || c_size
== 8) {
436 switch (trace_size
) {
439 ltt_relay_write(&buf
->a
, buf
->a
.chan
,
441 (uint8_t[]){ (uint8_t)tmp
.v_uint64
},
444 ltt_relay_write(&buf
->a
, buf
->a
.chan
,
446 (uint8_t[]){ (uint8_t)tmp
.v_ulong
},
451 ltt_relay_write(&buf
->a
, buf
->a
.chan
,
453 (uint16_t[]){ (uint16_t)tmp
.v_uint64
},
456 ltt_relay_write(&buf
->a
, buf
->a
.chan
,
458 (uint16_t[]){ (uint16_t)tmp
.v_ulong
},
463 ltt_relay_write(&buf
->a
, buf
->a
.chan
,
465 (uint32_t[]){ (uint32_t)tmp
.v_uint64
},
468 ltt_relay_write(&buf
->a
, buf
->a
.chan
,
470 (uint32_t[]){ (uint32_t)tmp
.v_ulong
},
475 * c_size cannot be other than 8 here because
478 ltt_relay_write(&buf
->a
, buf
->a
.chan
, buf_offset
,
479 (uint64_t[]){ (uint64_t)tmp
.v_uint64
},
486 buf_offset
+= trace_size
;
490 * Perform sign extension.
493 switch (trace_type
) {
494 case LTT_TYPE_SIGNED_INT
:
495 ltt_relay_write(&buf
->a
, buf
->a
.chan
, buf_offset
,
496 (int64_t[]){ (int64_t)tmp
.v_ulong
},
499 case LTT_TYPE_UNSIGNED_INT
:
500 ltt_relay_write(&buf
->a
, buf
->a
.chan
, buf_offset
,
501 (uint64_t[]){ (uint64_t)tmp
.v_ulong
},
508 buf_offset
+= trace_size
;
517 ltt_serialize_data(struct ltt_chanbuf
*buf
, size_t buf_offset
,
518 struct ltt_serialize_closure
*closure
,
519 void *serialize_private
, unsigned int stack_pos_ctx
,
520 int *largest_align
, const char *fmt
, va_list *args
)
522 char trace_size
= 0, c_size
= 0; /*
523 * 0 (unset), 1, 2, 4, 8 bytes.
525 enum ltt_type trace_type
= LTT_TYPE_NONE
, c_type
= LTT_TYPE_NONE
;
526 unsigned long attributes
= 0;
528 for (; *fmt
; ++fmt
) {
532 ++fmt
; /* skip first '#' */
533 if (*fmt
== '#') /* Escaped ## */
536 fmt
= parse_trace_type(fmt
, &trace_size
, &trace_type
,
541 ++fmt
; /* skip first '%' */
542 if (*fmt
== '%') /* Escaped %% */
544 fmt
= parse_c_type(fmt
, &c_size
, &c_type
, NULL
);
546 * Output c types if no trace types has been
551 if (trace_type
== LTT_TYPE_NONE
)
553 if (c_type
== LTT_TYPE_STRING
)
554 trace_type
= LTT_TYPE_STRING
;
555 /* perform trace write */
556 buf_offset
= serialize_trace_data(buf
, buf_offset
,
565 trace_type
= LTT_TYPE_NONE
;
566 c_size
= LTT_TYPE_NONE
;
569 /* default is to skip the text, doing nothing */
574 EXPORT_SYMBOL_GPL(ltt_serialize_data
);
577 uint64_t unserialize_base_type(struct ltt_chanbuf
*buf
,
578 size_t *ppos
, char trace_size
,
579 enum ltt_type trace_type
)
583 *ppos
+= ltt_align(*ppos
, trace_size
);
584 ltt_relay_read(&buf
->a
, *ppos
, &tmp
, trace_size
);
587 switch (trace_type
) {
588 case LTT_TYPE_SIGNED_INT
:
589 switch (trace_size
) {
591 return (uint64_t)*(int8_t *)&tmp
;
593 return (uint64_t)*(int16_t *)&tmp
;
595 return (uint64_t)*(int32_t *)&tmp
;
600 case LTT_TYPE_UNSIGNED_INT
:
601 switch (trace_size
) {
603 return (uint64_t)*(uint8_t *)&tmp
;
605 return (uint64_t)*(uint16_t *)&tmp
;
607 return (uint64_t)*(uint32_t *)&tmp
;
621 int serialize_printf_data(struct ltt_chanbuf
*buf
, size_t *ppos
,
622 char trace_size
, enum ltt_type trace_type
,
623 char c_size
, enum ltt_type c_type
, char *output
,
624 ssize_t outlen
, const char *outfmt
)
627 outlen
= outlen
< 0 ? 0 : outlen
;
629 if (trace_type
== LTT_TYPE_STRING
) {
630 size_t len
= ltt_relay_read_cstr(&buf
->a
, *ppos
, output
,
636 value
= unserialize_base_type(buf
, ppos
, trace_size
, trace_type
);
639 return snprintf(output
, outlen
, outfmt
, value
);
641 return snprintf(output
, outlen
, outfmt
, (unsigned int)value
);
645 * ltt_serialize_printf - Format a string and place it in a buffer
646 * @buf: The ltt-relay buffer that store binary data
647 * @buf_offset: binary data's offset in @buf (should be masked to use as offset)
648 * @msg_size: return message's length
649 * @output: The buffer to place the result into
650 * @outlen: The size of the buffer, including the trailing '\0'
651 * @fmt: The format string to use
653 * The return value is the number of characters which would
654 * be generated for the given input, excluding the trailing
655 * '\0', as per ISO C99. If the return is greater than or equal to @outlen,
656 * the resulting string is truncated.
658 size_t ltt_serialize_printf(struct ltt_chanbuf
*buf
, unsigned long buf_offset
,
659 size_t *msg_size
, char *output
, size_t outlen
,
662 char trace_size
= 0, c_size
= 0; /*
663 * 0 (unset), 1, 2, 4, 8 bytes.
665 enum ltt_type trace_type
= LTT_TYPE_NONE
, c_type
= LTT_TYPE_NONE
;
666 unsigned long attributes
= 0;
667 char outfmt
[4] = "%";
670 size_t msgpos
= buf_offset
;
672 for (; *fmt
; ++fmt
) {
676 ++fmt
; /* skip first '#' */
677 if (*fmt
== '#') { /* Escaped ## */
679 output
[outpos
] = '#';
684 fmt
= parse_trace_type(fmt
, &trace_size
, &trace_type
,
689 ++fmt
; /* skip first '%' */
690 if (*fmt
== '%') { /* Escaped %% */
692 output
[outpos
] = '%';
696 fmt
= parse_c_type(fmt
, &c_size
, &c_type
, outfmt
+ 1);
698 * Output c types if no trace types has been
703 if (trace_type
== LTT_TYPE_NONE
)
705 if (c_type
== LTT_TYPE_STRING
)
706 trace_type
= LTT_TYPE_STRING
;
708 /* perform trace printf */
709 len
= serialize_printf_data(buf
, &msgpos
, trace_size
,
710 trace_type
, c_size
, c_type
,
712 outlen
- outpos
, outfmt
);
716 trace_type
= LTT_TYPE_NONE
;
717 c_size
= LTT_TYPE_NONE
;
722 output
[outpos
] = *fmt
;
728 *msg_size
= (size_t)(msgpos
- buf_offset
);
730 * Make sure we end output with terminating \0 when truncated.
732 if (outpos
>= outlen
+ 1)
733 output
[outlen
] = '\0';
736 EXPORT_SYMBOL_GPL(ltt_serialize_printf
);
738 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
740 unsigned int ltt_fmt_largest_align(size_t align_drift
, const char *fmt
)
742 char trace_size
= 0, c_size
= 0;
743 enum ltt_type trace_type
= LTT_TYPE_NONE
, c_type
= LTT_TYPE_NONE
;
744 unsigned long attributes
= 0;
745 int largest_align
= 1;
747 for (; *fmt
; ++fmt
) {
751 ++fmt
; /* skip first '#' */
752 if (*fmt
== '#') /* Escaped ## */
755 fmt
= parse_trace_type(fmt
, &trace_size
, &trace_type
,
758 largest_align
= max_t(int, largest_align
, trace_size
);
759 if (largest_align
>= ltt_get_alignment())
764 ++fmt
; /* skip first '%' */
765 if (*fmt
== '%') /* Escaped %% */
767 fmt
= parse_c_type(fmt
, &c_size
, &c_type
, NULL
);
769 * Output c types if no trace types has been
774 if (trace_type
== LTT_TYPE_NONE
)
776 if (c_type
== LTT_TYPE_STRING
)
777 trace_type
= LTT_TYPE_STRING
;
779 largest_align
= max_t(int, largest_align
, trace_size
);
780 if (largest_align
>= ltt_get_alignment())
785 trace_type
= LTT_TYPE_NONE
;
786 c_size
= LTT_TYPE_NONE
;
792 largest_align
= min_t(int, largest_align
, ltt_get_alignment());
793 return (largest_align
- align_drift
) & (largest_align
- 1);
795 EXPORT_SYMBOL_GPL(ltt_fmt_largest_align
);
800 * Calculate data size
801 * Assume that the padding for alignment starts at a sizeof(void *) address.
804 size_t ltt_get_data_size(struct ltt_serialize_closure
*closure
,
805 void *serialize_private
, unsigned int stack_pos_ctx
,
806 int *largest_align
, const char *fmt
, va_list *args
)
808 ltt_serialize_cb cb
= closure
->callbacks
[0];
810 return (size_t)cb(NULL
, 0, closure
, serialize_private
, stack_pos_ctx
,
811 largest_align
, fmt
, args
);
815 void ltt_write_event_data(struct ltt_chanbuf
*buf
, size_t buf_offset
,
816 struct ltt_serialize_closure
*closure
,
817 void *serialize_private
, unsigned int stack_pos_ctx
,
818 int largest_align
, const char *fmt
, va_list *args
)
820 ltt_serialize_cb cb
= closure
->callbacks
[0];
822 buf_offset
+= ltt_align(buf_offset
, largest_align
);
823 cb(buf
, buf_offset
, closure
, serialize_private
, stack_pos_ctx
, NULL
,
829 void ltt_vtrace(const struct marker
*mdata
, void *probe_data
, void *call_data
,
830 const char *fmt
, va_list *args
)
832 int largest_align
, ret
;
833 struct ltt_active_marker
*pdata
;
835 size_t data_size
, slot_size
;
836 unsigned int chan_index
;
837 struct ltt_chanbuf
*buf
;
838 struct ltt_chan
*chan
;
839 struct ltt_trace
*trace
, *dest_trace
= NULL
;
843 struct ltt_serialize_closure closure
;
844 struct ltt_probe_private_data
*private_data
= call_data
;
845 void *serialize_private
= NULL
;
848 unsigned int stack_pos_ctx
;
851 * This test is useful for quickly exiting static tracing when no trace
852 * is active. We expect to have an active trace when we get here.
854 if (unlikely(ltt_traces
.num_active_traces
== 0))
857 rcu_read_lock_sched_notrace();
858 cpu
= smp_processor_id();
859 __get_cpu_var(ltt_nesting
)++;
860 stack_pos_ctx
= __get_cpu_var(tracer_stack_pos
);
862 * asm volatile and "memory" clobber prevent the compiler from moving
863 * instructions out of the ltt nesting count. This is required to ensure
864 * that probe side-effects which can cause recursion (e.g. unforeseen
865 * traps, divisions by 0, ...) are triggered within the incremented
866 * nesting count section.
869 pdata
= (struct ltt_active_marker
*)probe_data
;
870 eID
= mdata
->event_id
;
871 chan_index
= mdata
->channel_id
;
872 closure
.callbacks
= pdata
->probe
->callbacks
;
874 if (unlikely(private_data
)) {
875 dest_trace
= private_data
->trace
;
876 if (private_data
->serializer
)
877 closure
.callbacks
= &private_data
->serializer
;
878 serialize_private
= private_data
->serialize_private
;
881 va_copy(args_copy
, *args
);
883 * Assumes event payload to start on largest_align alignment.
885 largest_align
= 1; /* must be non-zero for ltt_align */
886 data_size
= ltt_get_data_size(&closure
, serialize_private
,
887 stack_pos_ctx
, &largest_align
,
889 largest_align
= min_t(int, largest_align
, sizeof(void *));
892 /* Iterate on each trace */
893 list_for_each_entry_rcu(trace
, <t_traces
.head
, list
) {
895 * Expect the filter to filter out events. If we get here,
896 * we went through tracepoint activation as a first step.
898 if (unlikely(dest_trace
&& trace
!= dest_trace
))
900 if (unlikely(!trace
->active
))
902 if (unlikely(!ltt_run_filter(trace
, eID
)))
904 #ifdef LTT_DEBUG_EVENT_SIZE
905 rflags
= LTT_RFLAG_ID_SIZE
;
907 if (unlikely(eID
>= LTT_FREE_EVENTS
))
908 rflags
= LTT_RFLAG_ID
;
913 * Skip channels added after trace creation.
915 if (unlikely(chan_index
>= trace
->nr_channels
))
917 chan
= &trace
->channels
[chan_index
];
921 /* reserve space : header and data */
922 ret
= ltt_reserve_slot(chan
, trace
, data_size
, largest_align
,
923 cpu
, &buf
, &slot_size
, &buf_offset
,
925 if (unlikely(ret
< 0))
926 continue; /* buffer full */
928 va_copy(args_copy
, *args
);
929 /* Out-of-order write : header and data */
930 buf_offset
= ltt_write_event_header(&buf
->a
, &chan
->a
,
931 buf_offset
, eID
, data_size
,
933 ltt_write_event_data(buf
, buf_offset
, &closure
,
934 serialize_private
, stack_pos_ctx
,
935 largest_align
, fmt
, &args_copy
);
937 /* Out-of-order commit */
938 ltt_commit_slot(buf
, chan
, buf_offset
, data_size
, slot_size
);
941 * asm volatile and "memory" clobber prevent the compiler from moving
942 * instructions out of the ltt nesting count. This is required to ensure
943 * that probe side-effects which can cause recursion (e.g. unforeseen
944 * traps, divisions by 0, ...) are triggered within the incremented
945 * nesting count section.
948 __get_cpu_var(tracer_stack_pos
) = stack_pos_ctx
;
949 __get_cpu_var(ltt_nesting
)--;
950 rcu_read_unlock_sched_notrace();
952 EXPORT_SYMBOL_GPL(ltt_vtrace
);
955 void ltt_trace(const struct marker
*mdata
, void *probe_data
, void *call_data
,
956 const char *fmt
, ...)
961 ltt_vtrace(mdata
, probe_data
, call_data
, fmt
, &args
);
964 EXPORT_SYMBOL_GPL(ltt_trace
);
966 MODULE_LICENSE("GPL and additional rights");
967 MODULE_AUTHOR("Mathieu Desnoyers");
968 MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Serializer");