2 * LTTng serializing code.
4 * Copyright Mathieu Desnoyers, March 2007.
6 * Dual LGPL v2.1/GPL v2 license.
8 * See this discussion about weirdness about passing va_list and then va_list to
9 * functions. (related to array argument passing). va_list seems to be
10 * implemented as an array on x86_64, but not on i386... This is why we pass a
11 * va_list * to ltt_vtrace.
15 #include <linux/ctype.h>
16 #include <linux/string.h>
17 #include <linux/module.h>
19 #include "ltt-tracer.h"
20 #include "ltt-relay-lockless.h"
/*
 * Fragment of the serializer's ltt_type enum tags plus the attribute flag
 * used to request network-byte-order emission.
 * NOTE(review): this chunk is a partial extraction -- the embedded original
 * line numbers jump, so surrounding enum members and definitions are missing.
 */
24 LTT_TYPE_UNSIGNED_INT
,
29 #define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
32 * Stack used to keep track of string length at size calculation, passed to
33 * string copy to handle racy input string updates.
34 * Can be used by any context; this is ensured by putting the stack position
35 * back to its original position after using it.
/*
 * Per-CPU scratch stack (one page worth of unsigned longs) and its current
 * position; used to remember string lengths between the size-calculation
 * pass and the copy pass.
 */
37 #define TRACER_STACK_LEN (PAGE_SIZE / sizeof(unsigned long))
38 static DEFINE_PER_CPU(unsigned long [TRACER_STACK_LEN
],
40 static DEFINE_PER_CPU(unsigned int, tracer_stack_pos
);
43 * Inspired from vsnprintf
45 * The serialization format string supports the basic printf format strings.
46 * In addition, it defines new formats that can be used to serialize more
47 * complex/non portable data structures.
52 * field_name #tracetype %ctype
53 * field_name #tracetype %ctype1 %ctype2 ...
55 * A conversion is performed between format string types supported by GCC and
56 * the trace type requested. GCC type is used to perform type checking on format
57 * strings. Trace type is used to specify the exact binary representation
58 * in the trace. A mapping is done between one or more GCC types to one trace
59 * type. Sign extension, if required by the conversion, is performed following
62 * If a gcc format is not declared with a trace format, the gcc format is
63 * also used as binary representation in the trace.
65 * Strings are supported with %s.
66 * A single tracetype (sequence) can take multiple c types as parameter.
72 * Note: to write a uint32_t in a trace, the following expression is recommended
 73 * so it can be portable:
75 * ("#4u%lu", (unsigned long)var)
79 * Serialization specific formats :
91 * #1u%lu #2u%lu #4d%lu #8d%lu #llu%hu #d%lu
95 * n: (for network byte order)
97 * is written in the trace in network byte order.
99 * i.e.: #bn4u%lu, #n%lu, #b%u
102 * Variable length sequence
103 * #a #tracetype1 #tracetype2 %array_ptr %elem_size %num_elems
105 * #a specifies that this is a sequence
106 * #tracetype1 is the type of elements in the sequence
107 * #tracetype2 is the type of the element count
109 * array_ptr is a pointer to an array that contains members of size
111 * num_elems is the number of elements in the array.
112 * i.e.: #a #lu #lu %p %lu %u
115 * #k callback (taken from the probe data)
 116 * The following % arguments are expected by the callback
118 * i.e.: #a #lu #lu #k %p
120 * Note: No conversion is done from floats to integers, nor from integers to
121 * floats between c types and trace types. float conversion from double to float
122 * or from float to double is also not supported.
125 * %*b expects sizeof(data), data
126 * where sizeof(data) is 1, 2, 4 or 8
128 * Fixed length struct, union or array.
129 * FIXME: unable to extract those sizes statically.
130 * %*r expects sizeof(*ptr), ptr
131 * %*.*r expects sizeof(*ptr), __alignof__(*ptr), ptr
132 * struct and unions removed.
133 * Fixed length array:
134 * [%p]#a[len #tracetype]
135 * i.e.: [%p]#a[12 #lu]
137 * Variable length sequence
138 * %*.*:*v expects sizeof(*ptr), __alignof__(*ptr), elem_num, ptr
139 * where elem_num is the number of elements in the sequence
/*
 * parse_trace_type - parse one "#..." trace-type specifier from the event
 * format string, returning the advanced fmt pointer and filling in
 * *trace_size (in bytes), *trace_type and *attributes (e.g. the network
 * byte order flag).
 * NOTE(review): interior lines of this function are missing from the
 * extraction (qualifier switch bodies, case labels, closing braces).
 *
 * FIX: '*fmt == 8' compared against the integer 8 (a control character)
 * instead of the character literal '8', so the 8-byte size qualifier could
 * never be recognized; the sibling comparisons use '1', '2', '4'.
 */
142 const char *parse_trace_type(const char *fmt
, char *trace_size
,
143 enum ltt_type
*trace_type
,
144 unsigned long *attributes
)
146 int qualifier
; /* 'h', 'l', or 'L' for integer fields */
147 /* 'z' support added 23/7/1999 S.H. */
148 /* 'z' changed to 'Z' --davidm 1/25/99 */
149 /* 't' added for ptrdiff_t */
151 /* parse attributes. */
155 *attributes
|= LTT_ATTRIBUTE_NETWORK_BYTE_ORDER
;
160 /* get the conversion qualifier */
162 if (*fmt
== 'h' || *fmt
== 'l' || *fmt
== 'L' ||
163 *fmt
== 'Z' || *fmt
== 'z' || *fmt
== 't' ||
164 *fmt
== 'S' || *fmt
== '1' || *fmt
== '2' ||
165 *fmt
== '4' || *fmt
== '8') {
168 if (qualifier
== 'l' && *fmt
== 'l') {
/* Map the conversion character to a trace type and binary size. */
176 *trace_type
= LTT_TYPE_UNSIGNED_INT
;
177 *trace_size
= sizeof(unsigned char);
180 *trace_type
= LTT_TYPE_STRING
;
183 *trace_type
= LTT_TYPE_UNSIGNED_INT
;
184 *trace_size
= sizeof(void *);
188 *trace_type
= LTT_TYPE_SIGNED_INT
;
194 *trace_type
= LTT_TYPE_UNSIGNED_INT
;
/* Size selection driven by the parsed qualifier ('ll', 'l', 'z', 't', ...). */
203 *trace_size
= sizeof(long long);
206 *trace_size
= sizeof(long);
210 *trace_size
= sizeof(size_t);
213 *trace_size
= sizeof(ptrdiff_t);
216 *trace_size
= sizeof(short);
219 *trace_size
= sizeof(uint8_t);
222 *trace_size
= sizeof(uint16_t);
225 *trace_size
= sizeof(uint32_t);
228 *trace_size
= sizeof(uint64_t);
231 *trace_size
= sizeof(int);
240 * Field width and precision are *not* supported.
/*
 * parse_c_type - parse one "%..." C-type conversion from the format string,
 * filling in *c_size (bytes) and *c_type; when outfmt is non-NULL the
 * qualifier character is copied into it so a printf format can be rebuilt.
 * Field width and precision are not supported (see comment at original
 * line 240).
 * NOTE(review): interior lines (flag-skipping loop, switch bodies, case
 * labels, closing braces) are missing from this extraction.
 */
244 const char *parse_c_type(const char *fmt
, char *c_size
, enum ltt_type
*c_type
,
247 int qualifier
; /* 'h', 'l', or 'L' for integer fields */
248 /* 'z' support added 23/7/1999 S.H. */
249 /* 'z' changed to 'Z' --davidm 1/25/99 */
250 /* 't' added for ptrdiff_t */
252 /* process flags : ignore standard print formats for now. */
264 /* get the conversion qualifier */
266 if (*fmt
== 'h' || *fmt
== 'l' || *fmt
== 'L' ||
267 *fmt
== 'Z' || *fmt
== 'z' || *fmt
== 't' ||
271 if (qualifier
== 'l' && *fmt
== 'l') {
/* Record the qualifier in the rebuilt output format, when requested. */
279 *outfmt
++ = (char)qualifier
;
/* Map the conversion character to a C type and size. */
286 *c_type
= LTT_TYPE_UNSIGNED_INT
;
287 *c_size
= sizeof(unsigned char);
290 *c_type
= LTT_TYPE_STRING
;
293 *c_type
= LTT_TYPE_UNSIGNED_INT
;
294 *c_size
= sizeof(void *);
298 *c_type
= LTT_TYPE_SIGNED_INT
;
304 *c_type
= LTT_TYPE_UNSIGNED_INT
;
313 *c_size
= sizeof(long long);
316 *c_size
= sizeof(long);
320 *c_size
= sizeof(size_t);
323 *c_size
= sizeof(ptrdiff_t);
326 *c_size
= sizeof(short);
329 *c_size
= sizeof(int);
/*
 * serialize_trace_data - fetch one vararg according to (c_size, c_type),
 * convert it to the on-trace representation (trace_size, trace_type) and
 * append it to the relay buffer; returns the updated buffer offset.
 * Called both for the size-calculation pass (buf == NULL upstream) and for
 * the actual write pass -- the per-CPU tracer stack carries string lengths
 * between the two so a racing writer cannot change the length under us.
 * NOTE(review): many interior lines (switch case labels, lengths passed to
 * ltt_relay_write, break statements, braces) are missing from this
 * extraction.
 */
337 size_t serialize_trace_data(struct ltt_chanbuf
*buf
, size_t buf_offset
,
338 char trace_size
, enum ltt_type trace_type
,
339 char c_size
, enum ltt_type c_type
,
340 unsigned int *stack_pos_ctx
,
345 unsigned long v_ulong
;
354 * Be careful about sign extension here.
355 * Sign extension is done with the destination (trace) type.
/* First: pull the argument out of *args, sign/zero-extending per type. */
357 switch (trace_type
) {
358 case LTT_TYPE_SIGNED_INT
:
361 tmp
.v_ulong
= (long)(int8_t)va_arg(*args
, int);
364 tmp
.v_ulong
= (long)(int16_t)va_arg(*args
, int);
367 tmp
.v_ulong
= (long)(int32_t)va_arg(*args
, int);
370 tmp
.v_uint64
= va_arg(*args
, int64_t);
376 case LTT_TYPE_UNSIGNED_INT
:
379 tmp
.v_ulong
= (unsigned long)(uint8_t)va_arg(*args
, unsigned int);
382 tmp
.v_ulong
= (unsigned long)(uint16_t)va_arg(*args
, unsigned int);
385 tmp
.v_ulong
= (unsigned long)(uint32_t)va_arg(*args
, unsigned int);
388 tmp
.v_uint64
= va_arg(*args
, uint64_t);
394 case LTT_TYPE_STRING
:
395 tmp
.v_string
.s
= va_arg(*args
, const char *);
/* Pointers below PAGE_SIZE (incl. NULL) are treated as invalid strings. */
396 if ((unsigned long)tmp
.v_string
.s
< PAGE_SIZE
)
397 tmp
.v_string
.s
= "<NULL>";
400 * Reserve tracer stack entry.
/*
 * Size pass: push strlen+1 onto the per-CPU stack; write pass: pop the
 * previously recorded length so both passes agree even if the string
 * is concurrently modified.
 */
402 __get_cpu_var(tracer_stack_pos
)++;
403 WARN_ON_ONCE(__get_cpu_var(tracer_stack_pos
)
406 __get_cpu_var(tracer_stack
)[*stack_pos_ctx
] =
407 strlen(tmp
.v_string
.s
) + 1;
409 tmp
.v_string
.len
= __get_cpu_var(tracer_stack
)
410 [(*stack_pos_ctx
)++];
412 ltt_relay_strncpy(&buf
->a
, buf
->a
.chan
, buf_offset
,
413 tmp
.v_string
.s
, tmp
.v_string
.len
);
414 buf_offset
+= tmp
.v_string
.len
;
421 * If trace_size is lower or equal to 4 bytes, there is no sign
422 * extension to do because we are already encoded in a long. Therefore,
423 * we can combine signed and unsigned ops. 4 bytes float also works
424 * with this, because we do a simple copy of 4 bytes into 4 bytes
425 * without manipulation (and we do not support conversion from integers
427 * It is also the case if c_size is 8 bytes, which is the largest
/* Align the write when the trace requires natural alignment. */
430 if (ltt_get_alignment()) {
431 buf_offset
+= ltt_align(buf_offset
, trace_size
);
433 *largest_align
= max_t(int, *largest_align
, trace_size
);
435 if (trace_size
<= 4 || c_size
== 8) {
437 switch (trace_size
) {
440 ltt_relay_write(&buf
->a
, buf
->a
.chan
,
442 (uint8_t[]){ (uint8_t)tmp
.v_uint64
},
445 ltt_relay_write(&buf
->a
, buf
->a
.chan
,
447 (uint8_t[]){ (uint8_t)tmp
.v_ulong
},
452 ltt_relay_write(&buf
->a
, buf
->a
.chan
,
454 (uint16_t[]){ (uint16_t)tmp
.v_uint64
},
457 ltt_relay_write(&buf
->a
, buf
->a
.chan
,
459 (uint16_t[]){ (uint16_t)tmp
.v_ulong
},
464 ltt_relay_write(&buf
->a
, buf
->a
.chan
,
466 (uint32_t[]){ (uint32_t)tmp
.v_uint64
},
469 ltt_relay_write(&buf
->a
, buf
->a
.chan
,
471 (uint32_t[]){ (uint32_t)tmp
.v_ulong
},
476 * c_size cannot be other than 8 here because
479 ltt_relay_write(&buf
->a
, buf
->a
.chan
, buf_offset
,
480 (uint64_t[]){ (uint64_t)tmp
.v_uint64
},
487 buf_offset
+= trace_size
;
491 * Perform sign extension.
/* trace_size is 8 but the C value was a long: widen with sign extension. */
494 switch (trace_type
) {
495 case LTT_TYPE_SIGNED_INT
:
496 ltt_relay_write(&buf
->a
, buf
->a
.chan
, buf_offset
,
497 (int64_t[]){ (int64_t)tmp
.v_ulong
},
500 case LTT_TYPE_UNSIGNED_INT
:
501 ltt_relay_write(&buf
->a
, buf
->a
.chan
, buf_offset
,
502 (uint64_t[]){ (uint64_t)tmp
.v_ulong
},
509 buf_offset
+= trace_size
;
/*
 * ltt_serialize_data - default serialization callback: walk the event format
 * string, pairing each "#tracetype" with the following "%ctype" conversion,
 * and emit every argument via serialize_trace_data(); returns the final
 * buffer offset (size pass runs with a NULL buffer upstream).
 * NOTE(review): interior lines (switch on *fmt, argument lists, braces) are
 * missing from this extraction.
 */
518 ltt_serialize_data(struct ltt_chanbuf
*buf
, size_t buf_offset
,
519 struct ltt_serialize_closure
*closure
,
520 void *serialize_private
, unsigned int stack_pos_ctx
,
521 int *largest_align
, const char *fmt
, va_list *args
)
523 char trace_size
= 0, c_size
= 0; /*
524 * 0 (unset), 1, 2, 4, 8 bytes.
526 enum ltt_type trace_type
= LTT_TYPE_NONE
, c_type
= LTT_TYPE_NONE
;
527 unsigned long attributes
= 0;
529 for (; *fmt
; ++fmt
) {
533 ++fmt
; /* skip first '#' */
534 if (*fmt
== '#') /* Escaped ## */
537 fmt
= parse_trace_type(fmt
, &trace_size
, &trace_type
,
542 ++fmt
; /* skip first '%' */
543 if (*fmt
== '%') /* Escaped %% */
545 fmt
= parse_c_type(fmt
, &c_size
, &c_type
, NULL
);
547 * Output c types if no trace types has been
/* Fall back to the C type as the trace representation when unset. */
552 if (trace_type
== LTT_TYPE_NONE
)
554 if (c_type
== LTT_TYPE_STRING
)
555 trace_type
= LTT_TYPE_STRING
;
556 /* perform trace write */
557 buf_offset
= serialize_trace_data(buf
, buf_offset
,
/* Reset parse state for the next #type/%ctype pair. */
566 trace_type
= LTT_TYPE_NONE
;
/*
 * NOTE(review): assigning the enum constant LTT_TYPE_NONE to c_size (a
 * byte-size variable) looks like it was meant to be c_type = LTT_TYPE_NONE
 * and/or c_size = 0 -- confirm against the upstream LTTng source.
 */
567 c_size
= LTT_TYPE_NONE
;
570 /* default is to skip the text, doing nothing */
575 EXPORT_SYMBOL_GPL(ltt_serialize_data
);
/*
 * unserialize_base_type - read one scalar of trace_size bytes back out of
 * the relay buffer at *ppos (after alignment), widening it to uint64_t with
 * sign extension for signed trace types; advances *ppos.
 * NOTE(review): case labels, the *ppos advance after the read, and the
 * 8-byte / default branches are missing from this extraction.
 */
578 uint64_t unserialize_base_type(struct ltt_chanbuf
*buf
,
579 size_t *ppos
, char trace_size
,
580 enum ltt_type trace_type
)
584 *ppos
+= ltt_align(*ppos
, trace_size
);
585 ltt_relay_read(&buf
->a
, *ppos
, &tmp
, trace_size
);
588 switch (trace_type
) {
589 case LTT_TYPE_SIGNED_INT
:
590 switch (trace_size
) {
592 return (uint64_t)*(int8_t *)&tmp
;
594 return (uint64_t)*(int16_t *)&tmp
;
596 return (uint64_t)*(int32_t *)&tmp
;
601 case LTT_TYPE_UNSIGNED_INT
:
602 switch (trace_size
) {
604 return (uint64_t)*(uint8_t *)&tmp
;
606 return (uint64_t)*(uint16_t *)&tmp
;
608 return (uint64_t)*(uint32_t *)&tmp
;
/*
 * serialize_printf_data - format one field read back from the relay buffer
 * into 'output' using the rebuilt printf format 'outfmt'; strings are copied
 * with ltt_relay_read_cstr, scalars go through unserialize_base_type().
 * Returns the snprintf()-style produced length.
 * NOTE(review): interior lines (the size-based dispatch between the two
 * snprintf calls, *ppos updates, braces) are missing from this extraction.
 */
622 int serialize_printf_data(struct ltt_chanbuf
*buf
, size_t *ppos
,
623 char trace_size
, enum ltt_type trace_type
,
624 char c_size
, enum ltt_type c_type
, char *output
,
625 ssize_t outlen
, const char *outfmt
)
/* Clamp negative remaining space to zero before using it as a size. */
628 outlen
= outlen
< 0 ? 0 : outlen
;
630 if (trace_type
== LTT_TYPE_STRING
) {
631 size_t len
= ltt_relay_read_cstr(&buf
->a
, *ppos
, output
,
637 value
= unserialize_base_type(buf
, ppos
, trace_size
, trace_type
);
640 return snprintf(output
, outlen
, outfmt
, value
);
642 return snprintf(output
, outlen
, outfmt
, (unsigned int)value
);
646 * ltt_serialize_printf - Format a string and place it in a buffer
647 * @buf: The ltt-relay buffer that store binary data
648 * @buf_offset: binary data's offset in @buf (should be masked to use as offset)
649 * @msg_size: return message's length
650 * @output: The buffer to place the result into
651 * @outlen: The size of the buffer, including the trailing '\0'
652 * @fmt: The format string to use
654 * The return value is the number of characters which would
655 * be generated for the given input, excluding the trailing
656 * '\0', as per ISO C99. If the return is greater than or equal to @outlen,
657 * the resulting string is truncated.
/*
 * ltt_serialize_printf - render a binary event payload back into text.
 * Walks the same #tracetype/%ctype format string used at serialization
 * time, reading fields from the relay buffer at buf_offset and formatting
 * them into 'output' (at most outlen bytes); *msg_size receives the number
 * of payload bytes consumed.  Return semantics follow C99 snprintf (see the
 * block comment above at original lines 646-657).
 * NOTE(review): interior lines (outpos bookkeeping, continue statements,
 * braces) are missing from this extraction.
 */
659 size_t ltt_serialize_printf(struct ltt_chanbuf
*buf
, unsigned long buf_offset
,
660 size_t *msg_size
, char *output
, size_t outlen
,
663 char trace_size
= 0, c_size
= 0; /*
664 * 0 (unset), 1, 2, 4, 8 bytes.
666 enum ltt_type trace_type
= LTT_TYPE_NONE
, c_type
= LTT_TYPE_NONE
;
667 unsigned long attributes
= 0;
/* outfmt rebuilds a tiny "%<qualifier><conv>" format for each field. */
668 char outfmt
[4] = "%";
671 size_t msgpos
= buf_offset
;
673 for (; *fmt
; ++fmt
) {
677 ++fmt
; /* skip first '#' */
678 if (*fmt
== '#') { /* Escaped ## */
680 output
[outpos
] = '#';
685 fmt
= parse_trace_type(fmt
, &trace_size
, &trace_type
,
690 ++fmt
; /* skip first '%' */
691 if (*fmt
== '%') { /* Escaped %% */
693 output
[outpos
] = '%';
697 fmt
= parse_c_type(fmt
, &c_size
, &c_type
, outfmt
+ 1);
699 * Output c types if no trace types has been
/* Fall back to the C type as the trace representation when unset. */
704 if (trace_type
== LTT_TYPE_NONE
)
706 if (c_type
== LTT_TYPE_STRING
)
707 trace_type
= LTT_TYPE_STRING
;
709 /* perform trace printf */
710 len
= serialize_printf_data(buf
, &msgpos
, trace_size
,
711 trace_type
, c_size
, c_type
,
713 outlen
- outpos
, outfmt
);
/* Reset parse state for the next #type/%ctype pair. */
717 trace_type
= LTT_TYPE_NONE
;
/*
 * NOTE(review): assigning enum LTT_TYPE_NONE to c_size (a byte-size
 * variable) looks like it was meant to be c_type = LTT_TYPE_NONE and/or
 * c_size = 0 -- confirm against the upstream LTTng source.
 */
718 c_size
= LTT_TYPE_NONE
;
/* Literal format text is copied through verbatim. */
723 output
[outpos
] = *fmt
;
729 *msg_size
= (size_t)(msgpos
- buf_offset
);
731 * Make sure we end output with terminating \0 when truncated.
/*
 * NOTE(review): output[outlen] stores one byte past an outlen-sized buffer
 * if outlen really is the full buffer size as the header comment states --
 * confirm callers allocate outlen + 1 bytes.
 */
733 if (outpos
>= outlen
+ 1)
734 output
[outlen
] = '\0';
737 EXPORT_SYMBOL_GPL(ltt_serialize_printf
);
739 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/*
 * ltt_fmt_largest_align - compute the padding (in bytes) needed before the
 * event payload so its largest field is naturally aligned, given the current
 * drift 'align_drift'.  Scans the format string exactly like the serializer;
 * bails out of the scan early once largest_align reaches the configured
 * trace alignment since it cannot grow further.
 * Only built when the architecture lacks efficient unaligned access
 * (see the #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS guard above).
 * NOTE(review): interior lines (goto/break targets, braces) are missing
 * from this extraction.
 */
741 unsigned int ltt_fmt_largest_align(size_t align_drift
, const char *fmt
)
743 char trace_size
= 0, c_size
= 0;
744 enum ltt_type trace_type
= LTT_TYPE_NONE
, c_type
= LTT_TYPE_NONE
;
745 unsigned long attributes
= 0;
746 int largest_align
= 1;
748 for (; *fmt
; ++fmt
) {
752 ++fmt
; /* skip first '#' */
753 if (*fmt
== '#') /* Escaped ## */
756 fmt
= parse_trace_type(fmt
, &trace_size
, &trace_type
,
759 largest_align
= max_t(int, largest_align
, trace_size
);
760 if (largest_align
>= ltt_get_alignment())
765 ++fmt
; /* skip first '%' */
766 if (*fmt
== '%') /* Escaped %% */
768 fmt
= parse_c_type(fmt
, &c_size
, &c_type
, NULL
);
770 * Output c types if no trace types has been
775 if (trace_type
== LTT_TYPE_NONE
)
777 if (c_type
== LTT_TYPE_STRING
)
778 trace_type
= LTT_TYPE_STRING
;
780 largest_align
= max_t(int, largest_align
, trace_size
);
781 if (largest_align
>= ltt_get_alignment())
/* Reset parse state for the next #type/%ctype pair. */
786 trace_type
= LTT_TYPE_NONE
;
/* NOTE(review): c_size = LTT_TYPE_NONE -- same suspicious pattern as in
 * ltt_serialize_data(); likely meant c_type/c_size reset. Confirm upstream. */
787 c_size
= LTT_TYPE_NONE
;
/* Padding = bytes to the next largest_align boundary from align_drift. */
793 largest_align
= min_t(int, largest_align
, ltt_get_alignment());
794 return (largest_align
- align_drift
) & (largest_align
- 1);
796 EXPORT_SYMBOL_GPL(ltt_fmt_largest_align
);
801 * Calculate data size
802 * Assume that the padding for alignment starts at a sizeof(void *) address.
/*
 * ltt_get_data_size - run the probe's first serialization callback with a
 * NULL buffer: the callback then only computes and returns the payload size
 * (and updates *largest_align) without writing anything.
 */
805 size_t ltt_get_data_size(struct ltt_serialize_closure
*closure
,
806 void *serialize_private
, unsigned int stack_pos_ctx
,
807 int *largest_align
, const char *fmt
, va_list *args
)
809 ltt_serialize_cb cb
= closure
->callbacks
[0];
/* NULL buf => size-calculation pass; offset starts at 0. */
811 return (size_t)cb(NULL
, 0, closure
, serialize_private
, stack_pos_ctx
,
812 largest_align
, fmt
, args
);
/*
 * ltt_write_event_data - second pass: align buf_offset to the payload's
 * largest_align boundary, then invoke the probe's first serialization
 * callback to actually write the event payload into 'buf'.
 * NOTE(review): the tail of the callback invocation (remaining arguments
 * after NULL, closing paren/brace) is missing from this extraction.
 */
816 void ltt_write_event_data(struct ltt_chanbuf
*buf
, size_t buf_offset
,
817 struct ltt_serialize_closure
*closure
,
818 void *serialize_private
, unsigned int stack_pos_ctx
,
819 int largest_align
, const char *fmt
, va_list *args
)
821 ltt_serialize_cb cb
= closure
->callbacks
[0];
823 buf_offset
+= ltt_align(buf_offset
, largest_align
);
824 cb(buf
, buf_offset
, closure
, serialize_private
, stack_pos_ctx
, NULL
,
/*
 * ltt_vtrace - core tracing entry point for a fired marker: computes the
 * payload size once, then for every active trace reserves a slot, writes
 * the event header and payload, and commits the slot.  Runs under
 * rcu_read_lock_sched_notrace() with per-CPU ltt_nesting incremented to
 * guard against recursive probes; the per-CPU tracer stack position is
 * saved on entry and restored on exit.
 * NOTE(review): many interior lines (va_end calls, label/brace lines,
 * barrier() statements implied by the "asm volatile" comments) are missing
 * from this extraction.
 *
 * FIX: the list_for_each_entry_rcu() head argument read ", <t_traces" --
 * an HTML-entity mangling of ", &ltt_traces" ("&lt" decoded to "<"); the
 * ltt_traces global is referenced intact above (ltt_traces.num_active_traces).
 */
830 void ltt_vtrace(const struct marker
*mdata
, void *probe_data
, void *call_data
,
831 const char *fmt
, va_list *args
)
833 int largest_align
, ret
;
834 struct ltt_active_marker
*pdata
;
836 size_t data_size
, slot_size
;
837 unsigned int chan_index
;
838 struct ltt_chanbuf
*buf
;
839 struct ltt_chan
*chan
;
840 struct ltt_trace
*trace
, *dest_trace
= NULL
;
844 struct ltt_serialize_closure closure
;
845 struct ltt_probe_private_data
*private_data
= call_data
;
846 void *serialize_private
= NULL
;
849 unsigned int stack_pos_ctx
;
852 * This test is useful for quickly exiting static tracing when no trace
853 * is active. We expect to have an active trace when we get here.
855 if (unlikely(ltt_traces
.num_active_traces
== 0))
858 rcu_read_lock_sched_notrace();
859 cpu
= smp_processor_id();
860 __get_cpu_var(ltt_nesting
)++;
861 stack_pos_ctx
= __get_cpu_var(tracer_stack_pos
);
863 * asm volatile and "memory" clobber prevent the compiler from moving
864 * instructions out of the ltt nesting count. This is required to ensure
865 * that probe side-effects which can cause recursion (e.g. unforeseen
866 * traps, divisions by 0, ...) are triggered within the incremented
867 * nesting count section.
870 pdata
= (struct ltt_active_marker
*)probe_data
;
871 eID
= mdata
->event_id
;
872 chan_index
= mdata
->channel_id
;
873 closure
.callbacks
= pdata
->probe
->callbacks
;
/* A private-data override can redirect to one trace / one serializer. */
875 if (unlikely(private_data
)) {
876 dest_trace
= private_data
->trace
;
877 if (private_data
->serializer
)
878 closure
.callbacks
= &private_data
->serializer
;
879 serialize_private
= private_data
->serialize_private
;
/* Size pass: consume a copy of the args to compute payload size/alignment. */
882 va_copy(args_copy
, *args
);
884 * Assumes event payload to start on largest_align alignment.
886 largest_align
= 1; /* must be non-zero for ltt_align */
887 data_size
= ltt_get_data_size(&closure
, serialize_private
,
888 stack_pos_ctx
, &largest_align
,
890 largest_align
= min_t(int, largest_align
, sizeof(void *));
893 /* Iterate on each trace */
894 list_for_each_entry_rcu(trace
, &ltt_traces
.head
, list
) {
896 * Expect the filter to filter out events. If we get here,
897 * we went through tracepoint activation as a first step.
899 if (unlikely(dest_trace
&& trace
!= dest_trace
))
901 if (unlikely(!trace
->active
))
903 if (unlikely(!ltt_run_filter(trace
, eID
)))
905 #ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
906 rflags
= LTT_RFLAG_ID_SIZE
;
908 if (unlikely(eID
>= LTT_FREE_EVENTS
))
909 rflags
= LTT_RFLAG_ID
;
914 * Skip channels added after trace creation.
916 if (unlikely(chan_index
>= trace
->nr_channels
))
918 chan
= &trace
->channels
[chan_index
];
922 /* reserve space : header and data */
923 ret
= ltt_reserve_slot(chan
, trace
, data_size
, largest_align
,
924 cpu
, &buf
, &slot_size
, &buf_offset
,
926 if (unlikely(ret
< 0))
927 continue; /* buffer full */
/* Write pass: fresh copy of the args for this trace's buffer. */
929 va_copy(args_copy
, *args
);
930 /* Out-of-order write : header and data */
931 buf_offset
= ltt_write_event_header(&buf
->a
, &chan
->a
,
932 buf_offset
, eID
, data_size
,
934 ltt_write_event_data(buf
, buf_offset
, &closure
,
935 serialize_private
, stack_pos_ctx
,
936 largest_align
, fmt
, &args_copy
);
938 /* Out-of-order commit */
939 ltt_commit_slot(buf
, chan
, buf_offset
, data_size
, slot_size
);
942 * asm volatile and "memory" clobber prevent the compiler from moving
943 * instructions out of the ltt nesting count. This is required to ensure
944 * that probe side-effects which can cause recursion (e.g. unforeseen
945 * traps, divisions by 0, ...) are triggered within the incremented
946 * nesting count section.
/* Restore per-CPU state and drop protection in reverse acquisition order. */
949 __get_cpu_var(tracer_stack_pos
) = stack_pos_ctx
;
950 __get_cpu_var(ltt_nesting
)--;
951 rcu_read_unlock_sched_notrace();
953 EXPORT_SYMBOL_GPL(ltt_vtrace
);
/*
 * ltt_trace - variadic front-end to ltt_vtrace(); packages the ... into a
 * va_list and forwards it by pointer (see the header comment about va_list
 * passing semantics differing between i386 and x86_64).
 * NOTE(review): the va_start/va_end lines are missing from this extraction.
 */
956 void ltt_trace(const struct marker
*mdata
, void *probe_data
, void *call_data
,
957 const char *fmt
, ...)
962 ltt_vtrace(mdata
, probe_data
, call_data
, fmt
, &args
);
965 EXPORT_SYMBOL_GPL(ltt_trace
);
967 MODULE_LICENSE("GPL and additional rights");
968 MODULE_AUTHOR("Mathieu Desnoyers");
969 MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Serializer");