/* Maximum number of callbacks per marker */
#define LTT_NR_CALLBACKS	10

/*
 * Flags used in the trace call arguments.
 * LTT_FLAG_TRACE : first arg contains trace to write into.
 *                  (type : struct ltt_trace_struct *)
 * LTT_FLAG_CHANNEL : following arg contains channel index to write into.
 * LTT_FLAG_FORCE : Force write in disabled traces (internal ltt use)
 *
 * NOTE(review): reconstructed from a damaged capture (web-viewer line
 * numbers were fused onto every line and the comment delimiters were
 * lost); macro names and values are exactly as captured.
 */
#define _LTT_FLAG_TRACE		0
#define _LTT_FLAG_CHANNEL	1
#define _LTT_FLAG_FORCE		2

#define LTT_FLAG_TRACE		(1 << _LTT_FLAG_TRACE)
#define LTT_FLAG_CHANNEL	(1 << _LTT_FLAG_CHANNEL)
#define LTT_FLAG_FORCE		(1 << _LTT_FLAG_FORCE)
/*
 * Custom serialization callback used by ltt_serialize_data(): writes at
 * 'buffer', may consume values from 'args' and stash per-callback state
 * in 'cb_args'; returns the advanced write position.
 *
 * NOTE(review): the 'typedef' keyword was lost in the damaged capture.
 * It must be a typedef: later in this file the name is used as a type,
 * in 'cb = va_arg(args, ltt_serialize_cb);'.
 */
typedef char *(*ltt_serialize_cb)(char *buffer, int *cb_args,
		const char *fmt, va_list args);
/*
 * Parse an unsigned decimal integer at **s, advancing *s past the digits
 * consumed.  Returns 0 when *s does not start with a digit.
 *
 * NOTE(review): the loop header, braces, and the 'int i = 0;' declaration
 * were lost in the damaged capture and were restored from the standard
 * kernel-vsnprintf skip_atoi() form (the file says "Inspired from
 * vsnprintf"); the signature and the accumulation line are original.
 */
static int skip_atoi(const char **s)
{
	int i = 0;

	while (isdigit(**s))
		i = i*10 + *((*s)++) - '0';
	return i;
}
/*
 * ltt_serialize_data() - serialize va_list arguments into 'buffer'
 * following the custom conversion specifiers of 'fmt' ('%r' fixed-length
 * struct/union/array, '%v' sequence, plus a callback conversion and the
 * usual integer/char/string/pointer ones), returning the advanced write
 * pointer.  Apparently a NULL buffer makes it a size-only pass (see the
 * ltt_get_data_size() caller) -- TODO confirm against upstream.
 *
 * NOTE(review): this region is a badly damaged capture of the original
 * file.  A web viewer's line numbers are fused to the start of each
 * source line, statements are split across lines, and many interior
 * lines (braces, switch labels, 'repeat:' label, returns) are missing
 * entirely.  The tokens below are deliberately left byte-identical;
 * restore the complete function from the upstream tree before compiling.
 */
37 /* Inspired from vsnprintf */
39 * %r : serialized fixed length struct, union, array.
40 * %v : serialized sequence
/* NOTE(review): the opening and closing delimiters of the comment above
 * (original lines 38 and 41-42) were lost in the capture. */
43 static inline __attribute__((no_instrument_function
))
44 char *ltt_serialize_data(char *buffer
, int *cb_args
,
45 const char *fmt
, va_list args
)
/* Locals: element size/alignment and a length qualifier parsed from fmt. */
49 int elem_size
; /* Size of the integer for 'b' */
50 /* Size of the data contained by 'r' */
51 int elem_alignment
; /* Element alignment for 'r' */
52 int qualifier
; /* 'h', 'l', or 'L' for integer fields */
53 /* 'z' support added 23/7/1999 S.H. */
54 /* 'z' changed to 'Z' --davidm 1/25/99 */
55 /* 't' added for ptrdiff_t */
56 char *str
; /* Pointer to write to */
/* Main scan loop over the format string. */
62 for (; *fmt
; ++fmt
) {
68 /* process flags : ignore standard print formats for now. */
70 ++fmt
; /* this also skips first '%' */
/* NOTE(review): the flag-parsing switch and its 'repeat:' label
 * (original lines 71-78) are missing from this capture. */
76 case '0': goto repeat
;
79 /* get element size */
82 elem_size
= skip_atoi(&fmt
);
83 else if (*fmt
== '*') {
85 /* it's the next argument */
86 elem_size
= va_arg(args
, int);
89 /* get the alignment */
94 elem_alignment
= skip_atoi(&fmt
);
95 else if (*fmt
== '*') {
97 /* it's the next argument */
98 elem_alignment
= va_arg(args
, int);
102 /* get the conversion qualifier */
104 if (*fmt
== 'h' || *fmt
== 'l' || *fmt
== 'L' ||
105 *fmt
=='Z' || *fmt
== 'z' || *fmt
== 't') {
108 if (qualifier
== 'l' && *fmt
== 'l') {
/* NOTE(review): conversion dispatch starts here; the switch head and
 * several case labels (original lines 109-116) are missing.  This looks
 * like the 'c' conversion: */
117 *str
= (char) va_arg(args
, int);
/* Presumably the 's' conversion: kernel-style bogus-pointer check
 * (address below PAGE_SIZE) -- case label lost; confirm upstream. */
122 s
= va_arg(args
, char *);
123 if ((unsigned long)s
< PAGE_SIZE
)
128 /* Following alignment for genevent
130 str
+= ltt_align(str
, sizeof(void*));
/* Pointer stored verbatim, aligned on sizeof(void*). */
134 str
+= ltt_align(str
, sizeof(void*));
136 *(void**)str
= va_arg(args
, void *);
140 /* For array, struct, union */
141 if (elem_alignment
< 0)
142 elem_alignment
= sizeof(void*);
143 str
+= ltt_align(str
, elem_alignment
);
145 const char *src
= va_arg(args
,
148 memcpy(str
, src
, elem_size
);
/* Sequence: an int length prefix followed by the aligned payload. */
155 str
+= ltt_align(str
, sizeof(int));
157 *(int*)str
= elem_size
;
159 if (elem_alignment
> 0)
160 str
+= ltt_align(str
, elem_alignment
);
162 const char *src
= va_arg(args
,
165 memcpy(str
, src
, elem_size
);
168 /* Following alignment for genevent
170 str
+= ltt_align(str
, sizeof(void*));
/* Callback conversion: delegates to an ltt_serialize_cb, bounded to
 * LTT_NR_CALLBACKS per-event callback argument slots. */
175 cb
= va_arg(args
, ltt_serialize_cb
);
176 /* The callback will take as many arguments
177 * as it needs from args. They won't be
179 if (cb_arg_nr
< LTT_NR_CALLBACKS
)
180 str
= cb(str
, &cb_args
[cb_arg_nr
++],
186 * What does C99 say about the overflow case
/* Presumably the 'n' conversion: writes a count back through a pointer
 * whose width follows the qualifier -- case label lost; confirm. */
188 if (qualifier
== 'l') {
189 long * ip
= va_arg(args
, long *);
191 } else if (qualifier
== 'Z'
192 || qualifier
== 'z') {
193 size_t * ip
= va_arg(args
, size_t *);
196 int * ip
= va_arg(args
, int *);
/* Integer conversions: align, store, advance -- one arm per qualifier. */
219 str
+= ltt_align(str
, sizeof(long long));
221 *(long long*)str
= va_arg(args
, long long);
222 str
+= sizeof(long long);
225 str
+= ltt_align(str
, sizeof(long));
227 *(long*)str
= va_arg(args
, long);
232 str
+= ltt_align(str
, sizeof(size_t));
234 *(size_t*)str
= va_arg(args
, size_t);
235 str
+= sizeof(size_t);
238 str
+= ltt_align(str
, sizeof(ptrdiff_t));
240 *(ptrdiff_t*)str
= va_arg(args
, ptrdiff_t);
241 str
+= sizeof(ptrdiff_t);
244 str
+= ltt_align(str
, sizeof(short));
246 *(short*)str
= (short) va_arg(args
, int);
247 str
+= sizeof(short);
/* 'b' fixed-size integer: width chosen at runtime by elem_size. */
251 str
+= ltt_align(str
, elem_size
);
256 (int8_t)va_arg(args
, int);
260 (int16_t)va_arg(args
, int);
263 *(int32_t*)str
= va_arg(args
, int32_t);
266 *(int64_t*)str
= va_arg(args
, int64_t);
/* Default arm: plain int. */
271 str
+= ltt_align(str
, sizeof(int));
273 *(int*)str
= va_arg(args
, int);
280 /* Calculate data size */
281 /* Assume that the padding for alignment starts at a
282 * sizeof(void *) address. */
283 static inline __attribute__((no_instrument_function
))
284 size_t ltt_get_data_size(ltt_facility_t fID
, uint8_t eID
,
286 const char *fmt
, va_list args
)
288 return (size_t)ltt_serialize_data(NULL
, fmt
, args
);
291 static inline __attribute__((no_instrument_function
))
292 void ltt_write_event_data(char *buffer
,
293 ltt_facility_t fID
, uint8_t eID
,
295 const char *fmt
, va_list args
)
297 ltt_serialize_data(buffer
, fmt
, args
);
/*
 * _vtrace() - core va_list tracing entry point: computes the serialized
 * event size, then, for every trace in the RCU-protected ltt_traces list,
 * reserves a slot in the event's channel, writes the event header and
 * data, and commits the slot.  LTT_FLAG_TRACE / LTT_FLAG_CHANNEL select
 * optional leading va_list arguments (destination trace / channel index);
 * LTT_FLAG_FORCE writes even into inactive traces.
 *
 * NOTE(review): badly damaged capture -- web-viewer line numbers are
 * fused to each line; the declarations of channel_index, tsc, args_copy
 * and several control-flow lines (returns, 'continue's, else, closing
 * braces) are missing; and '&ltt_traces' was HTML-mangled to '<t_traces'
 * below.  Tokens deliberately left byte-identical; restore from the
 * upstream tree before compiling.
 */
301 __attribute__((no_instrument_function
))
302 void _vtrace(ltt_facility_t fID
, uint8_t eID
, long flags
,
303 const char *fmt
, va_list args
)
305 size_t data_size
, slot_size
;
307 struct ltt_channel_struct
*channel
;
308 struct ltt_trace_struct
*trace
, *dest_trace
;
309 void *transport_data
;
313 int cb_args
[LTT_NR_CALLBACKS
];
315 /* This test is useful for quickly exiting static tracing when no
316 * trace is active. */
317 if (likely(ltt_traces
.num_active_traces
== 0
318 && !(flags
& LTT_FLAG_FORCE
)))
/* Per-CPU nesting counter; presumably guards against recursive tracing
 * on this CPU -- confirm upstream. */
322 ltt_nesting
[smp_processor_id()]++;
/* Optional leading va_list arguments selected by 'flags'. */
324 if (unlikely(flags
& LTT_FLAG_TRACE
))
325 dest_trace
= va_arg(args
, struct ltt_trace_struct
*);
326 if (unlikely(flags
& LTT_FLAG_CHANNEL
))
327 channel_index
= va_arg(args
, int);
329 channel_index
= ltt_get_channel_index(fID
, eID
);
/* Size pass consumes a copy of args so the write pass sees them again. */
331 va_copy(args_copy
, args
); /* Check : skip 2 st args if trace/ch */
332 data_size
= ltt_get_data_size(fID
, eID
, cb_args
, fmt
, args_copy
);
335 /* Iterate on each traces */
/* NOTE(review): '<t_traces' below is HTML '&lt'-mangled '&ltt_traces'. */
336 list_for_each_entry_rcu(trace
, <t_traces
.head
, list
) {
337 if (unlikely(!trace
->active
&& !(flags
& LTT_FLAG_FORCE
)))
339 if (unlikely(flags
& LTT_FLAG_TRACE
&& trace
!= dest_trace
))
341 channel
= ltt_get_channel_from_index(trace
, channel_index
);
342 /* reserve space : header and data */
343 buffer
= ltt_reserve_slot(trace
, channel
, &transport_data
,
344 data_size
, &slot_size
, &tsc
);
345 if (unlikely(!buffer
))
346 continue; /* buffer full */
347 /* Out-of-order write : header and data */
348 buffer
= ltt_write_event_header(trace
, channel
, buffer
,
349 fID
, eID
, data_size
, tsc
);
/* A fresh copy of args for each trace's write pass. */
350 va_copy(args_copy
, args
);
351 ltt_write_event_data(buffer
, fID
, eID
, cb_args
, fmt
, args_copy
);
353 /* Out-of-order commit */
354 ltt_commit_slot(channel
, &transport_data
, buffer
, slot_size
);
357 ltt_nesting
[smp_processor_id()]--;
361 __attribute__((no_instrument_function
))
362 void _trace(ltt_facility_t fID
, uint8_t eID
, long flags
, const char *fmt
, ...)
367 _vtrace(fID
, eID
, flags
, fmt
, args
);
/* NOTE(review): removed a web-viewer page-generation footer ("This page
 * took ... git commands to generate") accidentally captured with the
 * source; it is not part of the program. */