 * LTT_FLAG_TRACE : first arg contains trace to write into.
 *                  (type : struct ltt_trace_struct *)
 * LTT_FLAG_CHANNEL : following arg contains channel index to write into.
 * LTT_FLAG_FORCE : Force write in disabled traces (internal ltt use)
/* Bit positions of the trace-control flags below.
 * NOTE(review): identifiers beginning with '_' + uppercase are reserved for
 * the implementation (C11 7.1.3); kept unchanged for source compatibility,
 * but consider renaming (e.g. LTT_FLAG_TRACE_BIT) upstream. */
#define _LTT_FLAG_TRACE		0
#define _LTT_FLAG_CHANNEL	1
#define _LTT_FLAG_FORCE		2

/* Flag masks for the 'flags' argument of trace()/vtrace(). */
#define LTT_FLAG_TRACE		(1 << _LTT_FLAG_TRACE)
#define LTT_FLAG_CHANNEL	(1 << _LTT_FLAG_CHANNEL)
#define LTT_FLAG_FORCE		(1 << _LTT_FLAG_FORCE)
20 /* Calculate data size */
21 /* Assume that the padding for alignment starts at a
22 * sizeof(void *) address. */
/*
 * NOTE(review): only the signature of ltt_get_data_size survives in this
 * extraction — its body (original lines 26-31) was dropped by the scraper,
 * and the leading "23"/"24" tokens below are fused-in source line numbers,
 * not code. Judging from the call in vtrace(), it computes the serialized
 * payload size for event (fID, eID) from fmt/args — confirm against the
 * upstream LTT tree before compiling.
 */
23 static inline __attribute__((no_instrument_function
))
24 size_t ltt_get_data_size(ltt_facility_t fID
, uint8_t eID
,
25 const char *fmt
, va_list args
)
/*
 * NOTE(review): only the signature of ltt_write_event_data survives in this
 * extraction — its body (original lines 36-42) was dropped by the scraper,
 * and the leading "32"-"35" tokens below are fused-in source line numbers,
 * not code. Judging from the call in vtrace(), it serializes the event
 * payload described by fmt/args into 'buffer' — confirm against the
 * upstream LTT tree before compiling.
 */
32 static inline __attribute__((no_instrument_function
))
33 size_t ltt_write_event_data(char *buffer
,
34 ltt_facility_t fID
, uint8_t eID
,
35 const char *fmt
, va_list args
)
43 __attribute__((no_instrument_function
))
44 void vtrace(ltt_facility_t fID
, uint8_t eID
, long flags
,
45 const char *fmt
, va_list args
)
47 size_t data_size
, slot_size
;
48 uint8_t channel_index
;
49 struct ltt_channel_struct
*channel
;
50 struct ltt_trace_struct
*trace
, *dest_trace
;
55 /* This test is useful for quickly exiting static tracing when no
57 if (likely(ltt_traces
.num_active_traces
== 0 && !(flags
& LTT_FLAG_FORCE
)))
60 data_size
= ltt_get_data_size(fID
, eID
, fmt
, args
);
62 ltt_nesting
[smp_processor_id()]++;
64 if (unlikely(flags
& LTT_FLAG_TRACE
))
65 dest_trace
= va_arg(args
, struct ltt_trace_struct
*);
66 if (unlikely(flags
& LTT_FLAG_CHANNEL
))
67 channel_index
= va_arg(args
, uint8_t);
69 channel_index
= ltt_get_channel_index(fID
, eID
);
71 /* Iterate on each traces */
72 list_for_each_entry_rcu(trace
, <t_traces
.head
, list
) {
73 if (unlikely(!trace
->active
&& !(flags
& LTT_FLAG_FORCE
)))
75 if (unlikely(flags
& LTT_FLAG_TRACE
&& trace
!= dest_trace
))
77 channel
= ltt_get_channel_from_index(trace
, channel_index
);
78 /* reserve space : header and data */
79 buffer
= ltt_reserve_slot(trace
, channel
, &transport_data
,
80 data_size
, &slot_size
, &tsc
);
81 if (unlikely(!buffer
))
82 continue; /* buffer full */
83 /* Out-of-order write : header and data */
84 buffer
= ltt_write_event_header(trace
, channel
, buffer
,
85 fID
, eID
, data_size
, tsc
);
86 ltt_write_event_data(buffer
, fID
, eID
, fmt
, args
);
87 /* Out-of-order commit */
88 ltt_commit_slot(channel
, &transport_data
, buffer
, slot_size
);
91 ltt_nesting
[smp_processor_id()]--;
/*
 * NOTE(review): only the signature of trace() is visible — its body lies
 * past the end of this extraction, and the leading "95"/"96" tokens below
 * are fused-in source line numbers, not code. Presumably it builds a
 * va_list from the trailing '...' and forwards to vtrace() — confirm
 * against the upstream LTT tree.
 */
95 __attribute__((no_instrument_function
))
96 void trace(ltt_facility_t fID
, uint8_t eID
, const char *fmt
, ...)
/* (gitweb page-generation footer left over from web extraction — not source code:
 * "This page took 0.035441 seconds and 4 git commands to generate.") */