/* source snapshot: commit 070542c65e2a938a158e00e88aad320f867a55b2 */
/*
 * ltt-type-serializer.c
 *
 * LTTng specialized type serializer.
 *
 * Copyright Mathieu Desnoyers, 2008.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

/* This file contains functions for tracepoint custom probes support. */
#include <urcu/rculist.h>

#include <ust/clock.h>

#include "type-serializer.h"
23 void _ltt_specialized_trace(const struct ust_marker
*mdata
, void *probe_data
,
24 void *serialize_private
, unsigned int data_size
,
25 unsigned int largest_align
)
30 unsigned int chan_index
;
31 struct ust_buffer
*buf
;
32 struct ust_channel
*chan
;
33 struct ust_trace
*trace
;
40 * If we get here, it's probably because we have useful work to do.
42 if (unlikely(ltt_traces
.num_active_traces
== 0))
48 /* Force volatile access. */
49 CMM_STORE_SHARED(ltt_nesting
, CMM_LOAD_SHARED(ltt_nesting
) + 1);
52 * asm volatile and "memory" clobber prevent the compiler from moving
53 * instructions out of the ltt nesting count. This is required to ensure
54 * that probe side-effects which can cause recursion (e.g. unforeseen
55 * traps, divisions by 0, ...) are triggered within the incremented
56 * nesting count section.
59 eID
= mdata
->event_id
;
60 chan_index
= mdata
->channel_id
;
63 * Iterate on each trace, typically small number of active traces,
64 * list iteration with prefetch is usually slower.
66 cds_list_for_each_entry_rcu(trace
, <t_traces
.head
, list
) {
67 if (unlikely(!trace
->active
))
69 //ust// if (unlikely(!ltt_run_filter(trace, eID)))
71 #ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
72 rflags
= LTT_RFLAG_ID_SIZE
;
74 if (unlikely(eID
>= LTT_FREE_EVENTS
))
75 rflags
= LTT_RFLAG_ID
;
80 * Skip channels added after trace creation.
82 if (unlikely(chan_index
>= trace
->nr_channels
))
84 chan
= &trace
->channels
[chan_index
];
88 /* If a new cpu was plugged since the trace was started, we did
89 * not add it to the trace, and therefore we write the event to
92 if(cpu
>= chan
->n_cpus
) {
96 /* reserve space : header and data */
97 ret
= ltt_reserve_slot(chan
, trace
, data_size
, largest_align
,
98 cpu
, &buf
, &slot_size
, &buf_offset
, &tsc
,
100 if (unlikely(ret
< 0))
101 continue; /* buffer full */
103 /* Out-of-order write : header and data */
104 buf_offset
= ltt_write_event_header(chan
, buf
,
105 buf_offset
, eID
, data_size
,
108 buf_offset
+= ltt_align(buf_offset
, largest_align
);
109 ust_buffers_write(buf
, buf_offset
,
110 serialize_private
, data_size
);
111 buf_offset
+= data_size
;
113 /* Out-of-order commit */
114 ltt_commit_slot(chan
, buf
, buf_offset
, data_size
, slot_size
);
117 * asm volatile and "memory" clobber prevent the compiler from moving
118 * instructions out of the ltt nesting count. This is required to ensure
119 * that probe side-effects which can cause recursion (e.g. unforeseen
120 * traps, divisions by 0, ...) are triggered within the incremented
121 * nesting count section.
124 CMM_STORE_SHARED(ltt_nesting
, CMM_LOAD_SHARED(ltt_nesting
) - 1);
/* This page took 0.034302 seconds and 5 git commands to generate. */