/* Copyright (C) 2009 Pierre-Marc Fournier
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <assert.h>
#include <byteswap.h>

#include "ust/ustconsumer.h"
#include "buffers.h"
#include "tracer.h"
#include "usterr.h"
/* This truncates to an offset in the buffer. */
#define USTD_BUFFER_TRUNC(offset, bufinfo) \
	((offset) & (~(((bufinfo)->subbuf_size*(bufinfo)->n_subbufs)-1)))
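/*
 * Illustrative example (values assumed purely for the arithmetic): with
 * subbuf_size = 4096 and n_subbufs = 8, the buffer spans 32768 bytes and
 * the mask is ~(32768 - 1). USTD_BUFFER_TRUNC(70000, bufinfo) then yields
 * 65536, the start of the buffer-sized window containing offset 70000.
 * Both sizes must be powers of two for this masking trick to hold.
 */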
#define LTT_MAGIC_NUMBER 0x00D6B7ED
#define LTT_REV_MAGIC_NUMBER 0xEDB7D600
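/*
 * Note: LTT_REV_MAGIC_NUMBER is the byte-swapped form of LTT_MAGIC_NUMBER;
 * finding it in a subbuffer header means the trace was written with the
 * opposite endianness, so multi-byte header fields must be byte-swapped
 * before use (see subbuffer_data_size() below).
 */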
static void ltt_relay_print_subbuffer_errors(
		struct buffer_info *buf,
		long cons_off, int cpu)
{
	struct ust_buffer *ust_buf = buf->bufstruct_mem;
	long cons_idx, commit_count, commit_count_mask, write_offset;

	cons_idx = SUBBUF_INDEX(cons_off, buf);
	commit_count = uatomic_read(&ust_buf->commit_seq[cons_idx]);
	commit_count_mask = (~0UL >> get_count_order(buf->n_subbufs));

	/*
	 * No need to order commit_count and write_offset reads because we
	 * execute after trace is stopped when there are no readers left.
	 */
	write_offset = uatomic_read(&ust_buf->offset);
	WARN("LTT : unread channel %s offset is %ld "
	     "and cons_off : %ld (cpu %d)\n",
	     buf->channel, write_offset, cons_off, cpu);
	/* Check each sub-buffer for non filled commit count */
	if (((commit_count - buf->subbuf_size) & commit_count_mask)
	    - (BUFFER_TRUNC(cons_off, buf) >> get_count_order(buf->n_subbufs)) != 0) {
		ERR("LTT : %s : subbuffer %lu has non filled "
		    "commit count [seq] [%lu].\n",
		    buf->channel, cons_idx, commit_count);
	}
	ERR("LTT : %s : commit count : %lu, subbuf size %d\n",
	    buf->channel, commit_count, buf->subbuf_size);
}
static void ltt_relay_print_errors(struct buffer_info *buf, int cpu)
{
	struct ust_buffer *ust_buf = buf->bufstruct_mem;
	long cons_off;

	for (cons_off = uatomic_read(&ust_buf->consumed);
	     (SUBBUF_TRUNC(uatomic_read(&ust_buf->offset), buf)
	      - cons_off) > 0;
	     cons_off = SUBBUF_ALIGN(cons_off, buf))
		ltt_relay_print_subbuffer_errors(buf, cons_off, cpu);
}
static void ltt_relay_print_buffer_errors(struct buffer_info *buf, int cpu)
{
	struct ust_buffer *ust_buf = buf->bufstruct_mem;

	if (uatomic_read(&ust_buf->events_lost))
		ERR("channel %s: %ld events lost (cpu %d)",
		    buf->channel,
		    uatomic_read(&ust_buf->events_lost), cpu);
	if (uatomic_read(&ust_buf->corrupted_subbuffers))
		ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
		    buf->channel,
		    uatomic_read(&ust_buf->corrupted_subbuffers), cpu);

	ltt_relay_print_errors(buf, cpu);
}
/* Returns the size of a subbuffer. This is the size that
 * will need to be written to disk.
 *
 * @subbuf: pointer to the beginning of the subbuffer (the
 *          beginning of its header)
 */
size_t subbuffer_data_size(void *subbuf)
{
	struct ltt_subbuffer_header *header = subbuf;
	int reverse;
	u32 data_size;

	if (header->magic_number == LTT_MAGIC_NUMBER) {
		reverse = 0;
	} else if (header->magic_number == LTT_REV_MAGIC_NUMBER) {
		reverse = 1;
	} else {
		/* Unknown magic number: not a valid subbuffer header. */
		return -1;
	}

	data_size = header->sb_size;
	if (reverse)
		data_size = bswap_32(data_size);

	return data_size;
}
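/*
 * Sketch of intended use (hypothetical caller, not part of this file):
 *
 *	size_t size = subbuffer_data_size(subbuf_ptr);
 *	if (size == (size_t)-1)
 *		return;		<-- not a recognized LTT subbuffer
 *	write_to_disk(subbuf_ptr, size);
 *
 * write_to_disk() is a placeholder name; the -1 return (which wraps to
 * SIZE_MAX) signals an unrecognized magic number.
 */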
void finish_consuming_dead_subbuffer(struct ustconsumer_callbacks *callbacks, struct buffer_info *buf)
{
	struct ust_buffer *ustbuf = buf->bufstruct_mem;

	long write_offset = uatomic_read(&ustbuf->offset);

	long i_subbuf;
	int ret;

	DBG("processing dead buffer (%s)", buf->name);
	DBG("consumed offset is %ld (%s)", uatomic_read(&ustbuf->consumed),
	    buf->name);
	DBG("write offset is %ld (%s)", write_offset, buf->name);
	/* First subbuf that we need to consume now. It is not modulo'd.
	 * Consumed_offset is the next byte to consume. */
	long first_subbuf = uatomic_read(&ustbuf->consumed) / buf->subbuf_size;
	/* Last subbuf that we need to consume now. It is not modulo'd.
	 * Write_offset is the next place to write so write_offset-1 is the
	 * last place written. */
	long last_subbuf = (write_offset - 1) / buf->subbuf_size;
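	/*
	 * Worked example (illustrative numbers only): with subbuf_size = 4096,
	 * consumed = 8192 and write_offset = 13000, first_subbuf = 8192/4096 = 2
	 * and last_subbuf = 12999/4096 = 3, i.e. two subbuffers remain to be
	 * salvaged.
	 */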
	DBG("first_subbuf=%ld", first_subbuf);
	DBG("last_subbuf=%ld", last_subbuf);

	if (last_subbuf - first_subbuf >= buf->n_subbufs) {
		DBG("an overflow has occurred, nothing can be recovered");
		return;
	}
	/* Iterate on subbuffers to recover. */
	for (i_subbuf = first_subbuf % buf->n_subbufs; ;
	     i_subbuf++, i_subbuf %= buf->n_subbufs) {
		/* commit_seq is the offset in the buffer of the end of the last
		 * sequential commit. Bytes beyond this limit cannot be recovered.
		 * This is a free-running counter. */
		long commit_seq = uatomic_read(&ustbuf->commit_seq[i_subbuf]);
		unsigned long valid_length = buf->subbuf_size;
		long n_subbufs_order = get_count_order(buf->n_subbufs);
		long commit_seq_mask = (~0UL >> n_subbufs_order);

		struct ltt_subbuffer_header *header =
			(struct ltt_subbuffer_header *)((char *)buf->mem
							+ i_subbuf * buf->subbuf_size);
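		/*
		 * One reading of the check below (inferred from the definitions
		 * above, not stated in the original comments): commit_seq counts
		 * every byte ever committed to this subbuffer, so after a full
		 * pass it sits on a multiple of subbuf_size. Subtracting
		 * subbuf_size and masking keeps only the low-order bits so the
		 * comparison stays valid when the free-running counter wraps,
		 * and USTD_BUFFER_TRUNC(consumed, buf) >> n_subbufs_order
		 * computes the same per-pass quantity from the consumed offset.
		 * The two match exactly when the subbuffer was fully committed
		 * for the current pass.
		 */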
		/* Check if subbuf was fully written. This is from Mathieu's
		 * algorithm/paper. */
		if (((commit_seq - buf->subbuf_size) & commit_seq_mask)
		    - (USTD_BUFFER_TRUNC(uatomic_read(&ustbuf->consumed), buf) >> n_subbufs_order) == 0
		    && header->data_size != 0xffffffff) {
			assert(header->sb_size != 0xffffffff);
			/*
			 * If it was, we only check the data_size. This is the
			 * amount of valid data at the beginning of the
			 * subbuffer.
			 */
			valid_length = header->data_size;
			DBG("writing full subbuffer (%ld) with valid_length = %ld", i_subbuf, valid_length);
		} else {
			/*
			 * If the subbuffer was not fully written, then we don't
			 * check data_size because it hasn't been written yet.
			 * Instead we check commit_seq and use it to choose a
			 * value for data_size. The viewer will need this value
			 * when parsing. Generally, this will happen only for
			 * the last subbuffer. However, if we have threads still
			 * holding reserved slots in the previous subbuffers,
			 * this could happen for other subbuffers prior to the
			 * last one. Note that when data_size is set, the
			 * commit_seq count is still at a value that shows the
			 * amount of valid data to read. It's only _after_
			 * writing data_size that commit_seq is updated to
			 * include the end-of-buffer padding.
			 */
			valid_length = commit_seq & (buf->subbuf_size - 1);
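			/*
			 * Example with assumed numbers: if subbuf_size = 4096
			 * and commit_seq = 9300 (two full passes of 4096 plus
			 * 1108 bytes committed in the current pass), the mask
			 * keeps 9300 & 4095 = 1108 valid bytes to salvage from
			 * this subbuffer.
			 */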
			DBG("writing unfull subbuffer (%ld) with valid_length = %ld", i_subbuf, valid_length);
			header->data_size = valid_length;
			header->sb_size = PAGE_ALIGN(valid_length);
		}
		if (callbacks->on_read_partial_subbuffer) {
			ret = callbacks->on_read_partial_subbuffer(callbacks, buf, i_subbuf, valid_length);
			/* Increment the consumed offset */
			if (ret >= 0)
				uatomic_add(&ustbuf->consumed, buf->subbuf_size);
			else
				break;	/* Error happened */
		} else {
			uatomic_add(&ustbuf->consumed, buf->subbuf_size);
		}
		if (i_subbuf == last_subbuf % buf->n_subbufs)
			break;
	}

	ltt_relay_print_buffer_errors(buf, buf->channel_cpu);
}