/* Copyright (C) 2009 Pierre-Marc Fournier
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <assert.h>
#include <byteswap.h>

#include "ust/ustconsumer.h"
#include "buffers.h"
#include "tracer.h"
#include "usterr_signal_safe.h"

/* This truncates to an offset in the buffer. */
#define USTD_BUFFER_TRUNC(offset, bufinfo) \
	((offset) & (~(((bufinfo)->subbuf_size*(bufinfo)->n_subbufs)-1)))
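/*
 * Note: this mask assumes subbuf_size * n_subbufs is a power of two, so
 * clearing the low bits rounds the offset down to a multiple of the total
 * buffer size.
 */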

#define LTT_MAGIC_NUMBER 0x00D6B7ED
#define LTT_REV_MAGIC_NUMBER 0xEDB7D600

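/*
 * Report diagnostics for a single sub-buffer of a dead buffer: warn about
 * the unread data and flag a commit count that does not correspond to a
 * fully filled sub-buffer.
 */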
static void ltt_relay_print_subbuffer_errors(
		struct buffer_info *buf,
		long cons_off, int cpu)
{
	struct ust_buffer *ust_buf = buf->bufstruct_mem;
	long cons_idx, commit_count, commit_count_mask, write_offset;

	cons_idx = SUBBUF_INDEX(cons_off, buf);
	commit_count = uatomic_read(&ust_buf->commit_seq[cons_idx]);
	commit_count_mask = (~0UL >> get_count_order(buf->n_subbufs));

	/*
	 * No need to order commit_count and write_offset reads because we
	 * execute after trace is stopped when there are no readers left.
	 */
	write_offset = uatomic_read(&ust_buf->offset);
	WARN("LTT : unread channel %s offset is %ld "
	     "and cons_off : %ld (cpu %d)\n",
	     buf->channel, write_offset, cons_off, cpu);
	/* Check each sub-buffer for non filled commit count */
	if (((commit_count - buf->subbuf_size) & commit_count_mask)
	    - (BUFFER_TRUNC(cons_off, buf) >> get_count_order(buf->n_subbufs)) != 0) {
		ERR("LTT : %s : subbuffer %lu has non filled "
		    "commit count [seq] [%lu].\n",
		    buf->channel, cons_idx, commit_count);
	}
	ERR("LTT : %s : commit count : %lu, subbuf size %d\n",
	    buf->channel, commit_count,
	    buf->subbuf_size);
}

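/*
 * Walk every sub-buffer between the consumed position and the write
 * position and report its errors.
 */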
static void ltt_relay_print_errors(struct buffer_info *buf, int cpu)
{
	struct ust_buffer *ust_buf = buf->bufstruct_mem;
	long cons_off;

	for (cons_off = uatomic_read(&ust_buf->consumed);
	     (SUBBUF_TRUNC(uatomic_read(&ust_buf->offset), buf)
	      - cons_off) > 0;
	     cons_off = SUBBUF_ALIGN(cons_off, buf))
		ltt_relay_print_subbuffer_errors(buf, cons_off, cpu);
}

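/*
 * Report lost events and corrupted sub-buffers for a buffer, then print
 * the per-sub-buffer errors.
 */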
static void ltt_relay_print_buffer_errors(struct buffer_info *buf, int cpu)
{
	struct ust_buffer *ust_buf = buf->bufstruct_mem;

	if (uatomic_read(&ust_buf->events_lost))
		ERR("channel %s: %ld events lost (cpu %d)",
		    buf->channel,
		    uatomic_read(&ust_buf->events_lost), cpu);
	if (uatomic_read(&ust_buf->corrupted_subbuffers))
		ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
		    buf->channel,
		    uatomic_read(&ust_buf->corrupted_subbuffers), cpu);

	ltt_relay_print_errors(buf, cpu);
}

/*
 * Returns the size of a subbuffer, i.e. the size that will need to be
 * written to disk. Returns (size_t)-1 if the magic number is not
 * recognized.
 *
 * @subbuf: pointer to the beginning of the subbuffer (the beginning of
 * its header)
 */
size_t subbuffer_data_size(void *subbuf)
{
	struct ltt_subbuffer_header *header = subbuf;
	int reverse;
	u32 data_size;

	if (header->magic_number == LTT_MAGIC_NUMBER) {
		reverse = 0;
	} else if (header->magic_number == LTT_REV_MAGIC_NUMBER) {
		reverse = 1;
	} else {
		/* Unknown magic number: reported as (size_t)-1. */
		return -1;
	}

	data_size = header->sb_size;
	if (reverse)
		data_size = bswap_32(data_size);

	return data_size;
}

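/*
 * Recover the readable data left in a buffer whose writer died: flush each
 * sub-buffer between the consumed position and the write position through
 * the on_read_partial_subbuffer callback, fixing up the header of any
 * sub-buffer that was only partially committed, then report any errors.
 */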
void finish_consuming_dead_subbuffer(struct ustconsumer_callbacks *callbacks, struct buffer_info *buf)
{
	struct ust_buffer *ust_buf = buf->bufstruct_mem;
	unsigned long n_subbufs_order = get_count_order(buf->n_subbufs);
	unsigned long commit_seq_mask = (~0UL >> n_subbufs_order);
	unsigned long cons_off;
	int ret;

	DBG("processing dead buffer (%s)", buf->name);
	DBG("consumed offset is %ld (%s)", uatomic_read(&ust_buf->consumed),
	    buf->name);
	DBG("write offset is %ld (%s)", uatomic_read(&ust_buf->offset),
	    buf->name);

	/*
	 * Iterate on subbuffers to recover, including the one the writer
	 * just wrote data into. Using write position - 1 since the writer
	 * position points into the position that is going to be written.
	 */
	for (cons_off = uatomic_read(&ust_buf->consumed);
	     (long) (SUBBUF_TRUNC(uatomic_read(&ust_buf->offset) - 1, buf)
		     - cons_off) >= 0;
	     cons_off = SUBBUF_ALIGN(cons_off, buf)) {
		/*
		 * commit_seq is the offset in the buffer of the end of the
		 * last sequential commit. Bytes beyond this limit cannot be
		 * recovered. This is a free-running counter.
		 */
		unsigned long commit_seq =
			uatomic_read(&ust_buf->commit_seq[SUBBUF_INDEX(cons_off, buf)]);
		struct ltt_subbuffer_header *header =
			(struct ltt_subbuffer_header *)((char *) buf->mem
				+ SUBBUF_INDEX(cons_off, buf) * buf->subbuf_size);
		unsigned long valid_length;

		/* Check if subbuf was fully written. This is from Mathieu's algorithm/paper. */
		if (((commit_seq - buf->subbuf_size) & commit_seq_mask)
		    - (USTD_BUFFER_TRUNC(uatomic_read(&ust_buf->consumed), buf) >> n_subbufs_order) == 0
		    && header->data_size != 0xffffffff) {
			assert(header->sb_size != 0xffffffff);
			/*
			 * If it was fully written, we only check the data_size.
			 * This is the amount of valid data at the beginning of
			 * the subbuffer.
			 */
			valid_length = header->data_size;
			DBG("writing full subbuffer (%ld) with valid_length = %ld",
			    SUBBUF_INDEX(cons_off, buf), valid_length);
		} else {
			/*
			 * If the subbuffer was not fully written, then we don't
			 * check data_size because it hasn't been written yet.
			 * Instead we check commit_seq and use it to choose a
			 * value for data_size. The viewer will need this value
			 * when parsing. Generally, this happens only for the
			 * last subbuffer; however, if threads were still holding
			 * reserved slots in earlier subbuffers, it can also
			 * happen for subbuffers prior to the last one. Note that
			 * when data_size is set, the commit_seq count is still
			 * at a value that shows the amount of valid data to
			 * read. It's only _after_ writing data_size that
			 * commit_seq is updated to include the end-of-buffer
			 * padding.
			 */
			valid_length = commit_seq & (buf->subbuf_size - 1);
			DBG("writing unfull subbuffer (%ld) with valid_length = %ld",
			    SUBBUF_INDEX(cons_off, buf), valid_length);
			header->data_size = valid_length;
			header->sb_size = PAGE_ALIGN(valid_length);
		}

		if (callbacks->on_read_partial_subbuffer) {
			ret = callbacks->on_read_partial_subbuffer(callbacks, buf,
								   SUBBUF_INDEX(cons_off, buf),
								   valid_length);
			if (ret < 0)
				break;	/* Error happened */
		}
	}
	/* Increment the consumed offset */
	uatomic_set(&ust_buf->consumed, cons_off);
	ltt_relay_print_buffer_errors(buf, buf->channel_cpu);
}