/* Copyright (C) 2009 Pierre-Marc Fournier
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <assert.h>
#include <byteswap.h>

#include "ust/ustconsumer.h"
#include "buffers.h"
#include "tracer.h"
#include "usterr.h"

/* This truncates to an offset in the buffer. */
#define USTD_BUFFER_TRUNC(offset, bufinfo) \
        ((offset) & (~(((bufinfo)->subbuf_size*(bufinfo)->n_subbufs)-1)))
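/*
 * For example (illustrative values only): with subbuf_size = 4096 and
 * n_subbufs = 8 the total buffer size is 32768 bytes, so
 * USTD_BUFFER_TRUNC(40000, buf) == 32768: the free-running offset is
 * rounded down to the start of its current pass over the buffer.
 */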

#define LTT_MAGIC_NUMBER 0x00D6B7ED
#define LTT_REV_MAGIC_NUMBER 0xEDB7D600
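/*
 * LTT_REV_MAGIC_NUMBER is the byte-swapped form of LTT_MAGIC_NUMBER; a
 * header carrying it was written by a tracer of the opposite endianness
 * (see subbuffer_data_size() below).
 */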

static void ltt_relay_print_subbuffer_errors(
                struct buffer_info *buf,
                long cons_off, int cpu)
{
        struct ust_buffer *ust_buf = buf->bufstruct_mem;
        long cons_idx, commit_count, commit_count_mask, write_offset;

        cons_idx = SUBBUF_INDEX(cons_off, buf);
        commit_count = uatomic_read(&ust_buf->commit_seq[cons_idx]);
        commit_count_mask = (~0UL >> get_count_order(buf->n_subbufs));

        /*
         * No need to order commit_count and write_offset reads because we
         * execute after trace is stopped when there are no readers left.
         */
        write_offset = uatomic_read(&ust_buf->offset);
        WARN("LTT : unread channel %s offset is %ld "
                "and cons_off : %ld (cpu %d)\n",
                buf->channel, write_offset, cons_off, cpu);
        /* Check each sub-buffer for a non-filled commit count */
        if (((commit_count - buf->subbuf_size) & commit_count_mask)
            - (BUFFER_TRUNC(cons_off, buf) >> get_count_order(buf->n_subbufs)) != 0) {
                ERR("LTT : %s : subbuffer %lu has non filled "
                        "commit count [seq] [%lu].\n",
                        buf->channel, cons_idx, commit_count);
        }
        ERR("LTT : %s : commit count : %lu, subbuf size %d\n",
                buf->channel, commit_count,
                buf->subbuf_size);
}

static void ltt_relay_print_errors(struct buffer_info *buf, int cpu)
{
        struct ust_buffer *ust_buf = buf->bufstruct_mem;
        long cons_off;

        for (cons_off = uatomic_read(&ust_buf->consumed);
             (SUBBUF_TRUNC(uatomic_read(&ust_buf->offset), buf)
              - cons_off) > 0;
             cons_off = SUBBUF_ALIGN(cons_off, buf))
                ltt_relay_print_subbuffer_errors(buf, cons_off, cpu);
}

static void ltt_relay_print_buffer_errors(struct buffer_info *buf, int cpu)
{
        struct ust_buffer *ust_buf = buf->bufstruct_mem;

        if (uatomic_read(&ust_buf->events_lost))
                ERR("channel %s: %ld events lost (cpu %d)",
                        buf->channel,
                        uatomic_read(&ust_buf->events_lost), cpu);
        if (uatomic_read(&ust_buf->corrupted_subbuffers))
                ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
                        buf->channel,
                        uatomic_read(&ust_buf->corrupted_subbuffers), cpu);

        ltt_relay_print_errors(buf, cpu);
}

/* Returns the size of a subbuffer. This is the size that
 * will need to be written to disk.
 *
 * @subbuf: pointer to the beginning of the subbuffer (the
 *          beginning of its header)
 */
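/* Note: the -1 returned for an unknown magic number is converted to the
 * unsigned return type, so callers must compare against (size_t)-1 to
 * detect the error. */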

size_t subbuffer_data_size(void *subbuf)
{
        struct ltt_subbuffer_header *header = subbuf;
        int reverse;
        u32 data_size;

        if (header->magic_number == LTT_MAGIC_NUMBER) {
                reverse = 0;
        }
        else if (header->magic_number == LTT_REV_MAGIC_NUMBER) {
                reverse = 1;
        }
        else {
                return -1;
        }

        data_size = header->sb_size;
        if (reverse)
                data_size = bswap_32(data_size);

        return data_size;
}

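/*
 * Walk the sub-buffers of a buffer whose producer has died, from the
 * current consumed position up to the last write position, and pass each
 * region that can still be recovered to the on_read_partial_subbuffer
 * callback. The consumed counter is advanced manually, and buffer errors
 * are reported at the end.
 */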
void finish_consuming_dead_subbuffer(struct ustconsumer_callbacks *callbacks, struct buffer_info *buf)
{
        struct ust_buffer *ustbuf = buf->bufstruct_mem;

        long write_offset = uatomic_read(&ustbuf->offset);
        long consumed_offset = uatomic_read(&ustbuf->consumed);

        long i_subbuf;

        DBG("processing dead buffer (%s)", buf->name);
        DBG("consumed offset is %ld (%s)", consumed_offset, buf->name);
        DBG("write offset is %ld (%s)", write_offset, buf->name);

        /* First subbuf that we need to consume now. It is not modulo'd.
         * consumed_offset is the next byte to consume. */
        long first_subbuf = consumed_offset / buf->subbuf_size;
        /* Last subbuf that we need to consume now. It is not modulo'd.
         * write_offset is the next place to write so write_offset-1 is the
         * last place written. */
        long last_subbuf = (write_offset - 1) / buf->subbuf_size;

        DBG("first_subbuf=%ld", first_subbuf);
        DBG("last_subbuf=%ld", last_subbuf);

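        /* If the writer is a whole buffer (n_subbufs sub-buffers) or more
         * ahead of the consumed position, it has wrapped over the unread
         * data and nothing can be recovered reliably. */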
        if (last_subbuf - first_subbuf >= buf->n_subbufs) {
                DBG("an overflow has occurred, nothing can be recovered");
                return;
        }

        /* Iterate on subbuffers to recover. */
        for (i_subbuf = first_subbuf % buf->n_subbufs; ; i_subbuf++, i_subbuf %= buf->n_subbufs) {
                /* commit_seq is the offset in the buffer of the end of the last sequential commit.
                 * Bytes beyond this limit cannot be recovered. This is a free-running counter. */
                long commit_seq = uatomic_read(&ustbuf->commit_seq[i_subbuf]);

                unsigned long valid_length = buf->subbuf_size;
                long n_subbufs_order = get_count_order(buf->n_subbufs);
                long commit_seq_mask = (~0UL >> n_subbufs_order);

                struct ltt_subbuffer_header *header = (struct ltt_subbuffer_header *)((char *)buf->mem + i_subbuf * buf->subbuf_size);

                /* Check if subbuf was fully written. This is from Mathieu's algorithm/paper. */
                /* FIXME: not sure data_size = 0xffffffff when the buffer is not full. It might
                 * take the value of the header size initially */
                if (((commit_seq - buf->subbuf_size) & commit_seq_mask)
                    - (USTD_BUFFER_TRUNC(consumed_offset, buf) >> n_subbufs_order) == 0
                    && header->data_size != 0xffffffff && header->sb_size != 0xffffffff) {
                        /* If it was, we only check the data_size. This is the amount of valid data at
                         * the beginning of the subbuffer. */
                        valid_length = header->data_size;
                        DBG("writing full subbuffer (%ld) with valid_length = %ld", i_subbuf, valid_length);
                }
                else {
                        /* If the subbuffer was not fully written, then we don't check data_size because
                         * it hasn't been written yet. Instead we check commit_seq and use it to choose
                         * a value for data_size. The viewer will need this value when parsing.
                         */

                        valid_length = commit_seq & (buf->subbuf_size-1);
                        DBG("writing unfull subbuffer (%ld) with valid_length = %ld", i_subbuf, valid_length);
                        header->data_size = valid_length;
                        header->sb_size = PAGE_ALIGN(valid_length);
                        assert(i_subbuf == (last_subbuf % buf->n_subbufs));
                }

                /* TODO: check on_read_partial_subbuffer return value */
                if (callbacks->on_read_partial_subbuffer)
                        callbacks->on_read_partial_subbuffer(callbacks, buf, i_subbuf, valid_length);

                /* Manually increment the consumed offset */
                /* TODO ybrosseau 2011-03-02: Should only be done if the previous read was successful */
                uatomic_add(&ustbuf->consumed, buf->subbuf_size);

                if (i_subbuf == last_subbuf % buf->n_subbufs)
                        break;
        }

        ltt_relay_print_buffer_errors(buf, buf->channel_cpu);
}