Commit | Line | Data |
---|---|---|
b5b073e2 PMF |
1 | /* |
2 | * buffers.c | |
3 | * LTTng userspace tracer buffering system | |
4 | * | |
5 | * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca) | |
6 | * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca) | |
7 | * | |
8 | * This library is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU Lesser General Public | |
10 | * License as published by the Free Software Foundation; either | |
11 | * version 2.1 of the License, or (at your option) any later version. | |
12 | * | |
13 | * This library is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
16 | * Lesser General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU Lesser General Public | |
19 | * License along with this library; if not, write to the Free Software | |
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
21 | */ | |
22 | ||
204141ee | 23 | #include <unistd.h> |
b5b073e2 PMF |
24 | #include <sys/mman.h> |
25 | #include <sys/ipc.h> | |
26 | #include <sys/shm.h> | |
27 | #include <fcntl.h> | |
909bc43f | 28 | #include <stdlib.h> |
518d7abb PMF |
29 | |
30 | #include <ust/clock.h> | |
31 | ||
b5b073e2 PMF |
32 | #include "buffers.h" |
33 | #include "channels.h" | |
34 | #include "tracer.h" | |
35 | #include "tracercore.h" | |
36 | #include "usterr.h" | |
37 | ||
b73a4c47 PMF |
38 | struct ltt_reserve_switch_offsets { |
39 | long begin, end, old; | |
40 | long begin_switch, end_switch_current, end_switch_old; | |
41 | size_t before_hdr_pad, size; | |
42 | }; | |
43 | ||
44 | ||
b5b073e2 | 45 | static DEFINE_MUTEX(ust_buffers_channels_mutex); |
0222e121 | 46 | static CDS_LIST_HEAD(ust_buffers_channels); |
b5b073e2 | 47 | |
204141ee PMF |
48 | static int get_n_cpus(void) |
49 | { | |
50 | int result; | |
51 | static int n_cpus = 0; | |
52 | ||
c7dc133c PMF |
53 | if(!n_cpus) { |
54 | /* On Linux, when some processors are offline | |
55 | * _SC_NPROCESSORS_CONF counts the offline | |
56 | * processors, whereas _SC_NPROCESSORS_ONLN | |
57 | * does not. If we used _SC_NPROCESSORS_ONLN, | |
58 | * getcpu() could return a value greater than | |
59 | * this sysconf, in which case the arrays | |
60 | * indexed by processor would overflow. | |
61 | */ | |
62 | result = sysconf(_SC_NPROCESSORS_CONF); | |
63 | if(result == -1) { | |
64 | return -1; | |
65 | } | |
66 | ||
67 | n_cpus = result; | |
204141ee PMF |
68 | } |
69 | ||
c7dc133c | 70 | return n_cpus; |
204141ee PMF |
71 | } |
72 | ||
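As a side note to the comment above, here is a minimal standalone sketch (not part of buffers.c) contrasting the two sysconf queries: on a machine with offlined CPUs, _SC_NPROCESSORS_CONF keeps counting them while _SC_NPROCESSORS_ONLN does not, which is why the configured count is the safe bound for sizing per-cpu arrays.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Configured count: includes offline CPUs, safe for array sizing. */
	long conf = sysconf(_SC_NPROCESSORS_CONF);
	/* Online count: can shrink below the configured count at runtime. */
	long onln = sysconf(_SC_NPROCESSORS_ONLN);

	printf("configured: %ld, online: %ld\n", conf, onln);
	return 0;
}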
bb3132c8 MD |
73 | /** |
74 | * _ust_buffers_strncpy_fixup - Fix an incomplete string in a ltt_relay buffer. | |
75 | * @buf : buffer | |
76 | * @offset : offset within the buffer | |
77 | * @len : length to write | |
78 | * @copied: string actually copied | |
79 | * @terminated: does string end with \0 | |
b73a4c47 | 80 | * |
bb3132c8 | 81 | * Fills string with "X" if incomplete. |
b73a4c47 | 82 | */ |
bb3132c8 MD |
83 | void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset, |
84 | size_t len, size_t copied, int terminated) | |
b73a4c47 | 85 | { |
bb3132c8 MD |
86 | size_t buf_offset, cpy; |
87 | ||
88 | if (copied == len) { | |
89 | /* | |
90 | * Deal with non-terminated string. | |
91 | */ | |
92 | assert(!terminated); | |
93 | offset += copied - 1; | |
94 | buf_offset = BUFFER_OFFSET(offset, buf->chan); | |
95 | /* | |
96 | * Underlying layer should never ask for writes across | |
97 | * subbuffers. | |
98 | */ | |
99 | assert(buf_offset | |
100 | < buf->chan->subbuf_size*buf->chan->subbuf_cnt); | |
101 | ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1); | |
102 | return; | |
103 | } | |
104 | ||
105 | /* | |
106 | * Deal with incomplete string. | |
107 | * Overwrite string's \0 with X too. | |
108 | */ | |
109 | cpy = copied - 1; | |
110 | assert(terminated); | |
111 | len -= cpy; | |
112 | offset += cpy; | |
113 | buf_offset = BUFFER_OFFSET(offset, buf->chan); | |
114 | ||
115 | /* | |
116 | * Underlying layer should never ask for writes across subbuffers. | |
117 | */ | |
118 | assert(buf_offset | |
119 | < buf->chan->subbuf_size*buf->chan->subbuf_cnt); | |
b73a4c47 | 120 | |
bb3132c8 MD |
121 | ust_buffers_do_memset(buf->buf_data + buf_offset, |
122 | 'X', len); | |
b73a4c47 | 123 | |
bb3132c8 MD |
124 | /* |
125 | * Overwrite last 'X' with '\0'. | |
126 | */ | |
127 | offset += len - 1; | |
128 | buf_offset = BUFFER_OFFSET(offset, buf->chan); | |
129 | /* | |
130 | * Underlying layer should never ask for writes across subbuffers. | |
131 | */ | |
132 | assert(buf_offset | |
133 | < buf->chan->subbuf_size*buf->chan->subbuf_cnt); | |
134 | ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1); | |
b73a4c47 PMF |
135 | } |
136 | ||
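A small standalone illustration of the padding rule implemented above (hypothetical values, not tracer code): an 8-byte slot holding the terminated string "hi" gets its unused tail overwritten with 'X', and the final byte is restored to '\0', matching the terminated branch of _ust_buffers_strncpy_fixup().

#include <assert.h>
#include <string.h>

int main(void)
{
	char slot[8];
	const char *src = "hi";
	size_t copied = strlen(src) + 1;	/* 3 bytes actually copied, '\0' included */

	memcpy(slot, src, copied);
	/* Overwrite the string's '\0' and the unused tail with 'X'... */
	memset(slot + copied - 1, 'X', sizeof(slot) - copied + 1);
	/* ...then put a terminator back in the very last byte. */
	slot[sizeof(slot) - 1] = '\0';

	assert(memcmp(slot, "hiXXXXX", sizeof(slot)) == 0);
	return 0;
}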
ef15e552 NC |
137 | static void ltt_buffer_begin(struct ust_buffer *buf, |
138 | u64 tsc, unsigned int subbuf_idx) | |
139 | { | |
140 | struct ust_channel *channel = buf->chan; | |
141 | struct ltt_subbuffer_header *header = | |
142 | (struct ltt_subbuffer_header *) | |
143 | ust_buffers_offset_address(buf, | |
144 | subbuf_idx * buf->chan->subbuf_size); | |
b5b073e2 | 145 | |
ef15e552 NC |
146 | header->cycle_count_begin = tsc; |
147 | header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */ | |
148 | header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */ | |
149 | /* FIXME: add memory barrier? */ | |
150 | ltt_write_trace_header(channel->trace, header); | |
151 | } | |
152 | ||
153 | static int map_buf_data(struct ust_buffer *buf, size_t *size) | |
b5b073e2 PMF |
154 | { |
155 | void *ptr; | |
156 | int result; | |
157 | ||
158 | *size = PAGE_ALIGN(*size); | |
159 | ||
160 | result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700); | |
ef15e552 | 161 | if (result < 0 && errno == EINVAL) { |
b5b073e2 PMF |
162 | ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased."); |
163 | return -1; | |
ef15e552 | 164 | } else if (result < 0) { |
b5b073e2 PMF |
165 | PERROR("shmget"); |
166 | return -1; | |
167 | } | |
168 | ||
169 | ptr = shmat(buf->shmid, NULL, 0); | |
ef15e552 | 170 | if (ptr == (void *) -1) { |
b5b073e2 PMF |
171 | perror("shmat"); |
172 | goto destroy_shmem; | |
173 | } | |
174 | ||
175 | /* Already mark the shared memory for destruction. This will occur only | |
176 | * when all users have detached. | |
177 | */ | |
178 | result = shmctl(buf->shmid, IPC_RMID, NULL); | |
179 | if(result == -1) { | |
180 | perror("shmctl"); | |
181 | return -1; | |
182 | } | |
183 | ||
184 | buf->buf_data = ptr; | |
185 | buf->buf_size = *size; | |
186 | ||
187 | return 0; | |
188 | ||
ef15e552 | 189 | destroy_shmem: |
b5b073e2 PMF |
190 | result = shmctl(buf->shmid, IPC_RMID, NULL); |
191 | if(result == -1) { | |
192 | perror("shmctl"); | |
193 | } | |
194 | ||
195 | return -1; | |
196 | } | |
197 | ||
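The shmget()/shmat()/shmctl(IPC_RMID) sequence above relies on a System V shm property worth spelling out: a segment marked with IPC_RMID is only destroyed once the last attached process detaches. A minimal standalone sketch of that lifecycle (illustrative only; it uses IPC_PRIVATE rather than the getpid() key used above):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <unistd.h>

int main(void)
{
	int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0700);
	if (shmid < 0) {
		perror("shmget");
		return 1;
	}

	void *p = shmat(shmid, NULL, 0);
	if (p == (void *) -1) {
		perror("shmat");
		return 1;
	}

	/* Mark for destruction now; actual teardown happens at last detach. */
	if (shmctl(shmid, IPC_RMID, NULL) < 0) {
		perror("shmctl");
		return 1;
	}

	/* ... use the mapping ... */

	shmdt(p);	/* last detach: the kernel frees the segment here */
	return 0;
}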
ef15e552 | 198 | static int open_buf(struct ust_channel *chan, int cpu) |
b5b073e2 | 199 | { |
ef15e552 NC |
200 | int result, fds[2]; |
201 | unsigned int j; | |
202 | struct ust_trace *trace = chan->trace; | |
203 | struct ust_buffer *buf = chan->buf[cpu]; | |
204 | unsigned int n_subbufs = chan->subbuf_cnt; | |
b5b073e2 | 205 | |
ef15e552 NC |
206 | |
207 | result = map_buf_data(buf, &chan->alloc_size); | |
208 | if (result < 0) | |
204141ee | 209 | return -1; |
b5b073e2 | 210 | |
ef15e552 NC |
211 | buf->commit_count = |
212 | zmalloc(sizeof(*buf->commit_count) * n_subbufs); | |
213 | if (!buf->commit_count) | |
214 | goto unmap_buf; | |
b5b073e2 | 215 | |
ef15e552 NC |
216 | result = pipe(fds); |
217 | if (result < 0) { | |
218 | PERROR("pipe"); | |
219 | goto free_commit_count; | |
220 | } | |
221 | buf->data_ready_fd_read = fds[0]; | |
222 | buf->data_ready_fd_write = fds[1]; | |
b5b073e2 | 223 | |
ef15e552 NC |
224 | buf->cpu = cpu; |
225 | buf->chan = chan; | |
b5b073e2 | 226 | |
ef15e552 NC |
227 | uatomic_set(&buf->offset, ltt_subbuffer_header_size()); |
228 | uatomic_set(&buf->consumed, 0); | |
229 | uatomic_set(&buf->active_readers, 0); | |
230 | for (j = 0; j < n_subbufs; j++) { | |
231 | uatomic_set(&buf->commit_count[j].cc, 0); | |
232 | uatomic_set(&buf->commit_count[j].cc_sb, 0); | |
b5b073e2 PMF |
233 | } |
234 | ||
ef15e552 | 235 | ltt_buffer_begin(buf, trace->start_tsc, 0); |
b5b073e2 | 236 | |
ef15e552 NC |
237 | uatomic_add(&buf->commit_count[0].cc, ltt_subbuffer_header_size()); |
238 | ||
239 | uatomic_set(&buf->events_lost, 0); | |
240 | uatomic_set(&buf->corrupted_subbuffers, 0); | |
241 | ||
242 | memset(buf->commit_seq, 0, sizeof(buf->commit_seq[0]) * n_subbufs); | |
243 | ||
244 | return 0; | |
245 | ||
246 | free_commit_count: | |
247 | free(buf->commit_count); | |
248 | ||
249 | unmap_buf: | |
250 | if (shmdt(buf->buf_data) < 0) { | |
251 | PERROR("shmdt failed"); | |
252 | } | |
253 | ||
254 | return -1; | |
b5b073e2 PMF |
255 | } |
256 | ||
ef15e552 NC |
257 | static void ltt_relay_print_buffer_errors(struct ust_channel *chan, int cpu); |
258 | ||
259 | static void close_buf(struct ust_buffer *buf) | |
b5b073e2 | 260 | { |
ef15e552 NC |
261 | struct ust_channel *chan = buf->chan; |
262 | int cpu = buf->cpu; | |
204141ee | 263 | int result; |
b5b073e2 | 264 | |
ef15e552 NC |
265 | result = shmdt(buf->buf_data); |
266 | if (result < 0) { | |
267 | PERROR("shmdt"); | |
268 | } | |
b5b073e2 | 269 | |
ef15e552 | 270 | free(buf->commit_count); |
b5b073e2 | 271 | |
ef15e552 NC |
272 | result = close(buf->data_ready_fd_read); |
273 | if (result < 0) { | |
274 | PERROR("close"); | |
275 | } | |
b5b073e2 | 276 | |
ef15e552 NC |
277 | result = close(buf->data_ready_fd_write); |
278 | if (result < 0 && errno != EBADF) { | |
279 | PERROR("close"); | |
280 | } | |
b5b073e2 | 281 | |
ef15e552 NC |
282 | /* FIXME: This spews out errors, are they real?: |
283 | * ltt_relay_print_buffer_errors(chan, cpu); */ | |
b5b073e2 PMF |
284 | } |
285 | ||
b5b073e2 | 286 | |
ef15e552 NC |
287 | static int open_channel(struct ust_channel *chan, size_t subbuf_size, |
288 | size_t subbuf_cnt) | |
b5b073e2 | 289 | { |
204141ee PMF |
290 | int i; |
291 | int result; | |
292 | ||
b5b073e2 PMF |
293 | if(subbuf_size == 0 || subbuf_cnt == 0) |
294 | return -1; | |
295 | ||
b73a4c47 PMF |
296 | /* Check that the subbuffer size is larger than a page. */ |
297 | WARN_ON_ONCE(subbuf_size < PAGE_SIZE); | |
298 | ||
299 | /* | |
300 | * Make sure the number of subbuffers and the subbuffer size are powers of 2. | |
301 | */ | |
302 | WARN_ON_ONCE(hweight32(subbuf_size) != 1); | |
303 | WARN_ON(hweight32(subbuf_cnt) != 1); | |
304 | ||
b5b073e2 PMF |
305 | chan->version = UST_CHANNEL_VERSION; |
306 | chan->subbuf_cnt = subbuf_cnt; | |
307 | chan->subbuf_size = subbuf_size; | |
308 | chan->subbuf_size_order = get_count_order(subbuf_size); | |
b73a4c47 | 309 | chan->alloc_size = subbuf_size * subbuf_cnt; |
204141ee | 310 | |
f7b16408 | 311 | pthread_mutex_lock(&ust_buffers_channels_mutex); |
ef15e552 NC |
312 | for (i=0; i < chan->n_cpus; i++) { |
313 | result = open_buf(chan, i); | |
204141ee PMF |
314 | if (result == -1) |
315 | goto error; | |
316 | } | |
0222e121 | 317 | cds_list_add(&chan->list, &ust_buffers_channels); |
f7b16408 | 318 | pthread_mutex_unlock(&ust_buffers_channels_mutex); |
b5b073e2 PMF |
319 | |
320 | return 0; | |
321 | ||
204141ee PMF |
322 | /* Jump directly inside the loop to close the buffers that were already |
323 | * opened. */ | |
324 | for(; i>=0; i--) { | |
ef15e552 | 325 | close_buf(chan->buf[i]); |
204141ee | 326 | error: |
120b0ec3 | 327 | do {} while(0); |
204141ee PMF |
328 | } |
329 | ||
f7b16408 | 330 | pthread_mutex_unlock(&ust_buffers_channels_mutex); |
b5b073e2 PMF |
331 | return -1; |
332 | } | |
333 | ||
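The hweight32()/get_count_order() checks used above amount to requiring exactly one set bit and taking log2 of the size. An equivalent, portable sketch (illustrative helpers, not part of the tracer):

#include <assert.h>

static int is_power_of_two(unsigned int x)
{
	return x != 0 && (x & (x - 1)) == 0;	/* exactly one bit set */
}

static int order_of(unsigned int x)
{
	int order = 0;

	while ((1u << order) < x)
		order++;
	return order;		/* smallest order with 2^order >= x */
}

int main(void)
{
	assert(is_power_of_two(4096));
	assert(!is_power_of_two(3000));
	assert(order_of(4096) == 12);
	return 0;
}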
ef15e552 | 334 | static void close_channel(struct ust_channel *chan) |
b5b073e2 | 335 | { |
204141ee PMF |
336 | int i; |
337 | if(!chan) | |
b5b073e2 PMF |
338 | return; |
339 | ||
f7b16408 | 340 | pthread_mutex_lock(&ust_buffers_channels_mutex); |
204141ee PMF |
341 | for(i=0; i<chan->n_cpus; i++) { |
342 | /* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't | |
343 | * initialize to NULL so we cannot use this check. Should we? */ | |
344 | //ust// if (chan->buf[i]) | |
ef15e552 | 345 | close_buf(chan->buf[i]); |
204141ee | 346 | } |
b5b073e2 | 347 | |
0222e121 | 348 | cds_list_del(&chan->list); |
ef15e552 | 349 | |
f7b16408 | 350 | pthread_mutex_unlock(&ust_buffers_channels_mutex); |
b5b073e2 PMF |
351 | } |
352 | ||
b5b073e2 PMF |
353 | static void ltt_force_switch(struct ust_buffer *buf, |
354 | enum force_switch_mode mode); | |
355 | ||
b5b073e2 | 356 | |
b5b073e2 PMF |
357 | |
358 | /* | |
359 | * offset is assumed to never be 0 here : never deliver a completely empty | |
360 | * subbuffer. The lost size is between 0 and subbuf_size-1. | |
361 | */ | |
b73a4c47 | 362 | static notrace void ltt_buffer_end(struct ust_buffer *buf, |
b5b073e2 PMF |
363 | u64 tsc, unsigned int offset, unsigned int subbuf_idx) |
364 | { | |
365 | struct ltt_subbuffer_header *header = | |
366 | (struct ltt_subbuffer_header *) | |
b73a4c47 | 367 | ust_buffers_offset_address(buf, |
b5b073e2 | 368 | subbuf_idx * buf->chan->subbuf_size); |
8c36d1ee | 369 | u32 data_size = SUBBUF_OFFSET(offset - 1, buf->chan) + 1; |
b5b073e2 | 370 | |
8c36d1ee PMF |
371 | header->data_size = data_size; |
372 | header->sb_size = PAGE_ALIGN(data_size); | |
b5b073e2 | 373 | header->cycle_count_end = tsc; |
b102c2b0 PMF |
374 | header->events_lost = uatomic_read(&buf->events_lost); |
375 | header->subbuf_corrupt = uatomic_read(&buf->corrupted_subbuffers); | |
719569e4 PMF |
376 | if(unlikely(header->events_lost > 0)) { |
377 | DBG("Some events (%d) were lost in %s_%d", header->events_lost, buf->chan->channel_name, buf->cpu); | |
378 | } | |
b5b073e2 PMF |
379 | } |
380 | ||
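The 0xFFFFFFFF sentinels written by ltt_buffer_begin() and only replaced here in ltt_buffer_end() give readers a cheap crash check; a hedged reader-side sketch (illustration only, the real check belongs to the consumer/viewer):

static int subbuf_looks_crashed(const struct ltt_subbuffer_header *header)
{
	/* Still holding the begin-time sentinel: the sub-buffer was never closed. */
	return header->data_size == 0xFFFFFFFF;
}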
381 | /* | |
382 | * This function should not be called from NMI interrupt context | |
383 | */ | |
384 | static notrace void ltt_buf_unfull(struct ust_buffer *buf, | |
385 | unsigned int subbuf_idx, | |
386 | long offset) | |
387 | { | |
b5b073e2 PMF |
388 | } |
389 | ||
b73a4c47 | 390 | /* |
0222e121 | 391 | * Promote compiler cmm_barrier to a smp_mb(). |
b73a4c47 PMF |
392 | * For the specific LTTng case, this IPI call should be removed if the |
393 | * architecture does not reorder writes. This should eventually be provided by | |
394 | * a separate architecture-specific infrastructure. | |
395 | */ | |
e17571a5 PMF |
396 | //ust// static void remote_mb(void *info) |
397 | //ust// { | |
398 | //ust// smp_mb(); | |
399 | //ust// } | |
b73a4c47 PMF |
400 | |
401 | int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed) | |
b5b073e2 PMF |
402 | { |
403 | struct ust_channel *channel = buf->chan; | |
404 | long consumed_old, consumed_idx, commit_count, write_offset; | |
b73a4c47 PMF |
405 | //ust// int retval; |
406 | ||
b102c2b0 | 407 | consumed_old = uatomic_read(&buf->consumed); |
b5b073e2 | 408 | consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan); |
b102c2b0 | 409 | commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb); |
b5b073e2 PMF |
410 | /* |
411 | * Make sure we read the commit count before reading the buffer | |
412 | * data and the write offset. Correct consumed offset ordering | |
413 | * wrt commit count is ensured by the use of cmpxchg to update | |
414 | * the consumed offset. | |
b73a4c47 PMF |
415 | * smp_call_function_single can fail if the remote CPU is offline, |
416 | * this is OK because then there is no wmb to execute there. | |
417 | * If our thread is executing on the same CPU as the one the buffer | |
418 | * belongs to, we don't have to synchronize at all. If we are | |
0222e121 | 419 | * migrated, the scheduler will take care of the memory cmm_barriers. |
b73a4c47 PMF |
420 | * Normally, smp_call_function_single() should ensure program order when |
421 | * executing the remote function, which implies that it surrounds the | |
422 | * function execution with : | |
423 | * smp_mb() | |
424 | * send IPI | |
425 | * csd_lock_wait | |
426 | * recv IPI | |
427 | * smp_mb() | |
428 | * exec. function | |
429 | * smp_mb() | |
430 | * csd unlock | |
431 | * smp_mb() | |
432 | * | |
433 | * However, smp_call_function_single() does not seem to clearly execute | |
ef15e552 | 434 | * such barriers. It relies on spinlock semantics to provide the barrier |
b73a4c47 PMF |
435 | * before executing the IPI and, when busy-looping, csd_lock_wait only |
436 | * executes smp_mb() when it has to wait for the other CPU. | |
437 | * | |
438 | * I don't trust this code. Therefore, let's add the smp_mb() sequence | |
439 | * required ourselves, even if duplicated. It has no performance impact | |
440 | * anyway. | |
441 | * | |
0222e121 | 442 | * smp_mb() is needed because cmm_smp_rmb() and cmm_smp_wmb() only order read vs |
b73a4c47 | 443 | * read and write vs write. They do not ensure core synchronization. We |
0222e121 | 444 | * really have to ensure total order between the 3 cmm_barriers running on |
b73a4c47 PMF |
445 | * the 2 CPUs. |
446 | */ | |
447 | //ust// #ifdef LTT_NO_IPI_BARRIER | |
448 | /* | |
449 | * Local rmb to match the remote wmb to read the commit count before the | |
450 | * buffer data and the write offset. | |
b5b073e2 | 451 | */ |
0222e121 | 452 | cmm_smp_rmb(); |
b73a4c47 PMF |
453 | //ust// #else |
454 | //ust// if (raw_smp_processor_id() != buf->cpu) { | |
455 | //ust// smp_mb(); /* Total order with IPI handler smp_mb() */ | |
456 | //ust// smp_call_function_single(buf->cpu, remote_mb, NULL, 1); | |
457 | //ust// smp_mb(); /* Total order with IPI handler smp_mb() */ | |
458 | //ust// } | |
459 | //ust// #endif | |
460 | ||
b102c2b0 | 461 | write_offset = uatomic_read(&buf->offset); |
b5b073e2 PMF |
462 | /* |
463 | * Check that the subbuffer we are trying to consume has been | |
464 | * already fully committed. | |
465 | */ | |
466 | if (((commit_count - buf->chan->subbuf_size) | |
467 | & channel->commit_count_mask) | |
468 | - (BUFFER_TRUNC(consumed_old, buf->chan) | |
469 | >> channel->n_subbufs_order) | |
470 | != 0) { | |
471 | return -EAGAIN; | |
472 | } | |
473 | /* | |
474 | * Check that we are not about to read the same subbuffer in | |
475 | * which the writer head is. | |
476 | */ | |
477 | if ((SUBBUF_TRUNC(write_offset, buf->chan) | |
478 | - SUBBUF_TRUNC(consumed_old, buf->chan)) | |
479 | == 0) { | |
480 | return -EAGAIN; | |
481 | } | |
482 | ||
b73a4c47 PMF |
483 | /* FIXME: is this ok to disable the reading feature? */ |
484 | //ust// retval = update_read_sb_index(buf, consumed_idx); | |
485 | //ust// if (retval) | |
486 | //ust// return retval; | |
487 | ||
488 | *consumed = consumed_old; | |
489 | ||
b5b073e2 PMF |
490 | return 0; |
491 | } | |
492 | ||
b73a4c47 | 493 | int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old) |
b5b073e2 PMF |
494 | { |
495 | long consumed_new, consumed_old; | |
496 | ||
b102c2b0 | 497 | consumed_old = uatomic_read(&buf->consumed); |
b5b073e2 PMF |
498 | consumed_old = consumed_old & (~0xFFFFFFFFL); |
499 | consumed_old = consumed_old | uconsumed_old; | |
500 | consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan); | |
501 | ||
502 | //ust// spin_lock(<t_buf->full_lock); | |
b102c2b0 | 503 | if (uatomic_cmpxchg(&buf->consumed, consumed_old, |
b5b073e2 PMF |
504 | consumed_new) |
505 | != consumed_old) { | |
506 | /* We have been pushed by the writer : the last | |
507 | * buffer read _is_ corrupted! It can also | |
508 | * happen if this is a buffer we never got. */ | |
509 | //ust// spin_unlock(<t_buf->full_lock); | |
510 | return -EIO; | |
511 | } else { | |
512 | /* tell the client that buffer is now unfull */ | |
513 | int index; | |
514 | long data; | |
515 | index = SUBBUF_INDEX(consumed_old, buf->chan); | |
516 | data = BUFFER_OFFSET(consumed_old, buf->chan); | |
517 | ltt_buf_unfull(buf, index, data); | |
518 | //ust// spin_unlock(<t_buf->full_lock); | |
519 | } | |
520 | return 0; | |
521 | } | |
522 | ||
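Taken together, the two functions above suggest the consumer loop sketched below (a hedged sketch, not code from this file; the copy-out step is left as a comment): -EAGAIN from the get means nothing is fully committed yet, and -EIO from the put means the writer overwrote the sub-buffer that was just read.

static int consume_pending(struct ust_buffer *buf)
{
	long consumed;
	int ret;

	ret = ust_buffers_get_subbuf(buf, &consumed);
	if (ret == -EAGAIN)
		return 0;	/* nothing fully committed yet */
	if (ret < 0)
		return ret;

	/* ... copy out the sub-buffer that starts at 'consumed' here ... */

	/* -EIO means the writer pushed us and the data we read is corrupted. */
	return ust_buffers_put_subbuf(buf, consumed);
}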
523 | static void ltt_relay_print_subbuffer_errors( | |
524 | struct ust_channel *channel, | |
204141ee | 525 | long cons_off, int cpu) |
b5b073e2 | 526 | { |
204141ee | 527 | struct ust_buffer *ltt_buf = channel->buf[cpu]; |
b73a4c47 | 528 | long cons_idx, commit_count, commit_count_sb, write_offset; |
b5b073e2 PMF |
529 | |
530 | cons_idx = SUBBUF_INDEX(cons_off, channel); | |
b102c2b0 PMF |
531 | commit_count = uatomic_read(<t_buf->commit_count[cons_idx].cc); |
532 | commit_count_sb = uatomic_read(<t_buf->commit_count[cons_idx].cc_sb); | |
b73a4c47 | 533 | |
b5b073e2 PMF |
534 | /* |
535 | * No need to order commit_count and write_offset reads because we | |
536 | * execute after trace is stopped when there are no readers left. | |
537 | */ | |
b102c2b0 | 538 | write_offset = uatomic_read(<t_buf->offset); |
b5b073e2 | 539 | WARN( "LTT : unread channel %s offset is %ld " |
b73a4c47 PMF |
540 | "and cons_off : %ld (cpu %d)\n", |
541 | channel->channel_name, write_offset, cons_off, cpu); | |
b5b073e2 PMF |
542 | /* Check each sub-buffer for non filled commit count */ |
543 | if (((commit_count - channel->subbuf_size) & channel->commit_count_mask) | |
544 | - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0) { | |
545 | ERR("LTT : %s : subbuffer %lu has non filled " | |
b73a4c47 PMF |
546 | "commit count [cc, cc_sb] [%lu,%lu].\n", |
547 | channel->channel_name, cons_idx, commit_count, commit_count_sb); | |
b5b073e2 PMF |
548 | } |
549 | ERR("LTT : %s : commit count : %lu, subbuf size %zd\n", | |
550 | channel->channel_name, commit_count, | |
551 | channel->subbuf_size); | |
552 | } | |
553 | ||
b73a4c47 | 554 | static void ltt_relay_print_errors(struct ust_trace *trace, |
204141ee | 555 | struct ust_channel *channel, int cpu) |
b5b073e2 | 556 | { |
204141ee | 557 | struct ust_buffer *ltt_buf = channel->buf[cpu]; |
b5b073e2 PMF |
558 | long cons_off; |
559 | ||
4292ed8a PMF |
560 | /* |
561 | * Can be called in the error path of allocation when | |
562 | * trans_channel_data is not yet set. | |
563 | */ | |
564 | if (!channel) | |
565 | return; | |
566 | ||
e17571a5 PMF |
567 | //ust// for (cons_off = 0; cons_off < rchan->alloc_size; |
568 | //ust// cons_off = SUBBUF_ALIGN(cons_off, rchan)) | |
569 | //ust// ust_buffers_print_written(ltt_chan, cons_off, cpu); | |
b102c2b0 PMF |
570 | for (cons_off = uatomic_read(<t_buf->consumed); |
571 | (SUBBUF_TRUNC(uatomic_read(<t_buf->offset), | |
b5b073e2 PMF |
572 | channel) |
573 | - cons_off) > 0; | |
574 | cons_off = SUBBUF_ALIGN(cons_off, channel)) | |
204141ee | 575 | ltt_relay_print_subbuffer_errors(channel, cons_off, cpu); |
b5b073e2 PMF |
576 | } |
577 | ||
204141ee | 578 | static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu) |
b5b073e2 | 579 | { |
b73a4c47 | 580 | struct ust_trace *trace = channel->trace; |
204141ee | 581 | struct ust_buffer *ltt_buf = channel->buf[cpu]; |
b5b073e2 | 582 | |
b102c2b0 | 583 | if (uatomic_read(<t_buf->events_lost)) |
b73a4c47 | 584 | ERR("channel %s: %ld events lost (cpu %d)", |
b5b073e2 | 585 | channel->channel_name, |
b102c2b0 PMF |
586 | uatomic_read(<t_buf->events_lost), cpu); |
587 | if (uatomic_read(<t_buf->corrupted_subbuffers)) | |
b73a4c47 | 588 | ERR("channel %s : %ld corrupted subbuffers (cpu %d)", |
b5b073e2 | 589 | channel->channel_name, |
b102c2b0 | 590 | uatomic_read(<t_buf->corrupted_subbuffers), cpu); |
b5b073e2 | 591 | |
204141ee | 592 | ltt_relay_print_errors(trace, channel, cpu); |
b5b073e2 PMF |
593 | } |
594 | ||
ef15e552 | 595 | static int map_buf_structs(struct ust_channel *chan) |
b5b073e2 PMF |
596 | { |
597 | void *ptr; | |
598 | int result; | |
204141ee PMF |
599 | size_t size; |
600 | int i; | |
b5b073e2 | 601 | |
204141ee | 602 | size = PAGE_ALIGN(1); |
b5b073e2 | 603 | |
204141ee | 604 | for(i=0; i<chan->n_cpus; i++) { |
b5b073e2 | 605 | |
204141ee PMF |
606 | result = chan->buf_struct_shmids[i] = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700); |
607 | if(result == -1) { | |
608 | PERROR("shmget"); | |
609 | goto destroy_previous; | |
610 | } | |
b5b073e2 | 611 | |
204141ee PMF |
612 | ptr = shmat(chan->buf_struct_shmids[i], NULL, 0); |
613 | if(ptr == (void *) -1) { | |
614 | perror("shmat"); | |
615 | goto destroy_shm; | |
616 | } | |
617 | ||
618 | /* Already mark the shared memory for destruction. This will occur only | |
619 | * when all users have detached. | |
620 | */ | |
621 | result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL); | |
622 | if(result == -1) { | |
623 | perror("shmctl"); | |
624 | goto destroy_previous; | |
625 | } | |
626 | ||
627 | chan->buf[i] = ptr; | |
b5b073e2 PMF |
628 | } |
629 | ||
204141ee | 630 | return 0; |
b5b073e2 | 631 | |
204141ee PMF |
632 | /* Jumping inside this loop occurs from within the other loop above with i as |
633 | * counter, so it unallocates the structures for the cpu = current_i down to | |
634 | * zero. */ | |
635 | for(; i>=0; i--) { | |
636 | destroy_shm: | |
637 | result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL); | |
638 | if(result == -1) { | |
639 | perror("shmctl"); | |
640 | } | |
b5b073e2 | 641 | |
204141ee PMF |
642 | destroy_previous: |
643 | continue; | |
b5b073e2 PMF |
644 | } |
645 | ||
204141ee | 646 | return -1; |
b5b073e2 PMF |
647 | } |
648 | ||
ef15e552 NC |
649 | static int unmap_buf_structs(struct ust_channel *chan) |
650 | { | |
651 | int i; | |
652 | ||
653 | for (i=0; i < chan->n_cpus; i++) { | |
654 | if (shmdt(chan->buf[i]) < 0) { | |
655 | PERROR("shmdt"); | |
656 | } | |
657 | } | |
fbae86d6 | 658 | return 0; |
ef15e552 NC |
659 | } |
660 | ||
b5b073e2 PMF |
661 | /* |
662 | * Create channel. | |
663 | */ | |
ef15e552 NC |
664 | static int create_channel(const char *trace_name, struct ust_trace *trace, |
665 | const char *channel_name, struct ust_channel *chan, | |
b5b073e2 PMF |
666 | unsigned int subbuf_size, unsigned int n_subbufs, int overwrite) |
667 | { | |
ef15e552 | 668 | int i, result; |
b5b073e2 | 669 | |
ef15e552 NC |
670 | chan->trace = trace; |
671 | chan->overwrite = overwrite; | |
672 | chan->n_subbufs_order = get_count_order(n_subbufs); | |
673 | chan->commit_count_mask = (~0UL >> chan->n_subbufs_order); | |
674 | chan->n_cpus = get_n_cpus(); | |
b5b073e2 | 675 | |
ef15e552 NC |
676 | /* These mappings should ideally be per-cpu; if somebody can do that |
677 | * from userspace, that would be cool! | |
678 | */ | |
679 | chan->buf = (void *) zmalloc(chan->n_cpus * sizeof(void *)); | |
680 | if(chan->buf == NULL) { | |
204141ee PMF |
681 | goto error; |
682 | } | |
ef15e552 NC |
683 | chan->buf_struct_shmids = (int *) zmalloc(chan->n_cpus * sizeof(int)); |
684 | if(chan->buf_struct_shmids == NULL) | |
204141ee | 685 | goto free_buf; |
b5b073e2 | 686 | |
ef15e552 | 687 | result = map_buf_structs(chan); |
204141ee PMF |
688 | if(result != 0) { |
689 | goto free_buf_struct_shmids; | |
690 | } | |
b5b073e2 | 691 | |
ef15e552 | 692 | result = open_channel(chan, subbuf_size, n_subbufs); |
204141ee | 693 | if (result != 0) { |
c1f20530 | 694 | ERR("Cannot open channel for trace %s", trace_name); |
ef15e552 | 695 | goto unmap_buf_structs; |
b5b073e2 PMF |
696 | } |
697 | ||
204141ee PMF |
698 | return 0; |
699 | ||
ef15e552 NC |
700 | unmap_buf_structs: |
701 | for (i=0; i < chan->n_cpus; i++) { | |
702 | if (shmdt(chan->buf[i]) < 0) { | |
703 | PERROR("shmdt bufstruct"); | |
704 | } | |
705 | } | |
204141ee PMF |
706 | |
707 | free_buf_struct_shmids: | |
ef15e552 | 708 | free(chan->buf_struct_shmids); |
b5b073e2 | 709 | |
204141ee | 710 | free_buf: |
ef15e552 | 711 | free(chan->buf); |
204141ee PMF |
712 | |
713 | error: | |
714 | return -1; | |
b5b073e2 PMF |
715 | } |
716 | ||
ef15e552 NC |
717 | |
718 | static void remove_channel(struct ust_channel *chan) | |
719 | { | |
720 | close_channel(chan); | |
721 | ||
722 | unmap_buf_structs(chan); | |
723 | ||
724 | free(chan->buf_struct_shmids); | |
725 | ||
726 | free(chan->buf); | |
727 | ||
728 | } | |
729 | ||
b5b073e2 PMF |
730 | static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel) |
731 | { | |
732 | //ust// unsigned int i; | |
733 | //ust// struct rchan *rchan = ltt_channel->trans_channel_data; | |
734 | //ust// | |
735 | //ust// for_each_possible_cpu(i) { | |
736 | //ust// struct ltt_channel_buf_struct *ltt_buf = | |
737 | //ust// percpu_ptr(ltt_channel->buf, i); | |
738 | //ust// | |
b102c2b0 PMF |
739 | //ust// if (uatomic_read(<t_buf->wakeup_readers) == 1) { |
740 | //ust// uatomic_set(<t_buf->wakeup_readers, 0); | |
b5b073e2 PMF |
741 | //ust// wake_up_interruptible(&rchan->buf[i]->read_wait); |
742 | //ust// } | |
743 | //ust// } | |
744 | } | |
745 | ||
204141ee | 746 | static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu) |
b5b073e2 PMF |
747 | { |
748 | // int result; | |
749 | ||
204141ee PMF |
750 | if (channel->buf[cpu]) { |
751 | struct ust_buffer *buf = channel->buf[cpu]; | |
97c10252 | 752 | ltt_force_switch(buf, FORCE_FLUSH); |
ef15e552 | 753 | |
b5b073e2 | 754 | /* closing the pipe tells the consumer the buffer is finished */ |
b5b073e2 PMF |
755 | close(buf->data_ready_fd_write); |
756 | } | |
757 | } | |
758 | ||
759 | ||
ef15e552 | 760 | static void finish_channel(struct ust_channel *channel) |
b5b073e2 | 761 | { |
204141ee | 762 | unsigned int i; |
b5b073e2 | 763 | |
204141ee PMF |
764 | for(i=0; i<channel->n_cpus; i++) { |
765 | ltt_relay_finish_buffer(channel, i); | |
766 | } | |
b5b073e2 PMF |
767 | } |
768 | ||
b5b073e2 | 769 | |
b5b073e2 | 770 | /* |
b73a4c47 PMF |
771 | * ltt_reserve_switch_old_subbuf: switch old subbuffer |
772 | * | |
773 | * Concurrency safe because we are the last and only thread to alter this | |
774 | * sub-buffer. As long as it is not delivered and read, no other thread can | |
775 | * alter the offset, alter the reserve_count or call the | |
776 | * client_buffer_end_callback on this sub-buffer. | |
777 | * | |
778 | * The only remaining threads could be the ones with pending commits. They will | |
779 | * have to do the delivery themselves. Not concurrency safe in overwrite mode. | |
780 | * We detect corrupted subbuffers with commit and reserve counts. We keep a | |
781 | * corrupted sub-buffers count and push the readers across these sub-buffers. | |
782 | * | |
783 | * Not concurrency safe if a writer is stalled in a subbuffer and another writer | |
784 | * switches in, finding out it's corrupted. The result will be that the old | |
785 | * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer | |
786 | * will be declared corrupted too because of the commit count adjustment. | |
787 | * | |
788 | * Note : offset_old should never be 0 here. | |
b5b073e2 | 789 | */ |
b73a4c47 PMF |
790 | static void ltt_reserve_switch_old_subbuf( |
791 | struct ust_channel *chan, struct ust_buffer *buf, | |
792 | struct ltt_reserve_switch_offsets *offsets, u64 *tsc) | |
b5b073e2 | 793 | { |
b73a4c47 PMF |
794 | long oldidx = SUBBUF_INDEX(offsets->old - 1, chan); |
795 | long commit_count, padding_size; | |
b5b073e2 | 796 | |
b73a4c47 PMF |
797 | padding_size = chan->subbuf_size |
798 | - (SUBBUF_OFFSET(offsets->old - 1, chan) + 1); | |
799 | ltt_buffer_end(buf, *tsc, offsets->old, oldidx); | |
b5b073e2 | 800 | |
b73a4c47 PMF |
801 | /* |
802 | * Must write slot data before incrementing commit count. | |
ef15e552 | 803 | * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI |
0222e121 | 804 | * sent by get_subbuf() when it does its cmm_smp_rmb(). |
b73a4c47 | 805 | */ |
0222e121 | 806 | cmm_smp_wmb(); |
b102c2b0 PMF |
807 | uatomic_add(&buf->commit_count[oldidx].cc, padding_size); |
808 | commit_count = uatomic_read(&buf->commit_count[oldidx].cc); | |
b73a4c47 | 809 | ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx); |
1e8c9e7b | 810 | ltt_write_commit_counter(chan, buf, oldidx, |
b73a4c47 PMF |
811 | offsets->old, commit_count, padding_size); |
812 | } | |
b5b073e2 | 813 | |
b73a4c47 PMF |
814 | /* |
815 | * ltt_reserve_switch_new_subbuf: Populate new subbuffer. | |
816 | * | |
817 | * This code can be executed unordered : writers may already have written to the | |
818 | * sub-buffer before this code gets executed, caution. The commit makes sure | |
819 | * that this code is executed before the deliver of this sub-buffer. | |
820 | */ | |
821 | static void ltt_reserve_switch_new_subbuf( | |
822 | struct ust_channel *chan, struct ust_buffer *buf, | |
823 | struct ltt_reserve_switch_offsets *offsets, u64 *tsc) | |
824 | { | |
825 | long beginidx = SUBBUF_INDEX(offsets->begin, chan); | |
826 | long commit_count; | |
b5b073e2 | 827 | |
b73a4c47 | 828 | ltt_buffer_begin(buf, *tsc, beginidx); |
b5b073e2 | 829 | |
b73a4c47 PMF |
830 | /* |
831 | * Must write slot data before incrementing commit count. | |
ef15e552 | 832 | * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI |
0222e121 | 833 | * sent by get_subbuf() when it does its cmm_smp_rmb(). |
b73a4c47 | 834 | */ |
0222e121 | 835 | cmm_smp_wmb(); |
b102c2b0 PMF |
836 | uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size()); |
837 | commit_count = uatomic_read(&buf->commit_count[beginidx].cc); | |
b73a4c47 PMF |
838 | /* Check if the written buffer has to be delivered */ |
839 | ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx); | |
1e8c9e7b | 840 | ltt_write_commit_counter(chan, buf, beginidx, |
b73a4c47 PMF |
841 | offsets->begin, commit_count, ltt_subbuffer_header_size()); |
842 | } | |
b5b073e2 | 843 | |
b73a4c47 PMF |
844 | /* |
845 | * ltt_reserve_end_switch_current: finish switching current subbuffer | |
846 | * | |
847 | * Concurrency safe because we are the last and only thread to alter this | |
848 | * sub-buffer. As long as it is not delivered and read, no other thread can | |
849 | * alter the offset, alter the reserve_count or call the | |
850 | * client_buffer_end_callback on this sub-buffer. | |
851 | * | |
852 | * The only remaining threads could be the ones with pending commits. They will | |
853 | * have to do the delivery themselves. Not concurrency safe in overwrite mode. | |
854 | * We detect corrupted subbuffers with commit and reserve counts. We keep a | |
855 | * corrupted sub-buffers count and push the readers across these sub-buffers. | |
856 | * | |
857 | * Not concurrency safe if a writer is stalled in a subbuffer and another writer | |
858 | * switches in, finding out it's corrupted. The result will be that the old | |
859 | * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer | |
860 | * will be declared corrupted too because of the commit count adjustment. | |
861 | */ | |
862 | static void ltt_reserve_end_switch_current( | |
863 | struct ust_channel *chan, | |
864 | struct ust_buffer *buf, | |
865 | struct ltt_reserve_switch_offsets *offsets, u64 *tsc) | |
866 | { | |
867 | long endidx = SUBBUF_INDEX(offsets->end - 1, chan); | |
868 | long commit_count, padding_size; | |
869 | ||
870 | padding_size = chan->subbuf_size | |
871 | - (SUBBUF_OFFSET(offsets->end - 1, chan) + 1); | |
872 | ||
873 | ltt_buffer_end(buf, *tsc, offsets->end, endidx); | |
874 | ||
875 | /* | |
876 | * Must write slot data before incrementing commit count. | |
ef15e552 | 877 | * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI |
0222e121 | 878 | * sent by get_subbuf() when it does its cmm_smp_rmb(). |
b73a4c47 | 879 | */ |
0222e121 | 880 | cmm_smp_wmb(); |
b102c2b0 PMF |
881 | uatomic_add(&buf->commit_count[endidx].cc, padding_size); |
882 | commit_count = uatomic_read(&buf->commit_count[endidx].cc); | |
b73a4c47 PMF |
883 | ltt_check_deliver(chan, buf, |
884 | offsets->end - 1, commit_count, endidx); | |
1e8c9e7b | 885 | ltt_write_commit_counter(chan, buf, endidx, |
b73a4c47 | 886 | offsets->end, commit_count, padding_size); |
b5b073e2 PMF |
887 | } |
888 | ||
889 | /* | |
890 | * Returns : | |
891 | * 0 if ok | |
892 | * !0 if execution must be aborted. | |
893 | */ | |
b73a4c47 | 894 | static int ltt_relay_try_switch_slow( |
b5b073e2 | 895 | enum force_switch_mode mode, |
b73a4c47 | 896 | struct ust_channel *chan, |
b5b073e2 PMF |
897 | struct ust_buffer *buf, |
898 | struct ltt_reserve_switch_offsets *offsets, | |
899 | u64 *tsc) | |
900 | { | |
901 | long subbuf_index; | |
b73a4c47 | 902 | long reserve_commit_diff; |
b5b073e2 | 903 | |
b102c2b0 | 904 | offsets->begin = uatomic_read(&buf->offset); |
b5b073e2 PMF |
905 | offsets->old = offsets->begin; |
906 | offsets->begin_switch = 0; | |
907 | offsets->end_switch_old = 0; | |
908 | ||
909 | *tsc = trace_clock_read64(); | |
910 | ||
911 | if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) { | |
912 | offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan); | |
913 | offsets->end_switch_old = 1; | |
914 | } else { | |
915 | /* we do not have to switch : buffer is empty */ | |
916 | return -1; | |
917 | } | |
918 | if (mode == FORCE_ACTIVE) | |
919 | offsets->begin += ltt_subbuffer_header_size(); | |
920 | /* | |
921 | * Always begin_switch in FORCE_ACTIVE mode. | |
922 | * Test new buffer integrity | |
923 | */ | |
924 | subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan); | |
b73a4c47 | 925 | reserve_commit_diff = |
b5b073e2 | 926 | (BUFFER_TRUNC(offsets->begin, buf->chan) |
b73a4c47 | 927 | >> chan->n_subbufs_order) |
b102c2b0 | 928 | - (uatomic_read(&buf->commit_count[subbuf_index].cc_sb) |
b73a4c47 PMF |
929 | & chan->commit_count_mask); |
930 | if (reserve_commit_diff == 0) { | |
b5b073e2 PMF |
931 | /* Next buffer not corrupted. */ |
932 | if (mode == FORCE_ACTIVE | |
b73a4c47 | 933 | && !chan->overwrite |
b102c2b0 | 934 | && offsets->begin - uatomic_read(&buf->consumed) |
b73a4c47 | 935 | >= chan->alloc_size) { |
b5b073e2 PMF |
936 | /* |
937 | * We do not overwrite non consumed buffers and we are | |
938 | * full : ignore switch while tracing is active. | |
939 | */ | |
940 | return -1; | |
941 | } | |
942 | } else { | |
943 | /* | |
944 | * Next subbuffer corrupted. Force pushing reader even in normal | |
945 | * mode | |
946 | */ | |
947 | } | |
948 | offsets->end = offsets->begin; | |
949 | return 0; | |
950 | } | |
951 | ||
b5b073e2 | 952 | /* |
b73a4c47 PMF |
953 | * Force a sub-buffer switch for a per-cpu buffer. This operation is |
954 | * completely reentrant : can be called while tracing is active with | |
955 | * absolutely no lock held. | |
b5b073e2 | 956 | */ |
b73a4c47 PMF |
957 | void ltt_force_switch_lockless_slow(struct ust_buffer *buf, |
958 | enum force_switch_mode mode) | |
b5b073e2 | 959 | { |
b73a4c47 | 960 | struct ust_channel *chan = buf->chan; |
b5b073e2 | 961 | struct ltt_reserve_switch_offsets offsets; |
b73a4c47 | 962 | u64 tsc; |
b5b073e2 | 963 | |
b5b073e2 PMF |
964 | offsets.size = 0; |
965 | ||
10dd3941 | 966 | DBG("Switching (forced) %s_%d", chan->channel_name, buf->cpu); |
b5b073e2 PMF |
967 | /* |
968 | * Perform retryable operations. | |
969 | */ | |
b5b073e2 | 970 | do { |
b73a4c47 PMF |
971 | if (ltt_relay_try_switch_slow(mode, chan, buf, |
972 | &offsets, &tsc)) | |
973 | return; | |
b102c2b0 | 974 | } while (uatomic_cmpxchg(&buf->offset, offsets.old, |
b5b073e2 PMF |
975 | offsets.end) != offsets.old); |
976 | ||
977 | /* | |
978 | * Atomically update last_tsc. This update races against concurrent | |
979 | * atomic updates, but the race will always cause supplementary full TSC | |
980 | * events, never the opposite (missing a full TSC event when it would be | |
981 | * needed). | |
982 | */ | |
b73a4c47 | 983 | save_last_tsc(buf, tsc); |
b5b073e2 PMF |
984 | |
985 | /* | |
986 | * Push the reader if necessary | |
987 | */ | |
b73a4c47 PMF |
988 | if (mode == FORCE_ACTIVE) { |
989 | ltt_reserve_push_reader(chan, buf, offsets.end - 1); | |
990 | //ust// ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan)); | |
991 | } | |
b5b073e2 PMF |
992 | |
993 | /* | |
994 | * Switch old subbuffer if needed. | |
995 | */ | |
b73a4c47 PMF |
996 | if (offsets.end_switch_old) { |
997 | //ust// ltt_clear_noref_flag(rchan, buf, SUBBUF_INDEX(offsets.old - 1, rchan)); | |
998 | ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc); | |
999 | } | |
b5b073e2 PMF |
1000 | |
1001 | /* | |
1002 | * Populate new subbuffer. | |
1003 | */ | |
b73a4c47 PMF |
1004 | if (mode == FORCE_ACTIVE) |
1005 | ltt_reserve_switch_new_subbuf(chan, buf, &offsets, &tsc); | |
1006 | } | |
b5b073e2 | 1007 | |
b73a4c47 PMF |
1008 | /* |
1009 | * Returns : | |
1010 | * 0 if ok | |
1011 | * !0 if execution must be aborted. | |
1012 | */ | |
1013 | static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffer *buf, | |
1014 | struct ltt_reserve_switch_offsets *offsets, size_t data_size, | |
1015 | u64 *tsc, unsigned int *rflags, int largest_align) | |
1016 | { | |
1017 | long reserve_commit_diff; | |
b5b073e2 | 1018 | |
b102c2b0 | 1019 | offsets->begin = uatomic_read(&buf->offset); |
b73a4c47 PMF |
1020 | offsets->old = offsets->begin; |
1021 | offsets->begin_switch = 0; | |
1022 | offsets->end_switch_current = 0; | |
1023 | offsets->end_switch_old = 0; | |
1024 | ||
1025 | *tsc = trace_clock_read64(); | |
1026 | if (last_tsc_overflow(buf, *tsc)) | |
1027 | *rflags = LTT_RFLAG_ID_SIZE_TSC; | |
1028 | ||
1029 | if (unlikely(SUBBUF_OFFSET(offsets->begin, buf->chan) == 0)) { | |
1030 | offsets->begin_switch = 1; /* For offsets->begin */ | |
1031 | } else { | |
1032 | offsets->size = ust_get_header_size(chan, | |
1033 | offsets->begin, data_size, | |
1034 | &offsets->before_hdr_pad, *rflags); | |
1035 | offsets->size += ltt_align(offsets->begin + offsets->size, | |
1036 | largest_align) | |
1037 | + data_size; | |
1038 | if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) + | |
1039 | offsets->size) > buf->chan->subbuf_size)) { | |
1040 | offsets->end_switch_old = 1; /* For offsets->old */ | |
1041 | offsets->begin_switch = 1; /* For offsets->begin */ | |
1042 | } | |
1043 | } | |
1044 | if (unlikely(offsets->begin_switch)) { | |
1045 | long subbuf_index; | |
1046 | ||
1047 | /* | |
1048 | * We are typically not filling the previous buffer completely. | |
1049 | */ | |
1050 | if (likely(offsets->end_switch_old)) | |
1051 | offsets->begin = SUBBUF_ALIGN(offsets->begin, | |
1052 | buf->chan); | |
1053 | offsets->begin = offsets->begin + ltt_subbuffer_header_size(); | |
1054 | /* Test new buffer integrity */ | |
1055 | subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan); | |
1056 | reserve_commit_diff = | |
1057 | (BUFFER_TRUNC(offsets->begin, buf->chan) | |
1058 | >> chan->n_subbufs_order) | |
b102c2b0 | 1059 | - (uatomic_read(&buf->commit_count[subbuf_index].cc_sb) |
b73a4c47 PMF |
1060 | & chan->commit_count_mask); |
1061 | if (likely(reserve_commit_diff == 0)) { | |
1062 | /* Next buffer not corrupted. */ | |
1063 | if (unlikely(!chan->overwrite && | |
1064 | (SUBBUF_TRUNC(offsets->begin, buf->chan) | |
b102c2b0 | 1065 | - SUBBUF_TRUNC(uatomic_read( |
b73a4c47 PMF |
1066 | &buf->consumed), |
1067 | buf->chan)) | |
1068 | >= chan->alloc_size)) { | |
1069 | /* | |
1070 | * We do not overwrite non consumed buffers | |
1071 | * and we are full : event is lost. | |
1072 | */ | |
b102c2b0 | 1073 | uatomic_inc(&buf->events_lost); |
b73a4c47 PMF |
1074 | return -1; |
1075 | } else { | |
1076 | /* | |
1077 | * next buffer not corrupted, we are either in | |
1078 | * overwrite mode or the buffer is not full. | |
1079 | * It's safe to write in this new subbuffer. | |
1080 | */ | |
1081 | } | |
1082 | } else { | |
1083 | /* | |
1084 | * Next subbuffer corrupted. Drop event in normal and | |
1085 | * overwrite mode. Caused by either a writer OOPS or | |
1086 | * too many nested writes over a reserve/commit pair. | |
1087 | */ | |
b102c2b0 | 1088 | uatomic_inc(&buf->events_lost); |
b73a4c47 PMF |
1089 | return -1; |
1090 | } | |
1091 | offsets->size = ust_get_header_size(chan, | |
1092 | offsets->begin, data_size, | |
1093 | &offsets->before_hdr_pad, *rflags); | |
1094 | offsets->size += ltt_align(offsets->begin + offsets->size, | |
1095 | largest_align) | |
1096 | + data_size; | |
1097 | if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) | |
1098 | + offsets->size) > buf->chan->subbuf_size)) { | |
1099 | /* | |
1100 | * Event too big for subbuffers, report error, don't | |
1101 | * complete the sub-buffer switch. | |
1102 | */ | |
b102c2b0 | 1103 | uatomic_inc(&buf->events_lost); |
b73a4c47 PMF |
1104 | return -1; |
1105 | } else { | |
1106 | /* | |
1107 | * We just made a successful buffer switch and the event | |
1108 | * fits in the new subbuffer. Let's write. | |
1109 | */ | |
1110 | } | |
1111 | } else { | |
1112 | /* | |
1113 | * Event fits in the current buffer and we are not on a switch | |
1114 | * boundary. It's safe to write. | |
1115 | */ | |
1116 | } | |
1117 | offsets->end = offsets->begin + offsets->size; | |
1118 | ||
1119 | if (unlikely((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0)) { | |
1120 | /* | |
1121 | * The offset_end will fall at the very beginning of the next | |
1122 | * subbuffer. | |
1123 | */ | |
1124 | offsets->end_switch_current = 1; /* For offsets->begin */ | |
1125 | } | |
b5b073e2 PMF |
1126 | return 0; |
1127 | } | |
1128 | ||
b73a4c47 PMF |
1129 | /** |
1130 | * ltt_relay_reserve_slot_lockless_slow - Atomic slot reservation in a buffer. | |
1131 | * @trace: the trace structure to log to. | |
1132 | * @ltt_channel: channel structure | |
1133 | * @transport_data: data structure specific to ltt relay | |
1134 | * @data_size: size of the variable length data to log. | |
1135 | * @slot_size: pointer to total size of the slot (out) | |
1136 | * @buf_offset : pointer to reserved buffer offset (out) | |
1137 | * @tsc: pointer to the tsc at the slot reservation (out) | |
1138 | * @cpu: cpuid | |
b5b073e2 | 1139 | * |
b73a4c47 PMF |
1140 | * Return : -ENOSPC if not enough space, else returns 0. |
1141 | * It will take care of sub-buffer switching. | |
b5b073e2 | 1142 | */ |
12e81b07 PMF |
1143 | int ltt_reserve_slot_lockless_slow(struct ust_channel *chan, |
1144 | struct ust_trace *trace, size_t data_size, | |
1145 | int largest_align, int cpu, | |
1146 | struct ust_buffer **ret_buf, | |
1147 | size_t *slot_size, long *buf_offset, | |
1148 | u64 *tsc, unsigned int *rflags) | |
b5b073e2 | 1149 | { |
12e81b07 | 1150 | struct ust_buffer *buf = *ret_buf = chan->buf[cpu]; |
b5b073e2 | 1151 | struct ltt_reserve_switch_offsets offsets; |
b5b073e2 | 1152 | |
b5b073e2 PMF |
1153 | offsets.size = 0; |
1154 | ||
b5b073e2 | 1155 | do { |
b73a4c47 PMF |
1156 | if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets, |
1157 | data_size, tsc, rflags, largest_align))) | |
1158 | return -ENOSPC; | |
b102c2b0 | 1159 | } while (unlikely(uatomic_cmpxchg(&buf->offset, offsets.old, |
b73a4c47 | 1160 | offsets.end) != offsets.old)); |
b5b073e2 PMF |
1161 | |
1162 | /* | |
1163 | * Atomically update last_tsc. This update races against concurrent | |
1164 | * atomic updates, but the race will always cause supplementary full TSC | |
1165 | * events, never the opposite (missing a full TSC event when it would be | |
1166 | * needed). | |
1167 | */ | |
b73a4c47 | 1168 | save_last_tsc(buf, *tsc); |
b5b073e2 PMF |
1169 | |
1170 | /* | |
1171 | * Push the reader if necessary | |
1172 | */ | |
b73a4c47 PMF |
1173 | ltt_reserve_push_reader(chan, buf, offsets.end - 1); |
1174 | ||
1175 | /* | |
1176 | * Clear noref flag for this subbuffer. | |
1177 | */ | |
1178 | //ust// ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan)); | |
b5b073e2 PMF |
1179 | |
1180 | /* | |
1181 | * Switch old subbuffer if needed. | |
1182 | */ | |
b73a4c47 PMF |
1183 | if (unlikely(offsets.end_switch_old)) { |
1184 | //ust// ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.old - 1, chan)); | |
1185 | ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc); | |
10dd3941 | 1186 | DBG("Switching %s_%d", chan->channel_name, cpu); |
b73a4c47 | 1187 | } |
b5b073e2 PMF |
1188 | |
1189 | /* | |
1190 | * Populate new subbuffer. | |
1191 | */ | |
b73a4c47 PMF |
1192 | if (unlikely(offsets.begin_switch)) |
1193 | ltt_reserve_switch_new_subbuf(chan, buf, &offsets, tsc); | |
1194 | ||
1195 | if (unlikely(offsets.end_switch_current)) | |
1196 | ltt_reserve_end_switch_current(chan, buf, &offsets, tsc); | |
1197 | ||
1198 | *slot_size = offsets.size; | |
1199 | *buf_offset = offsets.begin + offsets.before_hdr_pad; | |
1200 | return 0; | |
b5b073e2 PMF |
1201 | } |
1202 | ||
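For context, a hedged writer-side sketch of how the reservation above is meant to be driven (illustration only: real probes go through the inline fast-path wrappers, and the header and commit steps are reduced to comments here):

static void write_one_event(struct ust_channel *chan, struct ust_trace *trace,
			    const void *payload, size_t payload_size, int cpu)
{
	struct ust_buffer *buf;
	size_t slot_size = 0;
	long buf_offset = 0;
	u64 tsc = 0;
	unsigned int rflags = 0;

	if (ltt_reserve_slot_lockless_slow(chan, trace, payload_size,
					   sizeof(long), cpu, &buf,
					   &slot_size, &buf_offset,
					   &tsc, &rflags) < 0)
		return;	/* -ENOSPC: the event is dropped and counted in events_lost */

	/* A real probe writes the event header first (see
	 * ltt_write_event_header_slow() below for the extended cases),
	 * then its payload inside the reserved slot: */
	ust_buffers_write(buf, buf_offset, payload, payload_size);

	/* ... commit slot_size bytes here (commit helper not shown in this file) ... */
}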
b5b073e2 PMF |
1203 | static struct ltt_transport ust_relay_transport = { |
1204 | .name = "ustrelay", | |
1205 | .ops = { | |
ef15e552 NC |
1206 | .create_channel = create_channel, |
1207 | .finish_channel = finish_channel, | |
1208 | .remove_channel = remove_channel, | |
b5b073e2 | 1209 | .wakeup_channel = ltt_relay_async_wakeup_chan, |
b5b073e2 PMF |
1210 | }, |
1211 | }; | |
1212 | ||
b5b073e2 PMF |
1213 | static char initialized = 0; |
1214 | ||
1215 | void __attribute__((constructor)) init_ustrelay_transport(void) | |
1216 | { | |
1217 | if(!initialized) { | |
1218 | ltt_transport_register(&ust_relay_transport); | |
1219 | initialized = 1; | |
1220 | } | |
1221 | } | |
1222 | ||
b73a4c47 | 1223 | static void __attribute__((destructor)) ust_buffers_exit(void) |
b5b073e2 PMF |
1224 | { |
1225 | ltt_transport_unregister(&ust_relay_transport); | |
1226 | } | |
b73a4c47 | 1227 | |
12e81b07 | 1228 | size_t ltt_write_event_header_slow(struct ust_channel *channel, |
b73a4c47 PMF |
1229 | struct ust_buffer *buf, long buf_offset, |
1230 | u16 eID, u32 event_size, | |
1231 | u64 tsc, unsigned int rflags) | |
1232 | { | |
1233 | struct ltt_event_header header; | |
1234 | u16 small_size; | |
1235 | ||
1236 | switch (rflags) { | |
1237 | case LTT_RFLAG_ID_SIZE_TSC: | |
1238 | header.id_time = 29 << LTT_TSC_BITS; | |
1239 | break; | |
1240 | case LTT_RFLAG_ID_SIZE: | |
1241 | header.id_time = 30 << LTT_TSC_BITS; | |
1242 | break; | |
1243 | case LTT_RFLAG_ID: | |
1244 | header.id_time = 31 << LTT_TSC_BITS; | |
1245 | break; | |
e2b46575 DG |
1246 | default: |
1247 | WARN_ON_ONCE(1); | |
1248 | header.id_time = 0; | |
1249 | break; | |
b73a4c47 PMF |
1250 | } |
1251 | ||
1252 | header.id_time |= (u32)tsc & LTT_TSC_MASK; | |
1253 | ust_buffers_write(buf, buf_offset, &header, sizeof(header)); | |
1254 | buf_offset += sizeof(header); | |
1255 | ||
1256 | switch (rflags) { | |
1257 | case LTT_RFLAG_ID_SIZE_TSC: | |
1258 | small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE); | |
1259 | ust_buffers_write(buf, buf_offset, | |
1260 | &eID, sizeof(u16)); | |
1261 | buf_offset += sizeof(u16); | |
1262 | ust_buffers_write(buf, buf_offset, | |
1263 | &small_size, sizeof(u16)); | |
1264 | buf_offset += sizeof(u16); | |
1265 | if (small_size == LTT_MAX_SMALL_SIZE) { | |
1266 | ust_buffers_write(buf, buf_offset, | |
1267 | &event_size, sizeof(u32)); | |
1268 | buf_offset += sizeof(u32); | |
1269 | } | |
1270 | buf_offset += ltt_align(buf_offset, sizeof(u64)); | |
1271 | ust_buffers_write(buf, buf_offset, | |
1272 | &tsc, sizeof(u64)); | |
1273 | buf_offset += sizeof(u64); | |
1274 | break; | |
1275 | case LTT_RFLAG_ID_SIZE: | |
1276 | small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE); | |
1277 | ust_buffers_write(buf, buf_offset, | |
1278 | &eID, sizeof(u16)); | |
1279 | buf_offset += sizeof(u16); | |
1280 | ust_buffers_write(buf, buf_offset, | |
1281 | &small_size, sizeof(u16)); | |
1282 | buf_offset += sizeof(u16); | |
1283 | if (small_size == LTT_MAX_SMALL_SIZE) { | |
1284 | ust_buffers_write(buf, buf_offset, | |
1285 | &event_size, sizeof(u32)); | |
1286 | buf_offset += sizeof(u32); | |
1287 | } | |
1288 | break; | |
1289 | case LTT_RFLAG_ID: | |
1290 | ust_buffers_write(buf, buf_offset, | |
1291 | &eID, sizeof(u16)); | |
1292 | buf_offset += sizeof(u16); | |
1293 | break; | |
1294 | } | |
1295 | ||
1296 | return buf_offset; | |
1297 | } |
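Finally, a hedged reader-side sketch of the id_time layout produced by the function above (illustration only; real decoding belongs to the trace viewer): the bits above LTT_TSC_BITS carry the event ID, or the reserved values 29/30/31 announcing an extended header, and the low bits carry the truncated timestamp.

static void decode_id_time(u32 id_time, u32 *id, u32 *tsc_low)
{
	*id = id_time >> LTT_TSC_BITS;		/* 29/30/31 mean an extended header follows */
	*tsc_low = id_time & LTT_TSC_MASK;	/* truncated cycle counter */
}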