/*
 * buffers.c
 * LTTng userspace tracer buffering system
 *
 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
 * Copyright (C) 2008-2011 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */

/*
 * Note: this code does not support the ref/noref flag and reader-owned
 * subbuffer scheme. Therefore, flight recorder mode uses a mechanism
 * where the reader can read corrupted data (and detect this), thus
 * returning -EIO.
 */

#include <unistd.h>
#include <sys/mman.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <fcntl.h>
#include <stdlib.h>

#include <ust/clock.h>

#include "buffers.h"
#include "channels.h"
#include "tracer.h"
#include "tracercore.h"
#include "usterr_signal_safe.h"

struct ltt_reserve_switch_offsets {
	long begin, end, old;
	long begin_switch, end_switch_current, end_switch_old;
	size_t before_hdr_pad, size;
};


static DEFINE_MUTEX(ust_buffers_channels_mutex);
static CDS_LIST_HEAD(ust_buffers_channels);

static void ltt_force_switch(struct ust_buffer *buf,
		enum force_switch_mode mode);

static int get_n_cpus(void)
{
	int result;
	static int n_cpus = 0;

	if(!n_cpus) {
		/* On Linux, when some processors are offline
		 * _SC_NPROCESSORS_CONF counts the offline
		 * processors, whereas _SC_NPROCESSORS_ONLN
		 * does not. If we used _SC_NPROCESSORS_ONLN,
		 * getcpu() could return a value greater than
		 * this sysconf, in which case the arrays
		 * indexed by processor would overflow.
		 */
		result = sysconf(_SC_NPROCESSORS_CONF);
		if(result == -1) {
			return -1;
		}

		n_cpus = result;
	}

	return n_cpus;
}

/**
 * _ust_buffers_strncpy_fixup - Fix an incomplete string in an ltt_relay buffer.
 * @buf : buffer
 * @offset : offset within the buffer
 * @len : length to write
 * @copied: string actually copied
 * @terminated: does string end with \0
 *
 * Fills string with "X" if incomplete.
 */
void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
		size_t len, size_t copied, int terminated)
{
	size_t buf_offset, cpy;

	if (copied == len) {
		/*
		 * Deal with non-terminated string.
		 */
		assert(!terminated);
		offset += copied - 1;
		buf_offset = BUFFER_OFFSET(offset, buf->chan);
		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		assert(buf_offset
		       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
		ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
		return;
	}

	/*
	 * Deal with incomplete string.
	 * Overwrite string's \0 with X too.
	 */
	cpy = copied - 1;
	assert(terminated);
	len -= cpy;
	offset += cpy;
	buf_offset = BUFFER_OFFSET(offset, buf->chan);

	/*
	 * Underlying layer should never ask for writes across subbuffers.
	 */
	assert(buf_offset
	       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);

	ust_buffers_do_memset(buf->buf_data + buf_offset,
			      'X', len);

	/*
	 * Overwrite last 'X' with '\0'.
	 */
	offset += len - 1;
	buf_offset = BUFFER_OFFSET(offset, buf->chan);
	/*
	 * Underlying layer should never ask for writes across subbuffers.
	 */
	assert(buf_offset
	       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
	ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
}

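/*
 * Worked example for _ust_buffers_strncpy_fixup() above (illustrative only,
 * not from the original source): for a reserved slot of len = 8 filled from
 * the source string "abc" (copied = 4 including the terminating '\0',
 * terminated = 1), the slot ends up containing "abc" + "XXXX" + '\0': the
 * string's '\0' and the trailing slack are overwritten with 'X', then the
 * final byte is turned back into '\0'.
 */
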
static void ltt_buffer_begin(struct ust_buffer *buf,
		u64 tsc, unsigned int subbuf_idx)
{
	struct ust_channel *channel = buf->chan;
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ust_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->cycle_count_begin = tsc;
	header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	/*
	 * No memory barrier needed to order data_size/sb_size vs commit count
	 * update, because commit count update contains a compiler barrier that
	 * ensures the order of the writes is OK from a program POV. It only
	 * matters for crash dump recovery which is not executed concurrently,
	 * so memory write order does not matter.
	 */
	ltt_write_trace_header(channel->trace, header);
}

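/*
 * Note (added for clarity): ltt_buffer_begin() above seeds data_size and
 * sb_size with 0xFFFFFFFF, and ltt_buffer_end() only fills in the real
 * values when the subbuffer is closed. A crash-dump reader can therefore
 * treat any subbuffer whose header still carries 0xFFFFFFFF as incompletely
 * written.
 */
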
static int map_buf_data(struct ust_buffer *buf, size_t *size)
{
	void *ptr;
	int result;

	*size = PAGE_ALIGN(*size);

	result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
	if (result < 0 && errno == EINVAL) {
		ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
		return -1;
	} else if (result < 0) {
		PERROR("shmget");
		return -1;
	}

	ptr = shmat(buf->shmid, NULL, 0);
	if (ptr == (void *) -1) {
		perror("shmat");
		goto destroy_shmem;
	}

	/* Already mark the shared memory for destruction. This will occur only
	 * when all users have detached.
	 */
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if(result == -1) {
		perror("shmctl");
		return -1;
	}

	buf->buf_data = ptr;
	buf->buf_size = *size;

	return 0;

destroy_shmem:
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if(result == -1) {
		perror("shmctl");
	}

	return -1;
}

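/*
 * Note (added for clarity): map_buf_data() above uses the classic SysV shm
 * create/attach/mark-for-removal sequence. Marking the segment with IPC_RMID
 * right after shmat() means the kernel reclaims it automatically once the
 * last user detaches, so no explicit cleanup pass is needed if a process
 * dies while still attached.
 */
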
static int open_buf(struct ust_channel *chan, int cpu)
{
	int result, fds[2];
	unsigned int j;
	struct ust_trace *trace = chan->trace;
	struct ust_buffer *buf = chan->buf[cpu];
	unsigned int n_subbufs = chan->subbuf_cnt;

	result = map_buf_data(buf, &chan->alloc_size);
	if (result < 0)
		return -1;

	buf->commit_count =
		zmalloc(sizeof(*buf->commit_count) * n_subbufs);
	if (!buf->commit_count)
		goto unmap_buf;

	result = pipe(fds);
	if (result < 0) {
		PERROR("pipe");
		goto free_commit_count;
	}
	buf->data_ready_fd_read = fds[0];
	buf->data_ready_fd_write = fds[1];

	buf->cpu = cpu;
	buf->chan = chan;

	uatomic_set(&buf->offset, ltt_subbuffer_header_size());
	uatomic_set(&buf->consumed, 0);
	uatomic_set(&buf->active_readers, 0);
	for (j = 0; j < n_subbufs; j++) {
		uatomic_set(&buf->commit_count[j].cc, 0);
		uatomic_set(&buf->commit_count[j].cc_sb, 0);
	}

	ltt_buffer_begin(buf, trace->start_tsc, 0);

	uatomic_add(&buf->commit_count[0].cc, ltt_subbuffer_header_size());

	uatomic_set(&buf->events_lost, 0);
	uatomic_set(&buf->corrupted_subbuffers, 0);

	memset(buf->commit_seq, 0, sizeof(buf->commit_seq[0]) * n_subbufs);

	return 0;

free_commit_count:
	free(buf->commit_count);

unmap_buf:
	if (shmdt(buf->buf_data) < 0) {
		PERROR("shmdt failed");
	}

	return -1;
}

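/*
 * Note (added for clarity): after open_buf() above, buf->offset and
 * commit_count[0].cc both start at ltt_subbuffer_header_size(), so the space
 * taken by the header of subbuffer 0 is already accounted for as both
 * reserved and committed. The reserve/commit accounting therefore balances
 * from the very first event.
 */
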
static void close_buf(struct ust_buffer *buf)
{
	int result;

	result = shmdt(buf->buf_data);
	if (result < 0) {
		PERROR("shmdt");
	}

	result = close(buf->data_ready_fd_read);
	if (result < 0) {
		PERROR("close");
	}

	result = close(buf->data_ready_fd_write);
	if (result < 0 && errno != EBADF) {
		PERROR("close");
	}
}


static int open_channel(struct ust_channel *chan, size_t subbuf_size,
		size_t subbuf_cnt)
{
	int i;
	int result;

	if(subbuf_size == 0 || subbuf_cnt == 0)
		return -1;

	/* Check that the subbuffer size is larger than a page. */
	WARN_ON_ONCE(subbuf_size < PAGE_SIZE);

	/*
	 * Make sure the number of subbuffers and subbuffer size are powers of 2.
	 */
	WARN_ON_ONCE(hweight32(subbuf_size) != 1);
	WARN_ON(hweight32(subbuf_cnt) != 1);

	chan->version = UST_CHANNEL_VERSION;
	chan->subbuf_cnt = subbuf_cnt;
	chan->subbuf_size = subbuf_size;
	chan->subbuf_size_order = get_count_order(subbuf_size);
	chan->alloc_size = subbuf_size * subbuf_cnt;

	pthread_mutex_lock(&ust_buffers_channels_mutex);
	for (i=0; i < chan->n_cpus; i++) {
		result = open_buf(chan, i);
		if (result == -1)
			goto error;
	}
	cds_list_add(&chan->list, &ust_buffers_channels);
	pthread_mutex_unlock(&ust_buffers_channels_mutex);

	return 0;

	/* Error handling */
error:
	for(i--; i >= 0; i--)
		close_buf(chan->buf[i]);

	pthread_mutex_unlock(&ust_buffers_channels_mutex);
	return -1;
}

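/*
 * Note (added for clarity): the power-of-2 constraints checked in
 * open_channel() above let the SUBBUF_INDEX()/SUBBUF_OFFSET()/BUFFER_TRUNC()
 * macros reduce to cheap mask and shift operations, e.g. with
 * subbuf_size = 4096 and subbuf_cnt = 8 an offset maps to its subbuffer
 * index as "(offset >> 12) & 7" instead of requiring division and modulo.
 */
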
static void close_channel(struct ust_channel *chan)
{
	int i;
	if(!chan)
		return;

	pthread_mutex_lock(&ust_buffers_channels_mutex);
	/*
	 * checking for chan->buf[i] being NULL or not is useless in
	 * practice because we allocate buffers for all possible cpus.
	 * However, should we decide to change this and only allocate
	 * for online cpus, this check becomes useful.
	 */
	for (i=0; i<chan->n_cpus; i++) {
		if (chan->buf[i])
			close_buf(chan->buf[i]);
	}

	cds_list_del(&chan->list);

	pthread_mutex_unlock(&ust_buffers_channels_mutex);
}

/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. The lost size is between 0 and subbuf_size-1.
 */
static notrace void ltt_buffer_end(struct ust_buffer *buf,
		u64 tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ust_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);
	u32 data_size = SUBBUF_OFFSET(offset - 1, buf->chan) + 1;

	header->sb_size = PAGE_ALIGN(data_size);
	header->cycle_count_end = tsc;
	header->events_lost = uatomic_read(&buf->events_lost);
	header->subbuf_corrupt = uatomic_read(&buf->corrupted_subbuffers);
	if(unlikely(header->events_lost > 0)) {
		DBG("Some events (%d) were lost in %s_%d", header->events_lost, buf->chan->channel_name, buf->cpu);
	}
	/*
	 * Makes sure data_size write happens after write of the rest of the
	 * buffer end data, because data_size is used to identify a completely
	 * written subbuffer in a crash dump.
	 */
	cmm_barrier();
	header->data_size = data_size;
}

/*
 * This function should not be called from NMI interrupt context
 */
static notrace void ltt_buf_unfull(struct ust_buffer *buf,
		unsigned int subbuf_idx,
		long offset)
{
}

/*
 * Promote compiler cmm_barrier to a smp_mb().
 * For the specific LTTng case, this IPI call should be removed if the
 * architecture does not reorder writes. This should eventually be provided by
 * a separate architecture-specific infrastructure.
 */
//ust// static void remote_mb(void *info)
//ust// {
//ust// 	smp_mb();
//ust// }

int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
{
	struct ust_channel *channel = buf->chan;
	long consumed_old, consumed_idx, commit_count, write_offset;
//ust//	int retval;

	consumed_old = uatomic_read(&buf->consumed);
	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
	commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
	/*
	 * Make sure we read the commit count before reading the buffer
	 * data and the write offset. Correct consumed offset ordering
	 * wrt commit count is ensured by the use of cmpxchg to update
	 * the consumed offset.
	 */

	/*
	 * Local rmb to match the remote wmb to read the commit count before the
	 * buffer data and the write offset.
	 */
	cmm_smp_rmb();

	write_offset = uatomic_read(&buf->offset);
	/*
	 * Check that the subbuffer we are trying to consume has already been
	 * fully committed.
	 */
	if (((commit_count - buf->chan->subbuf_size)
	     & channel->commit_count_mask)
	    - (BUFFER_TRUNC(consumed_old, buf->chan)
	       >> channel->n_subbufs_order)
	    != 0) {
		return -EAGAIN;
	}
	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if ((SUBBUF_TRUNC(write_offset, buf->chan)
	     - SUBBUF_TRUNC(consumed_old, buf->chan))
	    == 0) {
		return -EAGAIN;
	}
	*consumed = consumed_old;

	return 0;
}

int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
{
	long consumed_new, consumed_old;

	consumed_old = uatomic_read(&buf->consumed);
	consumed_old = consumed_old & (~0xFFFFFFFFL);
	consumed_old = consumed_old | uconsumed_old;
	consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);

	if (uatomic_cmpxchg(&buf->consumed, consumed_old,
			consumed_new)
	    != consumed_old) {
		/* We have been pushed by the writer : the last
		 * buffer read _is_ corrupted! It can also
		 * happen if this is a buffer we never got. */
		return -EIO;
	} else {
		/* tell the client that buffer is now unfull */
		int index;
		long data;
		index = SUBBUF_INDEX(consumed_old, buf->chan);
		data = BUFFER_OFFSET(consumed_old, buf->chan);
		ltt_buf_unfull(buf, index, data);
	}
	return 0;
}

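/*
 * Illustrative consumer-side usage of the two calls above (a sketch, not
 * code from this file; the actual read of the subbuffer contents is elided).
 * A reader retries on -EAGAIN and treats -EIO as an overwritten subbuffer:
 *
 *	long consumed;
 *
 *	if (ust_buffers_get_subbuf(buf, &consumed) == 0) {
 *		// ... copy the subbuffer at 'consumed' out of buf ...
 *		if (ust_buffers_put_subbuf(buf, consumed) == -EIO) {
 *			// data was overwritten while we read it: discard
 *		}
 *	}
 */
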
static int map_buf_structs(struct ust_channel *chan)
{
	void *ptr;
	int result;
	size_t size;
	int i;

	size = PAGE_ALIGN(1);

	for(i=0; i<chan->n_cpus; i++) {

		result = chan->buf_struct_shmids[i] = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
		if(result == -1) {
			PERROR("shmget");
			goto destroy_previous;
		}

		ptr = shmat(chan->buf_struct_shmids[i], NULL, 0);
		if(ptr == (void *) -1) {
			perror("shmat");
			goto destroy_shm;
		}

		/* Already mark the shared memory for destruction. This will occur only
		 * when all users have detached.
		 */
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if(result == -1) {
			perror("shmctl");
			goto destroy_previous;
		}

		chan->buf[i] = ptr;
	}

	return 0;

	/* Jumping inside this loop occurs from within the other loop above with i as
	 * counter, so it deallocates the structures for the cpu = current_i down to
	 * zero. */
	for(; i>=0; i--) {
	destroy_shm:
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if(result == -1) {
			perror("shmctl");
		}

	destroy_previous:
		continue;
	}

	return -1;
}

static int unmap_buf_structs(struct ust_channel *chan)
{
	int i;

	for (i=0; i < chan->n_cpus; i++) {
		if (shmdt(chan->buf[i]) < 0) {
			PERROR("shmdt");
		}
	}
	return 0;
}

/*
 * Create channel.
 */
static int create_channel(const char *trace_name, struct ust_trace *trace,
		const char *channel_name, struct ust_channel *chan,
		unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
{
	int i, result;

	chan->trace = trace;
	chan->overwrite = overwrite;
	chan->n_subbufs_order = get_count_order(n_subbufs);
	chan->commit_count_mask = (~0UL >> chan->n_subbufs_order);
	chan->n_cpus = get_n_cpus();

	/* These mappings should ideally be per-cpu, if somebody can do that
	 * from userspace, that would be cool!
	 */
	chan->buf = (void *) zmalloc(chan->n_cpus * sizeof(void *));
	if(chan->buf == NULL) {
		goto error;
	}
	chan->buf_struct_shmids = (int *) zmalloc(chan->n_cpus * sizeof(int));
	if(chan->buf_struct_shmids == NULL)
		goto free_buf;

	result = map_buf_structs(chan);
	if(result != 0) {
		goto free_buf_struct_shmids;
	}

	result = open_channel(chan, subbuf_size, n_subbufs);
	if (result != 0) {
		ERR("Cannot open channel for trace %s", trace_name);
		goto unmap_buf_structs;
	}

	return 0;

unmap_buf_structs:
	for (i=0; i < chan->n_cpus; i++) {
		if (shmdt(chan->buf[i]) < 0) {
			PERROR("shmdt bufstruct");
		}
	}

free_buf_struct_shmids:
	free(chan->buf_struct_shmids);

free_buf:
	free(chan->buf);

error:
	return -1;
}


static void remove_channel(struct ust_channel *chan)
{
	close_channel(chan);

	unmap_buf_structs(chan);

	free(chan->buf_struct_shmids);

	free(chan->buf);
}

static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
{
}

static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu)
{
	if (channel->buf[cpu]) {
		struct ust_buffer *buf = channel->buf[cpu];
		ltt_force_switch(buf, FORCE_FLUSH);

		/* closing the pipe tells the consumer the buffer is finished */
		close(buf->data_ready_fd_write);
	}
}


static void finish_channel(struct ust_channel *channel)
{
	unsigned int i;

	for (i=0; i<channel->n_cpus; i++) {
		ltt_relay_finish_buffer(channel, i);
	}
}

/*
 * ltt_reserve_switch_old_subbuf: switch old subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 *
 * Note : offset_old should never be 0 here.
 */
static void ltt_reserve_switch_old_subbuf(
		struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long oldidx = SUBBUF_INDEX(offsets->old - 1, chan);
	long commit_count, padding_size;

	padding_size = chan->subbuf_size
			- (SUBBUF_OFFSET(offsets->old - 1, chan) + 1);
	ltt_buffer_end(buf, *tsc, offsets->old, oldidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its cmm_smp_rmb().
	 */
	cmm_smp_wmb();
	uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
	commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
	ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
	ltt_write_commit_counter(chan, buf, oldidx,
			offsets->old, commit_count, padding_size);
}

/*
 * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
 *
 * This code can be executed unordered : writers may already have written to the
 * sub-buffer before this code gets executed, caution. The commit makes sure
 * that this code is executed before the deliver of this sub-buffer.
 */
static void ltt_reserve_switch_new_subbuf(
		struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long beginidx = SUBBUF_INDEX(offsets->begin, chan);
	long commit_count;

	ltt_buffer_begin(buf, *tsc, beginidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its cmm_smp_rmb().
	 */
	cmm_smp_wmb();
	uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
	commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
	/* Check if the written buffer has to be delivered */
	ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx);
	ltt_write_commit_counter(chan, buf, beginidx,
			offsets->begin, commit_count, ltt_subbuffer_header_size());
}

/*
 * ltt_reserve_end_switch_current: finish switching current subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 */
static void ltt_reserve_end_switch_current(
		struct ust_channel *chan,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long endidx = SUBBUF_INDEX(offsets->end - 1, chan);
	long commit_count, padding_size;

	padding_size = chan->subbuf_size
			- (SUBBUF_OFFSET(offsets->end - 1, chan) + 1);

	ltt_buffer_end(buf, *tsc, offsets->end, endidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its cmm_smp_rmb().
	 */
	cmm_smp_wmb();
	uatomic_add(&buf->commit_count[endidx].cc, padding_size);
	commit_count = uatomic_read(&buf->commit_count[endidx].cc);
	ltt_check_deliver(chan, buf,
		offsets->end - 1, commit_count, endidx);
	ltt_write_commit_counter(chan, buf, endidx,
			offsets->end, commit_count, padding_size);
}

/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static int ltt_relay_try_switch_slow(
		enum force_switch_mode mode,
		struct ust_channel *chan,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets,
		u64 *tsc)
{
	long subbuf_index;
	long reserve_commit_diff;

	offsets->begin = uatomic_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
		offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
		offsets->end_switch_old = 1;
	} else {
		/* we do not have to switch : buffer is empty */
		return -1;
	}
	if (mode == FORCE_ACTIVE)
		offsets->begin += ltt_subbuffer_header_size();
	/*
	 * Always begin_switch in FORCE_ACTIVE mode.
	 * Test new buffer integrity
	 */
	subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
	reserve_commit_diff =
		(BUFFER_TRUNC(offsets->begin, buf->chan)
		 >> chan->n_subbufs_order)
		- (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
			& chan->commit_count_mask);
	if (reserve_commit_diff == 0) {
		/* Next buffer not corrupted. */
		if (mode == FORCE_ACTIVE
		    && !chan->overwrite
		    && offsets->begin - uatomic_read(&buf->consumed)
		       >= chan->alloc_size) {
			/*
			 * We do not overwrite non consumed buffers and we are
			 * full : ignore switch while tracing is active.
			 */
			return -1;
		}
	} else {
		/*
		 * Next subbuffer corrupted. Force pushing reader even in normal
		 * mode
		 */
	}
	offsets->end = offsets->begin;
	return 0;
}

/*
 * Force a sub-buffer switch for a per-cpu buffer. This operation is
 * completely reentrant : can be called while tracing is active with
 * absolutely no lock held.
 */
void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
		enum force_switch_mode mode)
{
	struct ust_channel *chan = buf->chan;
	struct ltt_reserve_switch_offsets offsets;
	u64 tsc;

	offsets.size = 0;

	DBG("Switching (forced) %s_%d", chan->channel_name, buf->cpu);
	/*
	 * Perform retryable operations.
	 */
	do {
		if (ltt_relay_try_switch_slow(mode, chan, buf,
				&offsets, &tsc))
			return;
	} while (uatomic_cmpxchg(&buf->offset, offsets.old,
			offsets.end) != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, tsc);

	/*
	 * Push the reader if necessary
	 */
	if (mode == FORCE_ACTIVE) {
		ltt_reserve_push_reader(chan, buf, offsets.end - 1);
	}

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old) {
		ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc);
	}

	/*
	 * Populate new subbuffer.
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_switch_new_subbuf(chan, buf, &offsets, &tsc);
}

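/*
 * Note (added for clarity): the do/while retry loop in
 * ltt_force_switch_lockless_slow() above is the lockless reservation
 * protocol. Each contender recomputes its offsets and publishes them with a
 * single uatomic_cmpxchg() on buf->offset; only the thread whose cmpxchg
 * succeeds owns the switch and performs the bookkeeping that follows, the
 * others simply retry with fresh offsets.
 */
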
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
		u64 *tsc, unsigned int *rflags, int largest_align)
{
	long reserve_commit_diff;

	offsets->begin = uatomic_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_current = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();
	if (last_tsc_overflow(buf, *tsc))
		*rflags = LTT_RFLAG_ID_SIZE_TSC;

	if (unlikely(SUBBUF_OFFSET(offsets->begin, buf->chan) == 0)) {
		offsets->begin_switch = 1;		/* For offsets->begin */
	} else {
		offsets->size = ust_get_header_size(chan,
				offsets->begin, data_size,
				&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) +
			     offsets->size) > buf->chan->subbuf_size)) {
			offsets->end_switch_old = 1;	/* For offsets->old */
			offsets->begin_switch = 1;	/* For offsets->begin */
		}
	}
	if (unlikely(offsets->begin_switch)) {
		long subbuf_index;

		/*
		 * We are typically not filling the previous buffer completely.
		 */
		if (likely(offsets->end_switch_old))
			offsets->begin = SUBBUF_ALIGN(offsets->begin,
						      buf->chan);
		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
		/* Test new buffer integrity */
		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
		reserve_commit_diff =
			(BUFFER_TRUNC(offsets->begin, buf->chan)
			 >> chan->n_subbufs_order)
			- (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
				& chan->commit_count_mask);
		if (likely(reserve_commit_diff == 0)) {
			/* Next buffer not corrupted. */
			if (unlikely(!chan->overwrite &&
				(SUBBUF_TRUNC(offsets->begin, buf->chan)
				 - SUBBUF_TRUNC(uatomic_read(
						&buf->consumed),
					buf->chan))
				>= chan->alloc_size)) {
				/*
				 * We do not overwrite non consumed buffers
				 * and we are full : event is lost.
				 */
				uatomic_inc(&buf->events_lost);
				return -1;
			} else {
				/*
				 * next buffer not corrupted, we are either in
				 * overwrite mode or the buffer is not full.
				 * It's safe to write in this new subbuffer.
				 */
			}
		} else {
			/*
			 * Next subbuffer corrupted. Drop event in normal and
			 * overwrite mode. Caused by either a writer OOPS or
			 * too many nested writes over a reserve/commit pair.
			 */
			uatomic_inc(&buf->events_lost);
			return -1;
		}
		offsets->size = ust_get_header_size(chan,
				offsets->begin, data_size,
				&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan)
			     + offsets->size) > buf->chan->subbuf_size)) {
			/*
			 * Event too big for subbuffers, report error, don't
			 * complete the sub-buffer switch.
			 */
			uatomic_inc(&buf->events_lost);
			return -1;
		} else {
			/*
			 * We just made a successful buffer switch and the event
			 * fits in the new subbuffer. Let's write.
			 */
		}
	} else {
		/*
		 * Event fits in the current buffer and we are not on a switch
		 * boundary. It's safe to write.
		 */
	}
	offsets->end = offsets->begin + offsets->size;

	if (unlikely((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0)) {
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		offsets->end_switch_current = 1;	/* For offsets->begin */
	}
	return 0;
}

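/*
 * Note (added for clarity): ltt_relay_try_reserve_slow() above has three
 * possible outcomes: (1) the event fits in the current subbuffer and only
 * offsets->end moves; (2) a subbuffer boundary is crossed, so begin_switch/
 * end_switch_old are set and the caller must run the switch helpers above;
 * (3) the event must be dropped (buffer full in non-overwrite mode,
 * corrupted next subbuffer, or event larger than a subbuffer), in which
 * case events_lost is incremented and -1 is returned.
 */
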
/**
 * ltt_relay_reserve_slot_lockless_slow - Atomic slot reservation in a buffer.
 * @trace: the trace structure to log to.
 * @ltt_channel: channel structure
 * @transport_data: data structure specific to ltt relay
 * @data_size: size of the variable length data to log.
 * @slot_size: pointer to total size of the slot (out)
 * @buf_offset : pointer to reserved buffer offset (out)
 * @tsc: pointer to the tsc at the slot reservation (out)
 * @cpu: cpuid
 *
 * Return : -ENOSPC if not enough space, else returns 0.
 * It will take care of sub-buffer switching.
 */
int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
		struct ust_trace *trace, size_t data_size,
		int largest_align, int cpu,
		struct ust_buffer **ret_buf,
		size_t *slot_size, long *buf_offset,
		u64 *tsc, unsigned int *rflags)
{
	struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
	struct ltt_reserve_switch_offsets offsets;

	offsets.size = 0;

	do {
		if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets,
				data_size, tsc, rflags, largest_align)))
			return -ENOSPC;
	} while (unlikely(uatomic_cmpxchg(&buf->offset, offsets.old,
			offsets.end) != offsets.old));

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, *tsc);

	/*
	 * Push the reader if necessary
	 */
	ltt_reserve_push_reader(chan, buf, offsets.end - 1);

	/*
	 * Switch old subbuffer if needed.
	 */
	if (unlikely(offsets.end_switch_old)) {
		ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc);
		DBG("Switching %s_%d", chan->channel_name, cpu);
	}

	/*
	 * Populate new subbuffer.
	 */
	if (unlikely(offsets.begin_switch))
		ltt_reserve_switch_new_subbuf(chan, buf, &offsets, tsc);

	if (unlikely(offsets.end_switch_current))
		ltt_reserve_end_switch_current(chan, buf, &offsets, tsc);

	*slot_size = offsets.size;
	*buf_offset = offsets.begin + offsets.before_hdr_pad;
	return 0;
}

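/*
 * Illustrative writer-side usage of the reservation above (a sketch; the
 * commit step lives in the fast-path helpers of buffers.h, not in this file,
 * so its exact name is an assumption for illustration only):
 *
 *	size_t slot_size;
 *	long buf_offset;
 *	u64 tsc;
 *	unsigned int rflags = 0;
 *	struct ust_buffer *buf;
 *
 *	if (!ltt_reserve_slot_lockless_slow(chan, trace, data_size,
 *			largest_align, cpu, &buf, &slot_size, &buf_offset,
 *			&tsc, &rflags)) {
 *		buf_offset = ltt_write_event_header_slow(chan, buf, buf_offset,
 *				eID, data_size, tsc, rflags);
 *		// ... write payload, then commit slot_size bytes ...
 *	}
 */
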
static struct ltt_transport ust_relay_transport = {
	.name = "ustrelay",
	.ops = {
		.create_channel = create_channel,
		.finish_channel = finish_channel,
		.remove_channel = remove_channel,
		.wakeup_channel = ltt_relay_async_wakeup_chan,
	},
};

static char initialized = 0;

void __attribute__((constructor)) init_ustrelay_transport(void)
{
	if(!initialized) {
		ltt_transport_register(&ust_relay_transport);
		initialized = 1;
	}
}

static void __attribute__((destructor)) ust_buffers_exit(void)
{
	ltt_transport_unregister(&ust_relay_transport);
}

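/*
 * Note (added for clarity): in the packed id_time word written below, the
 * bits above LTT_TSC_BITS carry the event ID. The values 29, 30 and 31 act
 * as reserved escape IDs telling the reader which extended header follows
 * (ID+size+full TSC, ID+size, or ID only), while the low LTT_TSC_BITS bits
 * always hold the truncated timestamp.
 */
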
size_t ltt_write_event_header_slow(struct ust_channel *channel,
		struct ust_buffer *buf, long buf_offset,
		u16 eID, u32 event_size,
		u64 tsc, unsigned int rflags)
{
	struct ltt_event_header header;
	u16 small_size;

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		header.id_time = 29 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID_SIZE:
		header.id_time = 30 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID:
		header.id_time = 31 << LTT_TSC_BITS;
		break;
	default:
		WARN_ON_ONCE(1);
		header.id_time = 0;
		break;
	}

	header.id_time |= (u32)tsc & LTT_TSC_MASK;
	ust_buffers_write(buf, buf_offset, &header, sizeof(header));
	buf_offset += sizeof(header);

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			&small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ust_buffers_write(buf, buf_offset,
				&event_size, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		buf_offset += ltt_align(buf_offset, sizeof(u64));
		ust_buffers_write(buf, buf_offset,
			&tsc, sizeof(u64));
		buf_offset += sizeof(u64);
		break;
	case LTT_RFLAG_ID_SIZE:
		small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			&small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ust_buffers_write(buf, buf_offset,
				&event_size, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		break;
	case LTT_RFLAG_ID:
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		break;
	}

	return buf_offset;
}