/*
 * buffers.c
 * LTTng userspace tracer buffering system
 *
 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <sys/mman.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <fcntl.h>
#include <unistd.h>	/* getpid(), pipe(), write(), close() */
#include <errno.h>	/* errno, EINVAL */
#include <stdlib.h>	/* free() */
#include <ust/kernelcompat.h>
#include <kcompat/kref.h>
#include "buffers.h"
#include "channels.h"
#include "tracer.h"
#include "tracercore.h"
#include "usterr.h"

static DEFINE_MUTEX(ust_buffers_channels_mutex);
static LIST_HEAD(ust_buffers_channels);

static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
		struct ust_channel *ltt_chan,
		struct ust_buffer *buf,
		unsigned int n_subbufs);

static int ust_buffers_alloc_buf(struct ust_buffer *buf, size_t *size)
{
	void *ptr;
	int result;

	*size = PAGE_ALIGN(*size);

	result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
	if(result == -1 && errno == EINVAL) {
		ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
		return -1;
	}
	else if(result == -1) {
		PERROR("shmget");
		return -1;
	}

	ptr = shmat(buf->shmid, NULL, 0);
	if(ptr == (void *) -1) {
		PERROR("shmat");
		goto destroy_shmem;
	}

	/* Already mark the shared memory for destruction. This will occur only
	 * when all users have detached.
	 */
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if(result == -1) {
		PERROR("shmctl");
		return -1;
	}

	buf->buf_data = ptr;
	buf->buf_size = *size;

	return 0;

destroy_shmem:
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if(result == -1) {
		PERROR("shmctl");
	}

	return -1;
}

static struct ust_buffer *ust_buffers_create_buf(struct ust_channel *channel)
{
	int result;

	result = ust_buffers_alloc_buf(channel->buf, &channel->alloc_size);
	if(result)
		goto free_buf;

	((struct ust_buffer *)channel->buf)->chan = channel;
	kref_get(&channel->kref);
	return channel->buf;

free_buf:
	return NULL;
}

static void ust_buffers_destroy_channel(struct kref *kref)
{
	struct ust_channel *chan = container_of(kref, struct ust_channel, kref);
	free(chan);
}

static void ust_buffers_destroy_buf(struct ust_buffer *buf)
{
	struct ust_channel *chan = buf->chan;
	int result;

	/* buf_data was attached with shmat() in ust_buffers_alloc_buf(), so
	 * it must be detached with shmdt(), not unmapped with munmap(). */
	result = shmdt(buf->buf_data);
	if(result == -1) {
		PERROR("shmdt");
	}

	free(buf);
	kref_put(&chan->kref, ust_buffers_destroy_channel);
}

/* called from kref_put */
static void ust_buffers_remove_buf(struct kref *kref)
{
	struct ust_buffer *buf = container_of(kref, struct ust_buffer, kref);
	ust_buffers_destroy_buf(buf);
}

static struct ust_buffer *ust_buffers_open_buf(struct ust_channel *chan)
{
	struct ust_buffer *buf = NULL;
	int err;

	buf = ust_buffers_create_buf(chan);
	if (!buf)
		return NULL;

	kref_init(&buf->kref);

	err = ust_buffers_init_buffer(chan->trace, chan, buf, chan->subbuf_cnt);

	if (err)
		return ERR_PTR(err);

	return buf;

	/* FIXME: decrementally destroy on error? */
}

/**
 * ust_buffers_close_buf - close a channel buffer
 * @buf: buffer
 */
static void ust_buffers_close_buf(struct ust_buffer *buf)
{
	kref_put(&buf->kref, ust_buffers_remove_buf);
}

int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_t subbuf_cnt)
{
	if(subbuf_size == 0 || subbuf_cnt == 0)
		return -1;

	chan->version = UST_CHANNEL_VERSION;
	chan->subbuf_cnt = subbuf_cnt;
	chan->subbuf_size = subbuf_size;
	chan->subbuf_size_order = get_count_order(subbuf_size);
	chan->alloc_size = FIX_SIZE(subbuf_size * subbuf_cnt);
	kref_init(&chan->kref);

	mutex_lock(&ust_buffers_channels_mutex);
	chan->buf = ust_buffers_open_buf(chan);
	if (!chan->buf)
		goto error;
	list_add(&chan->list, &ust_buffers_channels);
	mutex_unlock(&ust_buffers_channels_mutex);

	return 0;

error:
	kref_put(&chan->kref, ust_buffers_destroy_channel);
	mutex_unlock(&ust_buffers_channels_mutex);
	return -1;
}

void ust_buffers_channel_close(struct ust_channel *chan)
{
	if (!chan)
		return;

	mutex_lock(&ust_buffers_channels_mutex);
	if (chan->buf)
		ust_buffers_close_buf(chan->buf);

	list_del(&chan->list);
	kref_put(&chan->kref, ust_buffers_destroy_channel);
	mutex_unlock(&ust_buffers_channels_mutex);
}
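
/*
 * Editorial usage sketch (not part of the original file): a tracer-side
 * caller would pair the two entry points above roughly like this. The
 * sizes are arbitrary example values; subbuf_size is expected to be a
 * power of two, and chan is assumed to be allocated and named elsewhere.
 *
 *	struct ust_channel *chan = ...;
 *
 *	if (ust_buffers_channel_open(chan, 4096, 8) == 0) {
 *		... tracing runs, writers reserve/commit slots ...
 *		ust_buffers_channel_close(chan);
 *	}
 */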

/* _ust_buffers_write()
 *
 * @buf: destination buffer
 * @offset: offset in destination
 * @src: source buffer
 * @len: length of source
 * @cpy: already copied
 */

void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
	const void *src, size_t len, ssize_t cpy)
{
	do {
		len -= cpy;
		src += cpy;
		offset += cpy;
		WARN_ON(offset >= buf->buf_size);

		cpy = min_t(size_t, len, buf->buf_size - offset);
		ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
	} while (unlikely(len != cpy));
}
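
/*
 * Sketch of the expected fast path (an assumption: the inline caller is
 * believed to live in buffers.h and is not shown in this file). The first
 * chunk is copied directly; _ust_buffers_write() above is only entered
 * when the copy wraps past the end of the buffer:
 *
 *	size_t cpy = min_t(size_t, len, buf->buf_size - offset);
 *	ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
 *	if (unlikely(len != cpy))
 *		_ust_buffers_write(buf, offset, src, len, cpy);
 */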

/**
 * ltt_buffers_offset_address - get address of a location within the buffer
 * @buf : buffer
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
void *ltt_buffers_offset_address(struct ust_buffer *buf, size_t offset)
{
	return ((char *)buf->buf_data) + offset;
}
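
/*
 * Example (taken from the callbacks below): this is how the begin/end
 * callbacks locate the current subbuffer header before filling it in:
 *
 *	struct ltt_subbuffer_header *header =
 *		(struct ltt_subbuffer_header *)
 *			ltt_buffers_offset_address(buf,
 *				subbuf_idx * buf->chan->subbuf_size);
 */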

/*
 * -------
 */

/*
 * Last TSC comparison functions. Check if the current TSC overflows
 * LTT_TSC_BITS bits from the last TSC read. Reads and writes last_tsc
 * atomically.
 */

/* FIXME: does this test work properly? */
#if (BITS_PER_LONG == 32)
static inline void save_last_tsc(struct ust_buffer *ltt_buf,
					u64 tsc)
{
	ltt_buf->last_tsc = (unsigned long)(tsc >> LTT_TSC_BITS);
}

static inline int last_tsc_overflow(struct ust_buffer *ltt_buf,
					u64 tsc)
{
	unsigned long tsc_shifted = (unsigned long)(tsc >> LTT_TSC_BITS);

	if (unlikely((tsc_shifted - ltt_buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline void save_last_tsc(struct ust_buffer *ltt_buf,
					u64 tsc)
{
	ltt_buf->last_tsc = (unsigned long)tsc;
}

static inline int last_tsc_overflow(struct ust_buffer *ltt_buf,
					u64 tsc)
{
	if (unlikely((tsc - ltt_buf->last_tsc) >> LTT_TSC_BITS))
		return 1;
	else
		return 0;
}
#endif
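
/*
 * Worked example (illustrative; the real LTT_TSC_BITS value is defined
 * elsewhere): assume LTT_TSC_BITS == 27 on a 64-bit build. Two events
 * whose TSCs fall in the same 2^27-cycle window give
 * (tsc - last_tsc) >> 27 == 0, so the second event may carry only the
 * compressed TSC bits. Once the delta reaches 2^27, the shifted value is
 * non-zero, last_tsc_overflow() returns 1, and the reserve path below
 * sets LTT_RFLAG_ID_SIZE_TSC so a full 64-bit TSC is written with the
 * event header.
 */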

/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).
 */
enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };

static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan);

static void ltt_force_switch(struct ust_buffer *buf,
		enum force_switch_mode mode);

/*
 * Trace callbacks
 */
static void ltt_buffer_begin_callback(struct ust_buffer *buf,
			u64 tsc, unsigned int subbuf_idx)
{
	struct ust_channel *channel = buf->chan;
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ltt_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->cycle_count_begin = tsc;
	header->lost_size = 0xFFFFFFFF; /* for debugging */
	header->buf_size = buf->chan->subbuf_size;
	ltt_write_trace_header(channel->trace, header);
}

/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. The lost size is between 0 and subbuf_size-1.
 */
static notrace void ltt_buffer_end_callback(struct ust_buffer *buf,
		u64 tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ltt_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->lost_size = SUBBUF_OFFSET((buf->chan->subbuf_size - offset),
				buf->chan);
	header->cycle_count_end = tsc;
	header->events_lost = local_read(&buf->events_lost);
	header->subbuf_corrupt = local_read(&buf->corrupted_subbuffers);
}

void (*wake_consumer)(void *, int) = NULL;

void relay_set_wake_consumer(void (*wake)(void *, int))
{
	wake_consumer = wake;
}

void relay_wake_consumer(void *arg, int finished)
{
	if(wake_consumer)
		wake_consumer(arg, finished);
}

static notrace void ltt_deliver(struct ust_buffer *buf, unsigned int subbuf_idx,
		long commit_count)
{
	int result;

//ust//	#ifdef CONFIG_LTT_VMCORE
	local_set(&buf->commit_seq[subbuf_idx], commit_count);
//ust//	#endif

	/* wakeup consumer */
	result = write(buf->data_ready_fd_write, "1", 1);
	if(result == -1) {
		PERROR("write (in ltt_deliver)");
		ERR("this should never happen!");
	}
//ust//	atomic_set(&ltt_buf->wakeup_readers, 1);
}

/*
 * This function should not be called from NMI interrupt context
 */
static notrace void ltt_buf_unfull(struct ust_buffer *buf,
		unsigned int subbuf_idx,
		long offset)
{
//ust//	struct ltt_channel_struct *ltt_channel =
//ust//		(struct ltt_channel_struct *)buf->chan->private_data;
//ust//	struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
//ust//
//ust//	ltt_relay_wake_writers(ltt_buf);
}

int ust_buffers_do_get_subbuf(struct ust_buffer *buf, long *pconsumed_old)
{
	struct ust_channel *channel = buf->chan;
	long consumed_old, consumed_idx, commit_count, write_offset;

	consumed_old = atomic_long_read(&buf->consumed);
	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
	commit_count = local_read(&buf->commit_count[consumed_idx]);
	/*
	 * Make sure we read the commit count before reading the buffer
	 * data and the write offset. Correct consumed offset ordering
	 * wrt commit count is ensured by the use of cmpxchg to update
	 * the consumed offset.
	 */
	smp_rmb();
	write_offset = local_read(&buf->offset);
	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */
	if (((commit_count - buf->chan->subbuf_size)
	     & channel->commit_count_mask)
	    - (BUFFER_TRUNC(consumed_old, buf->chan)
	       >> channel->n_subbufs_order)
	    != 0) {
		return -EAGAIN;
	}
	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if ((SUBBUF_TRUNC(write_offset, buf->chan)
	     - SUBBUF_TRUNC(consumed_old, buf->chan))
	    == 0) {
		return -EAGAIN;
	}

	*pconsumed_old = consumed_old;
	return 0;
}

int ust_buffers_do_put_subbuf(struct ust_buffer *buf, u32 uconsumed_old)
{
	long consumed_new, consumed_old;

	consumed_old = atomic_long_read(&buf->consumed);
	consumed_old = consumed_old & (~0xFFFFFFFFL);
	consumed_old = consumed_old | uconsumed_old;
	consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);

//ust//	spin_lock(&ltt_buf->full_lock);
	if (atomic_long_cmpxchg(&buf->consumed, consumed_old,
				consumed_new)
	    != consumed_old) {
		/* We have been pushed by the writer : the last
		 * buffer read _is_ corrupted! It can also
		 * happen if this is a buffer we never got. */
//ust//		spin_unlock(&ltt_buf->full_lock);
		return -EIO;
	} else {
		/* tell the client that buffer is now unfull */
		int index;
		long data;
		index = SUBBUF_INDEX(consumed_old, buf->chan);
		data = BUFFER_OFFSET(consumed_old, buf->chan);
		ltt_buf_unfull(buf, index, data);
//ust//		spin_unlock(&ltt_buf->full_lock);
	}
	return 0;
}
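
/*
 * Consumer-side usage sketch (read_subbuffer() is a hypothetical helper
 * standing in for the consumer's copy-out routine, which is not part of
 * this file):
 *
 *	long consumed_old;
 *
 *	while (ust_buffers_do_get_subbuf(buf, &consumed_old) == 0) {
 *		read_subbuffer(buf, consumed_old);
 *		if (ust_buffers_do_put_subbuf(buf, (u32)consumed_old))
 *			break;	// -EIO: pushed by the writer, data corrupt
 *	}
 */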

static void ltt_relay_print_subbuffer_errors(
		struct ust_channel *channel,
		long cons_off)
{
	struct ust_buffer *ltt_buf = channel->buf;
	long cons_idx, commit_count, write_offset;

	cons_idx = SUBBUF_INDEX(cons_off, channel);
	commit_count = local_read(&ltt_buf->commit_count[cons_idx]);
	/*
	 * No need to order commit_count and write_offset reads because we
	 * execute after trace is stopped when there are no readers left.
	 */
	write_offset = local_read(&ltt_buf->offset);
	WARN("LTT : unread channel %s offset is %ld "
		"and cons_off : %ld\n",
		channel->channel_name, write_offset, cons_off);
	/* Check each sub-buffer for a non-filled commit count */
	if (((commit_count - channel->subbuf_size) & channel->commit_count_mask)
	    - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0) {
		ERR("LTT : %s : subbuffer %lu has non-filled "
			"commit count %lu.\n",
			channel->channel_name, cons_idx, commit_count);
	}
	ERR("LTT : %s : commit count : %lu, subbuf size %zd\n",
		channel->channel_name, commit_count,
		channel->subbuf_size);
}

static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
		struct ust_channel *channel)
{
	struct ust_buffer *ltt_buf;
	long cons_off;

	/*
	 * Can be called in the error path of allocation when
	 * trans_channel_data is not yet set. Check for NULL before
	 * dereferencing the channel.
	 */
	if (!channel)
		return;
	ltt_buf = channel->buf;

	for (cons_off = atomic_long_read(&ltt_buf->consumed);
			(SUBBUF_TRUNC(local_read(&ltt_buf->offset),
				      channel)
			 - cons_off) > 0;
			cons_off = SUBBUF_ALIGN(cons_off, channel))
		ltt_relay_print_subbuffer_errors(channel, cons_off);
}

static void ltt_relay_print_buffer_errors(struct ust_channel *channel)
{
	struct ltt_trace_struct *trace = channel->trace;
	struct ust_buffer *ltt_buf = channel->buf;

	if (local_read(&ltt_buf->events_lost))
		ERR("channel %s: %ld events lost",
			channel->channel_name,
			local_read(&ltt_buf->events_lost));
	if (local_read(&ltt_buf->corrupted_subbuffers))
		ERR("channel %s: %ld corrupted subbuffers",
			channel->channel_name,
			local_read(&ltt_buf->corrupted_subbuffers));

	ltt_relay_print_errors(trace, channel);
}

static void ltt_relay_release_channel(struct kref *kref)
{
	struct ust_channel *ltt_chan = container_of(kref,
			struct ust_channel, kref);
	free(ltt_chan->buf);
}

/*
 * Create ltt buffer.
 */
//ust// static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
//ust//		struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
//ust//		unsigned int cpu, unsigned int n_subbufs)
//ust// {
//ust//	struct ltt_channel_buf_struct *ltt_buf =
//ust//		percpu_ptr(ltt_chan->buf, cpu);
//ust//	unsigned int j;
//ust//
//ust//	ltt_buf->commit_count =
//ust//		kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
//ust//			GFP_KERNEL, cpu_to_node(cpu));
//ust//	if (!ltt_buf->commit_count)
//ust//		return -ENOMEM;
//ust//	kref_get(&trace->kref);
//ust//	kref_get(&trace->ltt_transport_kref);
//ust//	kref_get(&ltt_chan->kref);
//ust//	local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
//ust//	atomic_long_set(&ltt_buf->consumed, 0);
//ust//	atomic_long_set(&ltt_buf->active_readers, 0);
//ust//	for (j = 0; j < n_subbufs; j++)
//ust//		local_set(&ltt_buf->commit_count[j], 0);
//ust//	init_waitqueue_head(&ltt_buf->write_wait);
//ust//	atomic_set(&ltt_buf->wakeup_readers, 0);
//ust//	spin_lock_init(&ltt_buf->full_lock);
//ust//
//ust//	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
//ust//	/* atomic_add made on local variable on data that belongs to
//ust//	 * various CPUs : ok because tracing not started (for this cpu). */
//ust//	local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
//ust//
//ust//	local_set(&ltt_buf->events_lost, 0);
//ust//	local_set(&ltt_buf->corrupted_subbuffers, 0);
//ust//
//ust//	return 0;
//ust// }

static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
		struct ust_channel *ltt_chan, struct ust_buffer *buf,
		unsigned int n_subbufs)
{
	unsigned int j;
	int fds[2];
	int result;

	/* One commit count per sub-buffer; the element size is
	 * sizeof(*buf->commit_count), not the size of the pointer. */
	buf->commit_count =
		zmalloc(sizeof(*buf->commit_count) * n_subbufs);
	if (!buf->commit_count)
		return -ENOMEM;
	kref_get(&trace->kref);
	kref_get(&trace->ltt_transport_kref);
	kref_get(&ltt_chan->kref);
	local_set(&buf->offset, ltt_subbuffer_header_size());
	atomic_long_set(&buf->consumed, 0);
	atomic_long_set(&buf->active_readers, 0);
	for (j = 0; j < n_subbufs; j++)
		local_set(&buf->commit_count[j], 0);
//ust//	init_waitqueue_head(&buf->write_wait);
//ust//	atomic_set(&buf->wakeup_readers, 0);
//ust//	spin_lock_init(&buf->full_lock);

	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);

	local_add(ltt_subbuffer_header_size(), &buf->commit_count[0]);

	local_set(&buf->events_lost, 0);
	local_set(&buf->corrupted_subbuffers, 0);

	result = pipe(fds);
	if(result == -1) {
		PERROR("pipe");
		return -1;
	}
	buf->data_ready_fd_read = fds[0];
	buf->data_ready_fd_write = fds[1];

	/* FIXME: do we actually need this? */
	result = fcntl(fds[0], F_SETFL, O_NONBLOCK);
	if(result == -1) {
		PERROR("fcntl");
	}

//ust//	buf->commit_seq = malloc(sizeof(buf->commit_seq) * n_subbufs);
//ust//	if(!ltt_buf->commit_seq) {
//ust//		return -1;
//ust//	}

	/* FIXME: decrementally destroy on error */

	return 0;
}

/* FIXME: use this function */
static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan)
{
	struct ltt_trace_struct *trace = ltt_chan->trace;
	struct ust_buffer *ltt_buf = ltt_chan->buf;

	kref_put(&ltt_chan->trace->ltt_transport_kref,
		ltt_release_transport);
	ltt_relay_print_buffer_errors(ltt_chan);
//ust//	free(ltt_buf->commit_seq);
	kfree(ltt_buf->commit_count);
	ltt_buf->commit_count = NULL;
	kref_put(&ltt_chan->kref, ltt_relay_release_channel);
	kref_put(&trace->kref, ltt_release_trace);
//ust//	wake_up_interruptible(&trace->kref_wq);
}

static void ltt_chan_alloc_ltt_buf(struct ust_channel *chan)
{
	void *ptr;
	int result;

	/* Get one page */
	/* FIXME: increase size if we have a seq_commit array that overflows the page */
	size_t size = PAGE_ALIGN(1);

	result = chan->buf_shmid = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
	if(chan->buf_shmid == -1) {
		PERROR("shmget");
		return;
	}

	ptr = shmat(chan->buf_shmid, NULL, 0);
	if(ptr == (void *) -1) {
		PERROR("shmat");
		goto destroy_shmem;
	}

	/* Already mark the shared memory for destruction. This will occur only
	 * when all users have detached.
	 */
	result = shmctl(chan->buf_shmid, IPC_RMID, NULL);
	if(result == -1) {
		PERROR("shmctl");
		return;
	}

	chan->buf = ptr;

	return;

destroy_shmem:
	result = shmctl(chan->buf_shmid, IPC_RMID, NULL);
	if(result == -1) {
		PERROR("shmctl");
	}

	return;
}

/*
 * Create channel.
 */
static int ust_buffers_create_channel(const char *trace_name, struct ltt_trace_struct *trace,
	const char *channel_name, struct ust_channel *ltt_chan,
	unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
{
	int err = 0;
	int result;

	kref_init(&ltt_chan->kref);

	ltt_chan->trace = trace;
	ltt_chan->buffer_begin = ltt_buffer_begin_callback;
	ltt_chan->buffer_end = ltt_buffer_end_callback;
	ltt_chan->overwrite = overwrite;
	ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
	ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
//ust//	ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);

	ltt_chan_alloc_ltt_buf(ltt_chan);

//ust//	ltt_chan->buf = malloc(sizeof(struct ltt_channel_buf_struct));
	if (!ltt_chan->buf)
		goto alloc_error;
	/* FIXME: handle error of this call */
	result = ust_buffers_channel_open(ltt_chan, subbuf_size, n_subbufs);
	if (result == -1) {
		ERR("Cannot open channel for trace %s", trace_name);
		goto relay_open_error;
	}

	err = 0;
	goto end;

relay_open_error:
//ust//	percpu_free(ltt_chan->buf);
alloc_error:
	err = EPERM;
end:
	return err;
}

/*
 * LTTng channel flush function.
 *
 * Must be called when no tracing is active in the channel, because of
 * accesses across CPUs.
 */
static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf)
{
	int result;

//ust//	buf->finalized = 1;
	ltt_force_switch(buf, FORCE_FLUSH);

	result = write(buf->data_ready_fd_write, "1", 1);
	if(result == -1) {
		PERROR("write (in ltt_relay_buffer_flush)");
		ERR("this should never happen!");
	}
}

static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
{
//ust//	unsigned int i;
//ust//	struct rchan *rchan = ltt_channel->trans_channel_data;
//ust//
//ust//	for_each_possible_cpu(i) {
//ust//		struct ltt_channel_buf_struct *ltt_buf =
//ust//			percpu_ptr(ltt_channel->buf, i);
//ust//
//ust//		if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
//ust//			atomic_set(&ltt_buf->wakeup_readers, 0);
//ust//			wake_up_interruptible(&rchan->buf[i]->read_wait);
//ust//		}
//ust//	}
}

static void ltt_relay_finish_buffer(struct ust_channel *channel)
{
//	int result;

	if (channel->buf) {
		struct ust_buffer *buf = channel->buf;
		ltt_relay_buffer_flush(buf);
//ust//		ltt_relay_wake_writers(ltt_buf);
		/* closing the pipe tells the consumer the buffer is finished */

		//result = write(ltt_buf->data_ready_fd_write, "D", 1);
		//if(result == -1) {
		//	PERROR("write (in ltt_relay_finish_buffer)");
		//	ERR("this should never happen!");
		//}
		close(buf->data_ready_fd_write);
	}
}

static void ltt_relay_finish_channel(struct ust_channel *channel)
{
//ust//	unsigned int i;

//ust//	for_each_possible_cpu(i)
		ltt_relay_finish_buffer(channel);
}

static void ltt_relay_remove_channel(struct ust_channel *channel)
{
	ust_buffers_channel_close(channel);
	kref_put(&channel->kref, ltt_relay_release_channel);
}

struct ltt_reserve_switch_offsets {
	long begin, end, old;
	long begin_switch, end_switch_current, end_switch_old;
	long commit_count, reserve_commit_diff;
	size_t before_hdr_pad, size;
};

/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static inline int ltt_relay_try_reserve(
		struct ust_channel *channel, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
		u64 *tsc, unsigned int *rflags, int largest_align)
{
	offsets->begin = local_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_current = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();
	if (last_tsc_overflow(buf, *tsc))
		*rflags = LTT_RFLAG_ID_SIZE_TSC;

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
		offsets->begin_switch = 1;		/* For offsets->begin */
	} else {
		offsets->size = ust_get_header_size(channel,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
				> buf->chan->subbuf_size) {
			offsets->end_switch_old = 1;	/* For offsets->old */
			offsets->begin_switch = 1;	/* For offsets->begin */
		}
	}
	if (offsets->begin_switch) {
		long subbuf_index;

		if (offsets->end_switch_old)
			offsets->begin = SUBBUF_ALIGN(offsets->begin,
						      buf->chan);
		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
		/* Test new buffer integrity */
		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
		offsets->reserve_commit_diff =
			(BUFFER_TRUNC(offsets->begin, buf->chan)
			 >> channel->n_subbufs_order)
			- (local_read(&buf->commit_count[subbuf_index])
			   & channel->commit_count_mask);
		if (offsets->reserve_commit_diff == 0) {
			long consumed;

			consumed = atomic_long_read(&buf->consumed);

			/* Next buffer not corrupted. */
			if (!channel->overwrite &&
				(SUBBUF_TRUNC(offsets->begin, buf->chan)
				 - SUBBUF_TRUNC(consumed, buf->chan))
				>= channel->alloc_size) {

				long consumed_idx = SUBBUF_INDEX(consumed, buf->chan);
				long commit_count = local_read(&buf->commit_count[consumed_idx]);
				if (((commit_count - buf->chan->subbuf_size)
				     & channel->commit_count_mask)
				    - (BUFFER_TRUNC(consumed, buf->chan)
				       >> channel->n_subbufs_order) != 0) {
					WARN("Event dropped. Caused by non-committed event.");
				} else {
					WARN("Event dropped. Caused by non-consumed buffer.");
				}
				/*
				 * We do not overwrite non consumed buffers
				 * and we are full : event is lost.
				 */
				local_inc(&buf->events_lost);
				return -1;
			} else {
				/*
				 * next buffer not corrupted, we are either in
				 * overwrite mode or the buffer is not full.
				 * It's safe to write in this new subbuffer.
				 */
			}
		} else {
			/*
			 * Next subbuffer corrupted. Force pushing reader even
			 * in normal mode. It's safe to write in this new
			 * subbuffer.
			 */
		}
		offsets->size = ust_get_header_size(channel,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
				> buf->chan->subbuf_size) {
			/*
			 * Event too big for subbuffers, report error, don't
			 * complete the sub-buffer switch.
			 */
			local_inc(&buf->events_lost);
			return -1;
		} else {
			/*
			 * We just made a successful buffer switch and the event
			 * fits in the new subbuffer. Let's write.
			 */
		}
	} else {
		/*
		 * Event fits in the current buffer and we are not on a switch
		 * boundary. It's safe to write.
		 */
	}
	offsets->end = offsets->begin + offsets->size;

	if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		offsets->end_switch_current = 1;	/* For offsets->begin */
	}
	return 0;
}
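
/*
 * Outcome summary (editorial note): on a 0 return, the offsets flags tell
 * the caller which slow paths to run. Example with a 4096-byte subbuffer:
 * an event reserved at offset 4000 that needs 200 bytes cannot fit, so
 * end_switch_old = 1 (finalize the old subbuffer) and begin_switch = 1
 * (start a new one); an event whose end lands exactly on a subbuffer
 * boundary sets end_switch_current = 1 so the now-full subbuffer gets
 * finalized at commit time.
 */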

/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static inline int ltt_relay_try_switch(
		enum force_switch_mode mode,
		struct ust_channel *channel,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets,
		u64 *tsc)
{
	long subbuf_index;

	offsets->begin = local_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
		offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
		offsets->end_switch_old = 1;
	} else {
		/* we do not have to switch : buffer is empty */
		return -1;
	}
	if (mode == FORCE_ACTIVE)
		offsets->begin += ltt_subbuffer_header_size();
	/*
	 * Always begin_switch in FORCE_ACTIVE mode.
	 * Test new buffer integrity
	 */
	subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
	offsets->reserve_commit_diff =
		(BUFFER_TRUNC(offsets->begin, buf->chan)
		 >> channel->n_subbufs_order)
		- (local_read(&buf->commit_count[subbuf_index])
		   & channel->commit_count_mask);
	if (offsets->reserve_commit_diff == 0) {
		/* Next buffer not corrupted. */
		if (mode == FORCE_ACTIVE
		    && !channel->overwrite
		    && offsets->begin - atomic_long_read(&buf->consumed)
		       >= channel->alloc_size) {
			/*
			 * We do not overwrite non consumed buffers and we are
			 * full : ignore switch while tracing is active.
			 */
			return -1;
		}
	} else {
		/*
		 * Next subbuffer corrupted. Force pushing reader even in normal
		 * mode
		 */
	}
	offsets->end = offsets->begin;
	return 0;
}

static inline void ltt_reserve_push_reader(
		struct ust_channel *channel,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets)
{
	long consumed_old, consumed_new;

	do {
		consumed_old = atomic_long_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer, the writer being at the farthest
		 * write position sub-buffer index in the buffer being the one
		 * which will win this loop.
		 * If the buffer is not in overwrite mode, pushing the reader
		 * only happens if a sub-buffer is corrupted.
		 */
		if ((SUBBUF_TRUNC(offsets->end - 1, buf->chan)
		     - SUBBUF_TRUNC(consumed_old, buf->chan))
		    >= channel->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while (atomic_long_cmpxchg(&buf->consumed, consumed_old,
			consumed_new) != consumed_old);

	if (consumed_old != consumed_new) {
		/*
		 * Reader pushed : we are the winner of the push, we can
		 * therefore re-equilibrate reserve and commit. Atomic increment
		 * of the commit count permits other writers to play around
		 * with this variable before us. We keep track of
		 * corrupted_subbuffers even in overwrite mode :
		 * we never want to write over a non completely committed
		 * sub-buffer : possible causes : the buffer size is too low
		 * compared to the unordered data input, or there is a writer
		 * that died between the reserve and the commit.
		 */
		if (offsets->reserve_commit_diff) {
			/*
			 * We have to alter the sub-buffer commit count.
			 * We do not deliver the previous subbuffer, given it
			 * was either corrupted or not consumed (overwrite
			 * mode).
			 */
			local_add(offsets->reserve_commit_diff,
				  &buf->commit_count[
					SUBBUF_INDEX(offsets->begin,
						     buf->chan)]);
			if (!channel->overwrite
			    || offsets->reserve_commit_diff
			       != channel->subbuf_size) {
				/*
				 * The reserve commit diff was not subbuf_size :
				 * it means the subbuffer was partly written to
				 * and is therefore corrupted. If it is multiple
				 * of subbuffer size and we are in flight
				 * recorder mode, we are skipping over a whole
				 * subbuffer.
				 */
				local_inc(&buf->corrupted_subbuffers);
			}
		}
	}
}
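
/*
 * Worked example (illustrative): with alloc_size covering 4 subbuffers of
 * 4096 bytes, the reader is pushed only when the subbuffer-truncated
 * distance between the end of the reserved slot and the consumed count
 * reaches the full 16384 bytes, i.e. when the writer is about to re-enter
 * the subbuffer the reader still owns. SUBBUF_ALIGN() then advances the
 * consumed count to the next subbuffer boundary.
 */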

/*
 * ltt_reserve_switch_old_subbuf: switch old subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new
 * subbuffer will be declared corrupted too because of the commit count
 * adjustment.
 *
 * Note : offset_old should never be 0 here.
 */
static inline void ltt_reserve_switch_old_subbuf(
		struct ust_channel *channel,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long oldidx = SUBBUF_INDEX(offsets->old - 1, channel);

	channel->buffer_end(buf, *tsc, offsets->old, oldidx);
	/* Must write buffer end before incrementing commit count */
	smp_wmb();
	offsets->commit_count =
		local_add_return(channel->subbuf_size
				 - (SUBBUF_OFFSET(offsets->old - 1, channel)
				    + 1),
				 &buf->commit_count[oldidx]);
	if ((BUFFER_TRUNC(offsets->old - 1, channel)
	     >> channel->n_subbufs_order)
	    - ((offsets->commit_count - channel->subbuf_size)
	       & channel->commit_count_mask) == 0)
		ltt_deliver(buf, oldidx, offsets->commit_count);
}

/*
 * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
 *
 * This code can be executed unordered : writers may already have written to the
 * sub-buffer before this code gets executed, caution. The commit makes sure
 * that this code is executed before the deliver of this sub-buffer.
 */
static /*inline*/ void ltt_reserve_switch_new_subbuf(
		struct ust_channel *channel,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long beginidx = SUBBUF_INDEX(offsets->begin, channel);

	channel->buffer_begin(buf, *tsc, beginidx);
	/* Must write buffer begin (the subbuffer header) before incrementing
	 * the commit count */
	smp_wmb();
	offsets->commit_count = local_add_return(ltt_subbuffer_header_size(),
			&buf->commit_count[beginidx]);
	/* Check if the written buffer has to be delivered */
	if ((BUFFER_TRUNC(offsets->begin, channel)
	     >> channel->n_subbufs_order)
	    - ((offsets->commit_count - channel->subbuf_size)
	       & channel->commit_count_mask) == 0)
		ltt_deliver(buf, beginidx, offsets->commit_count);
}


/*
 * ltt_reserve_end_switch_current: finish switching current subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new
 * subbuffer will be declared corrupted too because of the commit count
 * adjustment.
 */
static inline void ltt_reserve_end_switch_current(
		struct ust_channel *channel,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long endidx = SUBBUF_INDEX(offsets->end - 1, channel);

	channel->buffer_end(buf, *tsc, offsets->end, endidx);
	/* Must write buffer end before incrementing commit count */
	smp_wmb();
	offsets->commit_count =
		local_add_return(channel->subbuf_size
				 - (SUBBUF_OFFSET(offsets->end - 1, channel)
				    + 1),
				 &buf->commit_count[endidx]);
	if ((BUFFER_TRUNC(offsets->end - 1, channel)
	     >> channel->n_subbufs_order)
	    - ((offsets->commit_count - channel->subbuf_size)
	       & channel->commit_count_mask) == 0)
		ltt_deliver(buf, endidx, offsets->commit_count);
}

/**
 * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer.
 * @trace: the trace structure to log to.
 * @ltt_channel: channel structure
 * @transport_data: data structure specific to ltt relay
 * @data_size: size of the variable length data to log.
 * @slot_size: pointer to total size of the slot (out)
 * @buf_offset : pointer to reserved buffer offset (out)
 * @tsc: pointer to the tsc at the slot reservation (out)
 * @cpu: cpuid
 *
 * Return : -ENOSPC if not enough space, else returns 0.
 * It will take care of sub-buffer switching.
 */
static notrace int ltt_relay_reserve_slot(struct ltt_trace_struct *trace,
		struct ust_channel *channel, void **transport_data,
		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
		unsigned int *rflags, int largest_align)
{
	struct ust_buffer *buf = *transport_data = channel->buf;
	struct ltt_reserve_switch_offsets offsets;

	offsets.reserve_commit_diff = 0;
	offsets.size = 0;

	/*
	 * Perform retryable operations.
	 */
	if (ltt_nesting > 4) {
		local_inc(&buf->events_lost);
		return -EPERM;
	}
	do {
		if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags,
				largest_align))
			return -ENOSPC;
	} while (local_cmpxchg(&buf->offset, offsets.old,
			offsets.end) != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, *tsc);

	/*
	 * Push the reader if necessary
	 */
	ltt_reserve_push_reader(channel, buf, &offsets);

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old)
		ltt_reserve_switch_old_subbuf(channel, buf, &offsets, tsc);

	/*
	 * Populate new subbuffer.
	 */
	if (offsets.begin_switch)
		ltt_reserve_switch_new_subbuf(channel, buf, &offsets, tsc);

	if (offsets.end_switch_current)
		ltt_reserve_end_switch_current(channel, buf, &offsets, tsc);

	*slot_size = offsets.size;
	*buf_offset = offsets.begin + offsets.before_hdr_pad;
	return 0;
}

/*
 * Force a sub-buffer switch for a per-cpu buffer. This operation is
 * completely reentrant : can be called while tracing is active with
 * absolutely no lock held.
 *
 * Note, however, that as a local_cmpxchg is used for some atomic
 * operations, this function must be called from the CPU which owns the buffer
 * for an ACTIVE flush.
 */
static notrace void ltt_force_switch(struct ust_buffer *buf,
		enum force_switch_mode mode)
{
	struct ust_channel *channel = buf->chan;
	struct ltt_reserve_switch_offsets offsets;
	u64 tsc;

	offsets.reserve_commit_diff = 0;
	offsets.size = 0;

	/*
	 * Perform retryable operations.
	 */
	do {
		if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc))
			return;
	} while (local_cmpxchg(&buf->offset, offsets.old,
			offsets.end) != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, tsc);

	/*
	 * Push the reader if necessary
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_push_reader(channel, buf, &offsets);

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old)
		ltt_reserve_switch_old_subbuf(channel, buf, &offsets, &tsc);

	/*
	 * Populate new subbuffer.
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_switch_new_subbuf(channel, buf, &offsets, &tsc);
}

static struct ltt_transport ust_relay_transport = {
	.name = "ustrelay",
	.ops = {
		.create_channel = ust_buffers_create_channel,
		.finish_channel = ltt_relay_finish_channel,
		.remove_channel = ltt_relay_remove_channel,
		.wakeup_channel = ltt_relay_async_wakeup_chan,
//		.commit_slot = ltt_relay_commit_slot,
		.reserve_slot = ltt_relay_reserve_slot,
	},
};

/*
 * For flight recording: must be called after relay_commit. This function
 * decrements the subbuffer's lost_size each time the commit count reaches
 * back the reserve offset (modulo subbuffer size). It is useful for crash
 * dump.
 */
static /* inline */ void ltt_write_commit_counter(struct ust_buffer *buf,
		struct ust_buffer *ltt_buf,
		long idx, long buf_offset, long commit_count, size_t data_size)
{
	long offset;
	long commit_seq_old;

	offset = buf_offset + data_size;

	/*
	 * SUBBUF_OFFSET includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and event headers have non-zero length).
	 */
	if (unlikely(SUBBUF_OFFSET(offset - commit_count, buf->chan)))
		return;

	commit_seq_old = local_read(&ltt_buf->commit_seq[idx]);
	while (commit_seq_old < commit_count)
		commit_seq_old = local_cmpxchg(&ltt_buf->commit_seq[idx],
					commit_seq_old, commit_count);
}

/*
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @ltt_channel : channel structure
 * @transport_data: transport-specific data
 * @buf_offset : offset following the event header.
 * @data_size : size of the event data.
 * @slot_size : size of the reserved slot.
 */
/* FIXME: make this function static inline in the .h! */
/*static*/ /* inline */ notrace void ltt_commit_slot(
		struct ust_channel *channel,
		void **transport_data, long buf_offset,
		size_t data_size, size_t slot_size)
{
	struct ust_buffer *buf = *transport_data;
	long offset_end = buf_offset;
	long endidx = SUBBUF_INDEX(offset_end - 1, channel);
	long commit_count;

	/* Must write slot data before incrementing commit count */
	smp_wmb();
	commit_count = local_add_return(slot_size,
			&buf->commit_count[endidx]);
	/* Check if all commits have been done */
	if ((BUFFER_TRUNC(offset_end - 1, channel)
	     >> channel->n_subbufs_order)
	    - ((commit_count - channel->subbuf_size)
	       & channel->commit_count_mask) == 0)
		ltt_deliver(buf, endidx, commit_count);
	/*
	 * Update lost_size for each commit. It's needed only for extracting
	 * ltt buffers from vmcore, after crash.
	 */
	ltt_write_commit_counter(buf, buf, endidx,
			buf_offset, commit_count, data_size);
}

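/*
 * Writer-side usage sketch (illustrative; the real callers live in the
 * probe/serialization code, outside this file, and reach these functions
 * through the transport's reserve_slot operation). A slot is reserved,
 * written, then committed:
 *
 *	size_t slot_size;
 *	long buf_offset;
 *	u64 tsc;
 *	unsigned int rflags = 0;
 *	void *transport_data;
 *
 *	if (!ltt_relay_reserve_slot(trace, channel, &transport_data,
 *			data_size, &slot_size, &buf_offset, &tsc,
 *			&rflags, sizeof(long))) {
 *		// write the event header and payload at buf_offset,
 *		// e.g. through ust_buffers_write()
 *		ltt_commit_slot(channel, &transport_data, buf_offset,
 *				data_size, slot_size);
 *	}
 *
 * The exact offset conventions between reserve and commit follow the
 * parameter documentation above and are an assumption of this sketch.
 */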

static char initialized = 0;

void __attribute__((constructor)) init_ustrelay_transport(void)
{
	if(!initialized) {
		ltt_transport_register(&ust_relay_transport);
		initialized = 1;
	}
}

static void __attribute__((destructor)) ltt_relay_exit(void)
{
	ltt_transport_unregister(&ust_relay_transport);
}