/*
 * LTTng userspace tracer buffering system
 *
 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <ust/kernelcompat.h>
#include <kcompat/kref.h>

#include "tracercore.h"
struct ltt_reserve_switch_offsets {
	long begin, end, old;
	long begin_switch, end_switch_current, end_switch_old;
	size_t before_hdr_pad, size;
};
static DEFINE_MUTEX(ust_buffers_channels_mutex);
static LIST_HEAD(ust_buffers_channels);
static int get_n_cpus(void)
{
	int result;
	static int n_cpus = 0;

	if (!n_cpus) {
		/* On Linux, when some processors are offline,
		 * _SC_NPROCESSORS_CONF counts the offline
		 * processors, whereas _SC_NPROCESSORS_ONLN
		 * does not. If we used _SC_NPROCESSORS_ONLN,
		 * getcpu() could return a value greater than
		 * this sysconf, in which case the arrays
		 * indexed by processor would overflow.
		 */
		result = sysconf(_SC_NPROCESSORS_CONF);
		if (result == -1)
			return -1;
		n_cpus = result;
	}

	return n_cpus;
}
/**
 * _ust_buffers_write()
 *
 * @buf: destination buffer
 * @offset: offset in destination
 * @src: source buffer
 * @len: length of source
 * @cpy: already copied
 */
void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
	const void *src, size_t len, ssize_t cpy)
{
	do {
		len -= cpy;
		src += cpy;
		offset += cpy;

		WARN_ON(offset >= buf->buf_size);

		cpy = min_t(size_t, len, buf->buf_size - offset);
		ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
	} while (unlikely(len != cpy));
}
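/*
 * Added note: the do/while above retries whenever the requested length could
 * not be copied in one pass, each pass copying at most the room left before
 * buf->buf_size. The same chunked-copy idea as a standalone sketch, with
 * hypothetical names (dest, capacity):
 *
 *	size_t done = 0;
 *	while (done < len) {
 *		size_t chunk = min_t(size_t, len - done, capacity - offset);
 *		memcpy(dest + offset, (const char *)src + done, chunk);
 *		done += chunk;
 *		offset += chunk;
 *	}
 */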
static int ust_buffers_init_buffer(struct ust_trace *trace,
		struct ust_channel *ltt_chan,
		struct ust_buffer *buf,
		unsigned int n_subbufs);
static int ust_buffers_alloc_buf(struct ust_buffer *buf, size_t *size)
{
	void *ptr;
	int result;

	*size = PAGE_ALIGN(*size);

	result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
	if (result == -1 && errno == EINVAL) {
		ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
		return -1;
	}
	else if (result == -1) {
		PERROR("shmget");
		return -1;
	}

	/* FIXME: should have matching call to shmdt */
	ptr = shmat(buf->shmid, NULL, 0);
	if (ptr == (void *) -1) {
		PERROR("shmat");
		goto destroy_shmem;
	}

	/* Already mark the shared memory for destruction. This will occur only
	 * when all users have detached.
	 */
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if (result == -1) {
		PERROR("shmctl");
		return -1;
	}

	buf->buf_data = ptr;
	buf->buf_size = *size;

	return 0;

destroy_shmem:
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if (result == -1)
		PERROR("shmctl");

	return -1;
}
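/*
 * Added note: the shmget()/shmat()/shmctl(IPC_RMID) sequence above creates
 * "self-destructing" System V shared memory. Because the segment is marked
 * for removal immediately after attaching, the kernel reclaims it as soon as
 * the last user detaches, even if the traced application crashes. Minimal
 * sketch of the same pattern:
 *
 *	int id = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
 *	void *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	<-- reclaimed once all users detach
 */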
int ust_buffers_create_buf(struct ust_channel *channel, int cpu)
{
	int result;
	struct ust_buffer *buf = channel->buf[cpu];

	result = ust_buffers_alloc_buf(buf, &channel->alloc_size);
	if (result)
		return -1;

	kref_get(&channel->kref);
	return 0;
}
static void ust_buffers_destroy_channel(struct kref *kref)
{
	struct ust_channel *chan = container_of(kref, struct ust_channel, kref);

	free(chan);
}
static void ust_buffers_destroy_buf(struct ust_buffer *buf)
{
	struct ust_channel *chan = buf->chan;
	int result;

	result = munmap(buf->buf_data, buf->buf_size);
	if (result == -1)
		PERROR("munmap");

//ust//	chan->buf[buf->cpu] = NULL;
	free(buf);
	kref_put(&chan->kref, ust_buffers_destroy_channel);
}
/* called from kref_put */
static void ust_buffers_remove_buf(struct kref *kref)
{
	struct ust_buffer *buf = container_of(kref, struct ust_buffer, kref);
	ust_buffers_destroy_buf(buf);
}
int ust_buffers_open_buf(struct ust_channel *chan, int cpu)
{
	int result;

	result = ust_buffers_create_buf(chan, cpu);
	if (result == -1)
		return -1;

	kref_init(&chan->buf[cpu]->kref);

	result = ust_buffers_init_buffer(chan->trace, chan, chan->buf[cpu], chan->subbuf_cnt);
	if (result == -1)
		return -1;

	return 0;

	/* FIXME: decrementally destroy on error? */
}
/*
 * ust_buffers_close_buf - close a channel buffer
 */
static void ust_buffers_close_buf(struct ust_buffer *buf)
{
	kref_put(&buf->kref, ust_buffers_remove_buf);
}
int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_t subbuf_cnt)
{
	int i;
	int result;

	if (subbuf_size == 0 || subbuf_cnt == 0)
		return -1;

	/* Check that the subbuffer size is larger than a page. */
	WARN_ON_ONCE(subbuf_size < PAGE_SIZE);

	/*
	 * Make sure the number of subbuffers and subbuffer size are power of 2.
	 */
	WARN_ON_ONCE(hweight32(subbuf_size) != 1);
	WARN_ON(hweight32(subbuf_cnt) != 1);

	chan->version = UST_CHANNEL_VERSION;
	chan->subbuf_cnt = subbuf_cnt;
	chan->subbuf_size = subbuf_size;
	chan->subbuf_size_order = get_count_order(subbuf_size);
	chan->alloc_size = subbuf_size * subbuf_cnt;

	kref_init(&chan->kref);

	mutex_lock(&ust_buffers_channels_mutex);
	for (i = 0; i < chan->n_cpus; i++) {
		result = ust_buffers_open_buf(chan, i);
		if (result == -1)
			goto error;
	}
	list_add(&chan->list, &ust_buffers_channels);
	mutex_unlock(&ust_buffers_channels_mutex);

	return 0;

	/* Jump directly inside the loop to close the buffers that were already
	 * opened. */
	for (; i >= 0; i--) {
		ust_buffers_close_buf(chan->buf[i]);
error:
		continue;
	}

	kref_put(&chan->kref, ust_buffers_destroy_channel);
	mutex_unlock(&ust_buffers_channels_mutex);
	return -1;
}
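/*
 * Added note: hweight32() counts the bits set in a 32-bit value, so
 * hweight32(x) == 1 is exactly the "x is a power of two" test relied on
 * above. An equivalent branch-free sketch:
 *
 *	static inline int is_power_of_two(unsigned long x)
 *	{
 *		return x && !(x & (x - 1));
 *	}
 */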
void ust_buffers_channel_close(struct ust_channel *chan)
{
	int i;

	if (!chan)
		return;

	mutex_lock(&ust_buffers_channels_mutex);
	for (i = 0; i < chan->n_cpus; i++) {
		/* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
		 * initialize to NULL so we cannot use this check. Should we? */
		//ust//	if (chan->buf[i])
		ust_buffers_close_buf(chan->buf[i]);
	}

	list_del(&chan->list);
	kref_put(&chan->kref, ust_buffers_destroy_channel);
	mutex_unlock(&ust_buffers_channels_mutex);
}
static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu);

static void ltt_force_switch(struct ust_buffer *buf,
		enum force_switch_mode mode);
static void ltt_buffer_begin(struct ust_buffer *buf,
		u64 tsc, unsigned int subbuf_idx)
{
	struct ust_channel *channel = buf->chan;
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ust_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->cycle_count_begin = tsc;
	header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	/* FIXME: add memory barrier? */
	ltt_write_trace_header(channel->trace, header);
}
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. The lost size is between 0 and subbuf_size-1.
 */
static notrace void ltt_buffer_end(struct ust_buffer *buf,
		u64 tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ust_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);
	u32 data_size = SUBBUF_OFFSET(offset - 1, buf->chan) + 1;

	header->data_size = data_size;
	header->sb_size = PAGE_ALIGN(data_size);
	header->cycle_count_end = tsc;
	header->events_lost = local_read(&buf->events_lost);
	header->subbuf_corrupt = local_read(&buf->corrupted_subbuffers);
}
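/*
 * Added note: ltt_buffer_begin() seeds data_size and sb_size with the
 * 0xFFFFFFFF sentinel and ltt_buffer_end() overwrites them with the real
 * values. A reader that still sees 0xFFFFFFFF therefore knows the subbuffer
 * was never properly closed, e.g. because the traced application crashed
 * mid-trace.
 */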
/*
 * This function should not be called from NMI interrupt context
 */
static notrace void ltt_buf_unfull(struct ust_buffer *buf,
		unsigned int subbuf_idx,
		long offset)
{
}

/*
 * Promote compiler barrier to a smp_mb().
 * For the specific LTTng case, this IPI call should be removed if the
 * architecture does not reorder writes. This should eventually be provided by
 * a separate architecture-specific infrastructure.
 */
//ust// static void remote_mb(void *info)
//ust// {
//ust//	smp_mb();
//ust// }
int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
{
	struct ust_channel *channel = buf->chan;
	long consumed_old, consumed_idx, commit_count, write_offset;

	consumed_old = atomic_long_read(&buf->consumed);
	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
	commit_count = local_read(&buf->commit_count[consumed_idx].cc_sb);
	/*
	 * Make sure we read the commit count before reading the buffer
	 * data and the write offset. Correct consumed offset ordering
	 * wrt commit count is insured by the use of cmpxchg to update
	 * the consumed offset.
	 * smp_call_function_single can fail if the remote CPU is offline,
	 * this is OK because then there is no wmb to execute there.
	 * If our thread is executing on the same CPU as the one the buffer
	 * belongs to, we don't have to synchronize it at all. If we are
	 * migrated, the scheduler will take care of the memory barriers.
	 * Normally, smp_call_function_single() should ensure program order when
	 * executing the remote function, which implies that it surrounds the
	 * function execution with :
	 * smp_mb()
	 * smp_call_function_single()
	 * exec. function
	 * smp_mb()
	 *
	 * However, smp_call_function_single() does not seem to clearly execute
	 * such barriers. It depends on spinlock semantic to provide the barrier
	 * before executing the IPI and, when busy-looping, csd_lock_wait only
	 * executes smp_mb() when it has to wait for the other CPU.
	 *
	 * I don't trust this code. Therefore, let's add the smp_mb() sequence
	 * required ourself, even if duplicated. It has no performance impact
	 * anyway.
	 *
	 * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
	 * read and write vs write. They do not ensure core synchronization. We
	 * really have to ensure total order between the 3 barriers running on
	 * the 2 CPUs.
	 */
//ust// #ifdef LTT_NO_IPI_BARRIER
	/*
	 * Local rmb to match the remote wmb to read the commit count before the
	 * buffer data and the write offset.
	 */
	smp_rmb();
//ust// #else
//ust//	if (raw_smp_processor_id() != buf->cpu) {
//ust//		smp_mb();	/* Total order with IPI handler smp_mb() */
//ust//		smp_call_function_single(buf->cpu, remote_mb, NULL, 1);
//ust//		smp_mb();	/* Total order with IPI handler smp_mb() */
//ust//	}
//ust// #endif

	write_offset = local_read(&buf->offset);
	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */
	if (((commit_count - buf->chan->subbuf_size)
	     & channel->commit_count_mask)
	    - (BUFFER_TRUNC(consumed_old, buf->chan)
	       >> channel->n_subbufs_order)
	    != 0)
		return -EAGAIN;

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if ((SUBBUF_TRUNC(write_offset, buf->chan)
	   - SUBBUF_TRUNC(consumed_old, buf->chan))
	   == 0)
		return -EAGAIN;

	/* FIXME: is this ok to disable the reading feature? */
//ust//	retval = update_read_sb_index(buf, consumed_idx);
//ust//	if (retval)
//ust//		return retval;

	*consumed = consumed_old;

	return 0;
}
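/*
 * Added note: a consumer pairs ust_buffers_get_subbuf() with
 * ust_buffers_put_subbuf(). A hedged usage sketch (the data-extraction
 * helper is hypothetical, error handling trimmed):
 *
 *	long consumed;
 *	while (ust_buffers_get_subbuf(buf, &consumed) == 0) {
 *		handle_subbuffer_data(buf, consumed);	(hypothetical)
 *		if (ust_buffers_put_subbuf(buf, consumed))
 *			break;	(writer pushed us; data was overwritten)
 *	}
 */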
int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
{
	long consumed_new, consumed_old;

	consumed_old = atomic_long_read(&buf->consumed);
	consumed_old = consumed_old & (~0xFFFFFFFFL);
	consumed_old = consumed_old | uconsumed_old;
	consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);

//ust//	spin_lock(&ltt_buf->full_lock);
	if (atomic_long_cmpxchg(&buf->consumed, consumed_old,
				consumed_new)
	    != consumed_old) {
		/* We have been pushed by the writer : the last
		 * buffer read _is_ corrupted! It can also
		 * happen if this is a buffer we never got. */
//ust//		spin_unlock(&ltt_buf->full_lock);
		return -EIO;
	} else {
		/* tell the client that buffer is now unfull */
		int index;
		long data;
		index = SUBBUF_INDEX(consumed_old, buf->chan);
		data = BUFFER_OFFSET(consumed_old, buf->chan);
		ltt_buf_unfull(buf, index, data);
//ust//		spin_unlock(&ltt_buf->full_lock);
	}
	return 0;
}
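/*
 * Added note: only the low 32 bits of the consumed count travel through the
 * get/put interface, so the code above splices uconsumed_old back into the
 * full value: (consumed_old & ~0xFFFFFFFFL) keeps the high half, and OR-ing
 * in uconsumed_old restores the low half. For example, with consumed_old ==
 * 0x100002000 and uconsumed_old == 0x3000, the merged value is 0x100003000.
 */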
//ust// static void switch_buffer(unsigned long data)
//ust// {
//ust//	struct ltt_channel_buf_struct *ltt_buf =
//ust//		(struct ltt_channel_buf_struct *)data;
//ust//	struct rchan_buf *buf = ltt_buf->rbuf;
//ust//
//ust//	ltt_force_switch(buf, FORCE_ACTIVE);
//ust//
//ust//	ltt_buf->switch_timer.expires += ltt_buf->switch_timer_interval;
//ust//	add_timer_on(&ltt_buf->switch_timer, smp_processor_id());
//ust// }
//ust//
//ust// static void start_switch_timer(struct ltt_channel_struct *ltt_channel)
//ust// {
//ust//	struct rchan *rchan = ltt_channel->trans_channel_data;
//ust//	int cpu;
//ust//
//ust//	if (!ltt_channel->switch_timer_interval)
//ust//		return;
//ust//
//ust//	// TODO : hotplug
//ust//	for_each_online_cpu(cpu) {
//ust//		struct ltt_channel_buf_struct *ltt_buf;
//ust//		struct rchan_buf *buf;
//ust//
//ust//		buf = rchan->buf[cpu];
//ust//		ltt_buf = buf->chan_private;
//ust//		buf->random_access = 1;
//ust//		ltt_buf->switch_timer_interval =
//ust//			ltt_channel->switch_timer_interval;
//ust//		init_timer(&ltt_buf->switch_timer);
//ust//		ltt_buf->switch_timer.function = switch_buffer;
//ust//		ltt_buf->switch_timer.expires = jiffies +
//ust//			ltt_buf->switch_timer_interval;
//ust//		ltt_buf->switch_timer.data = (unsigned long)ltt_buf;
//ust//		add_timer_on(&ltt_buf->switch_timer, cpu);
//ust//	}
//ust// }
//ust//
//ust// /*
//ust//  * Cannot use del_timer_sync with add_timer_on, so use an IPI to locally
//ust//  * delete the timer.
//ust//  */
//ust// static void stop_switch_timer_ipi(void *info)
//ust// {
//ust//	struct ltt_channel_buf_struct *ltt_buf =
//ust//		(struct ltt_channel_buf_struct *)info;
//ust//
//ust//	del_timer(&ltt_buf->switch_timer);
//ust// }
//ust//
//ust// static void stop_switch_timer(struct ltt_channel_struct *ltt_channel)
//ust// {
//ust//	struct rchan *rchan = ltt_channel->trans_channel_data;
//ust//	int cpu;
//ust//
//ust//	if (!ltt_channel->switch_timer_interval)
//ust//		return;
//ust//
//ust//	// TODO : hotplug
//ust//	for_each_online_cpu(cpu) {
//ust//		struct ltt_channel_buf_struct *ltt_buf;
//ust//		struct rchan_buf *buf;
//ust//
//ust//		buf = rchan->buf[cpu];
//ust//		ltt_buf = buf->chan_private;
//ust//		smp_call_function(stop_switch_timer_ipi, ltt_buf, 1);
//ust//		buf->random_access = 0;
//ust//	}
//ust// }
//ust//
//ust// static void ust_buffers_print_written(struct ust_channel *chan,
//ust//		long cons_off, unsigned int cpu)
//ust// {
//ust//	struct ust_buffer *buf = chan->buf[cpu];
//ust//	long cons_idx, events_count;
//ust//
//ust//	cons_idx = SUBBUF_INDEX(cons_off, chan);
//ust//	events_count = local_read(&buf->commit_count[cons_idx].events);
//ust//
//ust//	if (events_count)
//ust//		printk(KERN_INFO
//ust//			"channel %s: %lu events written (cpu %u, index %lu)\n",
//ust//			chan->channel_name, events_count, cpu, cons_idx);
//ust// }
static void ltt_relay_print_subbuffer_errors(
		struct ust_channel *channel,
		long cons_off, int cpu)
{
	struct ust_buffer *ltt_buf = channel->buf[cpu];
	long cons_idx, commit_count, commit_count_sb, write_offset;

	cons_idx = SUBBUF_INDEX(cons_off, channel);
	commit_count = local_read(&ltt_buf->commit_count[cons_idx].cc);
	commit_count_sb = local_read(&ltt_buf->commit_count[cons_idx].cc_sb);

	/*
	 * No need to order commit_count and write_offset reads because we
	 * execute after trace is stopped when there are no readers left.
	 */
	write_offset = local_read(&ltt_buf->offset);
	WARN("LTT : unread channel %s offset is %ld "
		"and cons_off : %ld (cpu %d)\n",
		channel->channel_name, write_offset, cons_off, cpu);
	/* Check each sub-buffer for non filled commit count */
	if (((commit_count - channel->subbuf_size) & channel->commit_count_mask)
	    - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0)
		ERR("LTT : %s : subbuffer %lu has non filled "
			"commit count [cc, cc_sb] [%lu,%lu].\n",
			channel->channel_name, cons_idx, commit_count, commit_count_sb);
	ERR("LTT : %s : commit count : %lu, subbuf size %zd\n",
			channel->channel_name, commit_count,
			channel->subbuf_size);
}
static void ltt_relay_print_errors(struct ust_trace *trace,
		struct ust_channel *channel, int cpu)
{
	struct ust_buffer *ltt_buf = channel->buf[cpu];
	long cons_off;

	/*
	 * Can be called in the error path of allocation when
	 * trans_channel_data is not yet set.
	 */
	if (!channel)
		return;

//ust//	for (cons_off = 0; cons_off < rchan->alloc_size;
//ust//	     cons_off = SUBBUF_ALIGN(cons_off, rchan))
//ust//		ust_buffers_print_written(ltt_chan, cons_off, cpu);
	for (cons_off = atomic_long_read(&ltt_buf->consumed);
			(SUBBUF_TRUNC(local_read(&ltt_buf->offset),
				      channel)
			 - cons_off) > 0;
			cons_off = SUBBUF_ALIGN(cons_off, channel))
		ltt_relay_print_subbuffer_errors(channel, cons_off, cpu);
}
static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu)
{
	struct ust_trace *trace = channel->trace;
	struct ust_buffer *ltt_buf = channel->buf[cpu];

	if (local_read(&ltt_buf->events_lost))
		ERR("channel %s: %ld events lost (cpu %d)",
			channel->channel_name,
			local_read(&ltt_buf->events_lost), cpu);
	if (local_read(&ltt_buf->corrupted_subbuffers))
		ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
			channel->channel_name,
			local_read(&ltt_buf->corrupted_subbuffers), cpu);

	ltt_relay_print_errors(trace, channel, cpu);
}
static void ltt_relay_release_channel(struct kref *kref)
{
	struct ust_channel *ltt_chan = container_of(kref,
			struct ust_channel, kref);
	free(ltt_chan->buf);
}
//ust// static int ltt_relay_create_buffer(struct ust_trace *trace,
//ust//		struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
//ust//		unsigned int cpu, unsigned int n_subbufs)
//ust// {
//ust//	struct ltt_channel_buf_struct *ltt_buf =
//ust//		percpu_ptr(ltt_chan->buf, cpu);
//ust//	unsigned int j;
//ust//
//ust//	ltt_buf->commit_count =
//ust//		kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
//ust//			GFP_KERNEL, cpu_to_node(cpu));
//ust//	if (!ltt_buf->commit_count)
//ust//		return -ENOMEM;
//ust//	kref_get(&trace->kref);
//ust//	kref_get(&trace->ltt_transport_kref);
//ust//	kref_get(&ltt_chan->kref);
//ust//	local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
//ust//	atomic_long_set(&ltt_buf->consumed, 0);
//ust//	atomic_long_set(&ltt_buf->active_readers, 0);
//ust//	for (j = 0; j < n_subbufs; j++)
//ust//		local_set(&ltt_buf->commit_count[j], 0);
//ust//	init_waitqueue_head(&ltt_buf->write_wait);
//ust//	atomic_set(&ltt_buf->wakeup_readers, 0);
//ust//	spin_lock_init(&ltt_buf->full_lock);
//ust//
//ust//	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
//ust//	/* atomic_add made on local variable on data that belongs to
//ust//	 * various CPUs : ok because tracing not started (for this cpu). */
//ust//	local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
//ust//
//ust//	local_set(&ltt_buf->events_lost, 0);
//ust//	local_set(&ltt_buf->corrupted_subbuffers, 0);
//ust// }
static int ust_buffers_init_buffer(struct ust_trace *trace,
		struct ust_channel *ltt_chan, struct ust_buffer *buf,
		unsigned int n_subbufs)
{
	unsigned int j;
	int fds[2];
	int result;

	buf->commit_count =
		zmalloc(sizeof(*buf->commit_count) * n_subbufs);
	if (!buf->commit_count)
		return -ENOMEM;
	kref_get(&trace->kref);
	kref_get(&trace->ltt_transport_kref);
	kref_get(&ltt_chan->kref);
	local_set(&buf->offset, ltt_subbuffer_header_size());
	atomic_long_set(&buf->consumed, 0);
	atomic_long_set(&buf->active_readers, 0);
	for (j = 0; j < n_subbufs; j++) {
		local_set(&buf->commit_count[j].cc, 0);
		local_set(&buf->commit_count[j].cc_sb, 0);
	}
//ust//	init_waitqueue_head(&buf->write_wait);
//ust//	atomic_set(&buf->wakeup_readers, 0);
//ust//	spin_lock_init(&buf->full_lock);

	ltt_buffer_begin(buf, trace->start_tsc, 0);

	local_add(ltt_subbuffer_header_size(), &buf->commit_count[0].cc);

	local_set(&buf->events_lost, 0);
	local_set(&buf->corrupted_subbuffers, 0);

	result = pipe(fds);
	if (result == -1) {
		PERROR("pipe");
		return -1;
	}
	buf->data_ready_fd_read = fds[0];
	buf->data_ready_fd_write = fds[1];

	/* FIXME: do we actually need this? */
	result = fcntl(fds[0], F_SETFL, O_NONBLOCK);
	if (result == -1)
		PERROR("fcntl");

//ust//	buf->commit_seq = malloc(sizeof(buf->commit_seq) * n_subbufs);
//ust//	if(!ltt_buf->commit_seq) {
//ust//		return -1;
//ust//	}
	memset(buf->commit_seq, 0, sizeof(buf->commit_seq[0]) * n_subbufs);

	/* FIXME: decrementally destroy on error */

	return 0;
}
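/*
 * Added note: the pipe set up above is a pure notification channel. The
 * tracer writes a byte to data_ready_fd_write when a subbuffer becomes
 * consumable (see ltt_relay_buffer_flush()) and closes the fd when the
 * buffer is finished, so a consumer only has to poll()/read() on
 * data_ready_fd_read to know when to call ust_buffers_get_subbuf().
 */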
/* FIXME: use this function */
static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu)
{
	struct ust_trace *trace = ltt_chan->trace;
	struct ust_buffer *ltt_buf = ltt_chan->buf[cpu];

	kref_put(&ltt_chan->trace->ltt_transport_kref,
		ltt_release_transport);
	ltt_relay_print_buffer_errors(ltt_chan, cpu);
//ust//	free(ltt_buf->commit_seq);
	kfree(ltt_buf->commit_count);
	ltt_buf->commit_count = NULL;
	kref_put(&ltt_chan->kref, ltt_relay_release_channel);
	kref_put(&trace->kref, ltt_release_trace);
//ust//	wake_up_interruptible(&trace->kref_wq);
}
static int ust_buffers_alloc_channel_buf_structs(struct ust_channel *chan)
{
	void *ptr;
	int result;
	size_t size;
	int i;

	size = PAGE_ALIGN(1);

	for (i = 0; i < chan->n_cpus; i++) {

		result = chan->buf_struct_shmids[i] = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
		if (result == -1) {
			PERROR("shmget");
			goto destroy_previous;
		}

		/* FIXME: should have matching call to shmdt */
		ptr = shmat(chan->buf_struct_shmids[i], NULL, 0);
		if (ptr == (void *) -1) {
			PERROR("shmat");
			goto destroy_shm;
		}

		/* Already mark the shared memory for destruction. This will occur only
		 * when all users have detached.
		 */
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if (result == -1) {
			PERROR("shmctl");
			goto destroy_previous;
		}

		chan->buf[i] = ptr;
	}

	return 0;

	/* Jumping inside this loop occurs from within the other loop above with i as
	 * counter, so it unallocates the structures for the cpu = current_i down to
	 * zero. */
	for (; i >= 0; i--) {
destroy_shm:
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if (result == -1)
			PERROR("shmctl");

destroy_previous:
		continue;
	}

	return -1;
}
static int ust_buffers_create_channel(const char *trace_name, struct ust_trace *trace,
		const char *channel_name, struct ust_channel *ltt_chan,
		unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
{
	int result;

	kref_init(&ltt_chan->kref);

	ltt_chan->trace = trace;
	ltt_chan->overwrite = overwrite;
	ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
	ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
	ltt_chan->n_cpus = get_n_cpus();
//ust//	ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
	ltt_chan->buf = (void *) malloc(ltt_chan->n_cpus * sizeof(void *));
	if (ltt_chan->buf == NULL) {
		goto error;
	}
	ltt_chan->buf_struct_shmids = (int *) malloc(ltt_chan->n_cpus * sizeof(int));
	if (ltt_chan->buf_struct_shmids == NULL)
		goto free_buf;

	result = ust_buffers_alloc_channel_buf_structs(ltt_chan);
	if (result != 0)
		goto free_buf_struct_shmids;

	result = ust_buffers_channel_open(ltt_chan, subbuf_size, n_subbufs);
	if (result != 0) {
		ERR("Cannot open channel for trace %s", trace_name);
		goto unalloc_buf_structs;
	}

	return 0;

unalloc_buf_structs:
	/* FIXME: put a call here to unalloc the buf structs! */

free_buf_struct_shmids:
	free(ltt_chan->buf_struct_shmids);

free_buf:
	free(ltt_chan->buf);

error:
	return -1;
}
/*
 * LTTng channel flush function.
 *
 * Must be called when no tracing is active in the channel, because of
 * accesses across CPUs.
 */
static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf)
{
	int result;

//ust//	buf->finalized = 1;
	ltt_force_switch(buf, FORCE_FLUSH);

	result = write(buf->data_ready_fd_write, "1", 1);
	if (result == -1) {
		PERROR("write (in ltt_relay_buffer_flush)");
		ERR("this should never happen!");
	}
}
static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
{
//ust//	unsigned int i;
//ust//	struct rchan *rchan = ltt_channel->trans_channel_data;
//ust//
//ust//	for_each_possible_cpu(i) {
//ust//		struct ltt_channel_buf_struct *ltt_buf =
//ust//			percpu_ptr(ltt_channel->buf, i);
//ust//
//ust//		if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
//ust//			atomic_set(&ltt_buf->wakeup_readers, 0);
//ust//			wake_up_interruptible(&rchan->buf[i]->read_wait);
//ust//		}
//ust//	}
}
static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu)
{
	if (channel->buf[cpu]) {
		struct ust_buffer *buf = channel->buf[cpu];
		ltt_relay_buffer_flush(buf);
//ust//		ltt_relay_wake_writers(ltt_buf);
		/* closing the pipe tells the consumer the buffer is finished */

		//result = write(ltt_buf->data_ready_fd_write, "D", 1);
		//if (result == -1) {
		//	PERROR("write (in ltt_relay_finish_buffer)");
		//	ERR("this should never happen!");
		//}
		close(buf->data_ready_fd_write);
	}
}
static void ltt_relay_finish_channel(struct ust_channel *channel)
{
	unsigned int i;

	for (i = 0; i < channel->n_cpus; i++) {
		ltt_relay_finish_buffer(channel, i);
	}
}
static void ltt_relay_remove_channel(struct ust_channel *channel)
{
	ust_buffers_channel_close(channel);
	kref_put(&channel->kref, ltt_relay_release_channel);
}
//ust// /*
//ust//  * Returns :
//ust//  * 0 if ok
//ust//  * !0 if execution must be aborted.
//ust//  */
//ust// static inline int ltt_relay_try_reserve(
//ust//		struct ust_channel *channel, struct ust_buffer *buf,
//ust//		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
//ust//		u64 *tsc, unsigned int *rflags, int largest_align)
//ust// {
//ust//	offsets->begin = local_read(&buf->offset);
//ust//	offsets->old = offsets->begin;
//ust//	offsets->begin_switch = 0;
//ust//	offsets->end_switch_current = 0;
//ust//	offsets->end_switch_old = 0;
//ust//
//ust//	*tsc = trace_clock_read64();
//ust//	if (last_tsc_overflow(buf, *tsc))
//ust//		*rflags = LTT_RFLAG_ID_SIZE_TSC;
//ust//
//ust//	if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
//ust//		offsets->begin_switch = 1;	/* For offsets->begin */
//ust//	} else {
//ust//		offsets->size = ust_get_header_size(channel,
//ust//					offsets->begin, data_size,
//ust//					&offsets->before_hdr_pad, *rflags);
//ust//		offsets->size += ltt_align(offsets->begin + offsets->size,
//ust//					   largest_align)
//ust//				 + data_size;
//ust//		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
//ust//				> buf->chan->subbuf_size) {
//ust//			offsets->end_switch_old = 1;	/* For offsets->old */
//ust//			offsets->begin_switch = 1;	/* For offsets->begin */
//ust//		}
//ust//	}
//ust//	if (offsets->begin_switch) {
//ust//		long subbuf_index;
//ust//
//ust//		if (offsets->end_switch_old)
//ust//			offsets->begin = SUBBUF_ALIGN(offsets->begin,
//ust//						      buf->chan);
//ust//		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
//ust//		/* Test new buffer integrity */
//ust//		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
//ust//		offsets->reserve_commit_diff =
//ust//			(BUFFER_TRUNC(offsets->begin, buf->chan)
//ust//			 >> channel->n_subbufs_order)
//ust//			- (local_read(&buf->commit_count[subbuf_index])
//ust//				& channel->commit_count_mask);
//ust//		if (offsets->reserve_commit_diff == 0) {
//ust//			long consumed;
//ust//
//ust//			consumed = atomic_long_read(&buf->consumed);
//ust//
//ust//			/* Next buffer not corrupted. */
//ust//			if (!channel->overwrite &&
//ust//				(SUBBUF_TRUNC(offsets->begin, buf->chan)
//ust//				 - SUBBUF_TRUNC(consumed, buf->chan))
//ust//				>= channel->alloc_size) {
//ust//
//ust//				long consumed_idx = SUBBUF_INDEX(consumed, buf->chan);
//ust//				long commit_count = local_read(&buf->commit_count[consumed_idx]);
//ust//				if (((commit_count - buf->chan->subbuf_size) & channel->commit_count_mask) - (BUFFER_TRUNC(consumed, buf->chan) >> channel->n_subbufs_order) != 0) {
//ust//					WARN("Event dropped. Caused by non-committed event.");
//ust//				} else {
//ust//					WARN("Event dropped. Caused by non-consumed buffer.");
//ust//				}
//ust//				/*
//ust//				 * We do not overwrite non consumed buffers
//ust//				 * and we are full : event is lost.
//ust//				 */
//ust//				local_inc(&buf->events_lost);
//ust//				return -1;
//ust//			} else {
//ust//				/*
//ust//				 * next buffer not corrupted, we are either in
//ust//				 * overwrite mode or the buffer is not full.
//ust//				 * It's safe to write in this new subbuffer.
//ust//				 */
//ust//			}
//ust//		} else {
//ust//			/*
//ust//			 * Next subbuffer corrupted. Force pushing reader even
//ust//			 * in normal mode. It's safe to write in this new
//ust//			 * subbuffer.
//ust//			 */
//ust//		}
//ust//		offsets->size = ust_get_header_size(channel,
//ust//					offsets->begin, data_size,
//ust//					&offsets->before_hdr_pad, *rflags);
//ust//		offsets->size += ltt_align(offsets->begin + offsets->size,
//ust//					   largest_align)
//ust//				 + data_size;
//ust//		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
//ust//				> buf->chan->subbuf_size) {
//ust//			/*
//ust//			 * Event too big for subbuffers, report error, don't
//ust//			 * complete the sub-buffer switch.
//ust//			 */
//ust//			local_inc(&buf->events_lost);
//ust//			return -1;
//ust//		} else {
//ust//			/*
//ust//			 * We just made a successful buffer switch and the event
//ust//			 * fits in the new subbuffer. Let's write.
//ust//			 */
//ust//		}
//ust//	} else {
//ust//		/*
//ust//		 * Event fits in the current buffer and we are not on a switch
//ust//		 * boundary. It's safe to write.
//ust//		 */
//ust//	}
//ust//	offsets->end = offsets->begin + offsets->size;
//ust//
//ust//	if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
//ust//		/*
//ust//		 * The offset_end will fall at the very beginning of the next
//ust//		 * subbuffer.
//ust//		 */
//ust//		offsets->end_switch_current = 1;	/* For offsets->begin */
//ust//	}
//ust//	return 0;
//ust// }
//ust//
//ust// /*
//ust//  * Returns :
//ust//  * 0 if ok
//ust//  * !0 if execution must be aborted.
//ust//  */
//ust// static inline int ltt_relay_try_switch(
//ust//		enum force_switch_mode mode,
//ust//		struct ust_channel *channel,
//ust//		struct ust_buffer *buf,
//ust//		struct ltt_reserve_switch_offsets *offsets,
//ust//		u64 *tsc)
//ust// {
//ust//	long subbuf_index;
//ust//
//ust//	offsets->begin = local_read(&buf->offset);
//ust//	offsets->old = offsets->begin;
//ust//	offsets->begin_switch = 0;
//ust//	offsets->end_switch_old = 0;
//ust//
//ust//	*tsc = trace_clock_read64();
//ust//
//ust//	if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
//ust//		offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
//ust//		offsets->end_switch_old = 1;
//ust//	} else {
//ust//		/* we do not have to switch : buffer is empty */
//ust//		return -1;
//ust//	}
//ust//	if (mode == FORCE_ACTIVE)
//ust//		offsets->begin += ltt_subbuffer_header_size();
//ust//	/*
//ust//	 * Always begin_switch in FORCE_ACTIVE mode.
//ust//	 * Test new buffer integrity
//ust//	 */
//ust//	subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
//ust//	offsets->reserve_commit_diff =
//ust//		(BUFFER_TRUNC(offsets->begin, buf->chan)
//ust//		 >> channel->n_subbufs_order)
//ust//		- (local_read(&buf->commit_count[subbuf_index])
//ust//			& channel->commit_count_mask);
//ust//	if (offsets->reserve_commit_diff == 0) {
//ust//		/* Next buffer not corrupted. */
//ust//		if (mode == FORCE_ACTIVE
//ust//		    && !channel->overwrite
//ust//		    && offsets->begin - atomic_long_read(&buf->consumed)
//ust//		       >= channel->alloc_size) {
//ust//			/*
//ust//			 * We do not overwrite non consumed buffers and we are
//ust//			 * full : ignore switch while tracing is active.
//ust//			 */
//ust//			return -1;
//ust//		}
//ust//	} else {
//ust//		/*
//ust//		 * Next subbuffer corrupted. Force pushing reader even in normal
//ust//		 * mode
//ust//		 */
//ust//	}
//ust//	offsets->end = offsets->begin;
//ust//	return 0;
//ust// }
//ust//
//ust// static inline void ltt_reserve_push_reader(
//ust//		struct ust_channel *channel,
//ust//		struct ust_buffer *buf,
//ust//		struct ltt_reserve_switch_offsets *offsets)
//ust// {
//ust//	long consumed_old, consumed_new;
//ust//
//ust//	do {
//ust//		consumed_old = atomic_long_read(&buf->consumed);
//ust//		/*
//ust//		 * If buffer is in overwrite mode, push the reader consumed
//ust//		 * count if the write position has reached it and we are not
//ust//		 * at the first iteration (don't push the reader farther than
//ust//		 * the writer). This operation can be done concurrently by many
//ust//		 * writers in the same buffer, the writer being at the farthest
//ust//		 * write position sub-buffer index in the buffer being the one
//ust//		 * which will win this loop.
//ust//		 * If the buffer is not in overwrite mode, pushing the reader
//ust//		 * only happens if a sub-buffer is corrupted.
//ust//		 */
//ust//		if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
//ust//		     - SUBBUF_TRUNC(consumed_old, buf->chan))
//ust//		    >= channel->alloc_size)
//ust//			consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
//ust//		else
//ust//			consumed_new = consumed_old;
//ust//	} while (atomic_long_cmpxchg(&buf->consumed, consumed_old,
//ust//			consumed_new) != consumed_old);
//ust//
//ust//	if (consumed_old != consumed_new) {
//ust//		/*
//ust//		 * Reader pushed : we are the winner of the push, we can
//ust//		 * therefore reequilibrate reserve and commit. Atomic increment
//ust//		 * of the commit count permits other writers to play around
//ust//		 * with this variable before us. We keep track of
//ust//		 * corrupted_subbuffers even in overwrite mode :
//ust//		 * we never want to write over a non completely committed
//ust//		 * sub-buffer : possible causes : the buffer size is too low
//ust//		 * compared to the unordered data input, or there is a writer
//ust//		 * that died between the reserve and the commit.
//ust//		 */
//ust//		if (offsets->reserve_commit_diff) {
//ust//			/*
//ust//			 * We have to alter the sub-buffer commit count.
//ust//			 * We do not deliver the previous subbuffer, given it
//ust//			 * was either corrupted or not consumed (overwrite
//ust//			 * mode).
//ust//			 */
//ust//			local_add(offsets->reserve_commit_diff,
//ust//				  &buf->commit_count[
//ust//					SUBBUF_INDEX(offsets->begin,
//ust//						     buf->chan)]);
//ust//			if (!channel->overwrite
//ust//			    || offsets->reserve_commit_diff
//ust//			       != channel->subbuf_size) {
//ust//				/*
//ust//				 * The reserve commit diff was not subbuf_size :
//ust//				 * it means the subbuffer was partly written to
//ust//				 * and is therefore corrupted. If it is multiple
//ust//				 * of subbuffer size and we are in flight
//ust//				 * recorder mode, we are skipping over a whole
//ust//				 * subbuffer.
//ust//				 */
//ust//				local_inc(&buf->corrupted_subbuffers);
//ust//			}
//ust//		}
//ust//	}
//ust// }
//ust//
//ust// /**
//ust//  * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer.
//ust//  * @trace: the trace structure to log to.
//ust//  * @ltt_channel: channel structure
//ust//  * @transport_data: data structure specific to ltt relay
//ust//  * @data_size: size of the variable length data to log.
//ust//  * @slot_size: pointer to total size of the slot (out)
//ust//  * @buf_offset : pointer to reserved buffer offset (out)
//ust//  * @tsc: pointer to the tsc at the slot reservation (out)
//ust//  * @cpu: cpuid
//ust//  *
//ust//  * Return : -ENOSPC if not enough space, else returns 0.
//ust//  * It will take care of sub-buffer switching.
//ust//  */
//ust// static notrace int ltt_relay_reserve_slot(struct ust_trace *trace,
//ust//		struct ust_channel *channel, void **transport_data,
//ust//		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
//ust//		unsigned int *rflags, int largest_align, int cpu)
//ust// {
//ust//	struct ust_buffer *buf = *transport_data = channel->buf[cpu];
//ust//	struct ltt_reserve_switch_offsets offsets;
//ust//
//ust//	offsets.reserve_commit_diff = 0;
//ust//	offsets.size = 0;
//ust//
//ust//	/*
//ust//	 * Perform retryable operations.
//ust//	 */
//ust//	do {
//ust//		if (ltt_nesting > 4) {
//ust//			local_inc(&buf->events_lost);
//ust//			return -EPERM;
//ust//		}
//ust//		if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags,
//ust//				largest_align))
//ust//			return -ENOSPC;
//ust//	} while (local_cmpxchg(&buf->offset, offsets.old,
//ust//			offsets.end) != offsets.old);
//ust//
//ust//	/*
//ust//	 * Atomically update last_tsc. This update races against concurrent
//ust//	 * atomic updates, but the race will always cause supplementary full TSC
//ust//	 * events, never the opposite (missing a full TSC event when it would be
//ust//	 * needed).
//ust//	 */
//ust//	save_last_tsc(buf, *tsc);
//ust//
//ust//	/*
//ust//	 * Push the reader if necessary
//ust//	 */
//ust//	ltt_reserve_push_reader(channel, buf, &offsets);
//ust//
//ust//	/*
//ust//	 * Switch old subbuffer if needed.
//ust//	 */
//ust//	if (offsets.end_switch_old)
//ust//		ltt_reserve_switch_old_subbuf(channel, buf, &offsets, tsc);
//ust//
//ust//	/*
//ust//	 * Populate new subbuffer.
//ust//	 */
//ust//	if (offsets.begin_switch)
//ust//		ltt_reserve_switch_new_subbuf(channel, buf, &offsets, tsc);
//ust//
//ust//	if (offsets.end_switch_current)
//ust//		ltt_reserve_end_switch_current(channel, buf, &offsets, tsc);
//ust//
//ust//	*slot_size = offsets.size;
//ust//	*buf_offset = offsets.begin + offsets.before_hdr_pad;
//ust//	return 0;
//ust// }
//ust//
//ust// /*
//ust//  * Force a sub-buffer switch for a per-cpu buffer. This operation is
//ust//  * completely reentrant : can be called while tracing is active with
//ust//  * absolutely no lock held.
//ust//  *
//ust//  * Note, however, that as a local_cmpxchg is used for some atomic
//ust//  * operations, this function must be called from the CPU which owns the buffer
//ust//  * for a ACTIVE flush.
//ust//  */
//ust// static notrace void ltt_force_switch(struct ust_buffer *buf,
//ust//		enum force_switch_mode mode)
//ust// {
//ust//	struct ust_channel *channel = buf->chan;
//ust//	struct ltt_reserve_switch_offsets offsets;
//ust//	u64 tsc;
//ust//
//ust//	offsets.reserve_commit_diff = 0;
//ust//	offsets.size = 0;
//ust//
//ust//	/*
//ust//	 * Perform retryable operations.
//ust//	 */
//ust//	do {
//ust//		if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc))
//ust//			return;
//ust//	} while (local_cmpxchg(&buf->offset, offsets.old,
//ust//			offsets.end) != offsets.old);
//ust//
//ust//	/*
//ust//	 * Atomically update last_tsc. This update races against concurrent
//ust//	 * atomic updates, but the race will always cause supplementary full TSC
//ust//	 * events, never the opposite (missing a full TSC event when it would be
//ust//	 * needed).
//ust//	 */
//ust//	save_last_tsc(buf, tsc);
//ust//
//ust//	/*
//ust//	 * Push the reader if necessary
//ust//	 */
//ust//	if (mode == FORCE_ACTIVE)
//ust//		ltt_reserve_push_reader(channel, buf, &offsets);
//ust//
//ust//	/*
//ust//	 * Switch old subbuffer if needed.
//ust//	 */
//ust//	if (offsets.end_switch_old)
//ust//		ltt_reserve_switch_old_subbuf(channel, buf, &offsets, &tsc);
//ust//
//ust//	/*
//ust//	 * Populate new subbuffer.
//ust//	 */
//ust//	if (mode == FORCE_ACTIVE)
//ust//		ltt_reserve_switch_new_subbuf(channel, buf, &offsets, &tsc);
//ust// }
/*
 * ltt_reserve_switch_old_subbuf: switch old subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 *
 * Note : offset_old should never be 0 here.
 */
static void ltt_reserve_switch_old_subbuf(
		struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long oldidx = SUBBUF_INDEX(offsets->old - 1, chan);
	long commit_count, padding_size;

	padding_size = chan->subbuf_size
			- (SUBBUF_OFFSET(offsets->old - 1, chan) + 1);
	ltt_buffer_end(buf, *tsc, offsets->old, oldidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its smp_rmb().
	 */
	barrier();
	local_add(padding_size,
			&buf->commit_count[oldidx].cc);
	commit_count = local_read(&buf->commit_count[oldidx].cc);
	ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
	ltt_write_commit_counter(buf, oldidx,
		offsets->old, commit_count, padding_size);
}
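/*
 * Added note: padding_size above is the unused tail of the old subbuffer.
 * For example, with subbuf_size == 4096 and the last reserved byte ending
 * at in-subbuffer offset 4000, SUBBUF_OFFSET(offsets->old - 1, chan) + 1
 * evaluates to 4000 and padding_size to 96; adding the padding to the
 * commit count lets the subbuffer reach the "fully committed" state that
 * ltt_check_deliver() tests for.
 */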
/*
 * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
 *
 * This code can be executed unordered : writers may already have written to the
 * sub-buffer before this code gets executed, caution. The commit makes sure
 * that this code is executed before the deliver of this sub-buffer.
 */
static void ltt_reserve_switch_new_subbuf(
		struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long beginidx = SUBBUF_INDEX(offsets->begin, chan);
	long commit_count;

	ltt_buffer_begin(buf, *tsc, beginidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its smp_rmb().
	 */
	barrier();
	local_add(ltt_subbuffer_header_size(),
			&buf->commit_count[beginidx].cc);
	commit_count = local_read(&buf->commit_count[beginidx].cc);
	/* Check if the written buffer has to be delivered */
	ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx);
	ltt_write_commit_counter(buf, beginidx,
		offsets->begin, commit_count, ltt_subbuffer_header_size());
}
/*
 * ltt_reserve_end_switch_current: finish switching current subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 */
static void ltt_reserve_end_switch_current(
		struct ust_channel *chan,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long endidx = SUBBUF_INDEX(offsets->end - 1, chan);
	long commit_count, padding_size;

	padding_size = chan->subbuf_size
			- (SUBBUF_OFFSET(offsets->end - 1, chan) + 1);

	ltt_buffer_end(buf, *tsc, offsets->end, endidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its smp_rmb().
	 */
	barrier();
	local_add(padding_size,
			&buf->commit_count[endidx].cc);
	commit_count = local_read(&buf->commit_count[endidx].cc);
	ltt_check_deliver(chan, buf,
		offsets->end - 1, commit_count, endidx);
	ltt_write_commit_counter(buf, endidx,
		offsets->end, commit_count, padding_size);
}
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static int ltt_relay_try_switch_slow(
		enum force_switch_mode mode,
		struct ust_channel *chan,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets,
		u64 *tsc)
{
	long subbuf_index;
	long reserve_commit_diff;

	offsets->begin = local_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
		offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
		offsets->end_switch_old = 1;
	} else {
		/* we do not have to switch : buffer is empty */
		return -1;
	}
	if (mode == FORCE_ACTIVE)
		offsets->begin += ltt_subbuffer_header_size();
	/*
	 * Always begin_switch in FORCE_ACTIVE mode.
	 * Test new buffer integrity
	 */
	subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
	reserve_commit_diff =
		(BUFFER_TRUNC(offsets->begin, buf->chan)
		 >> chan->n_subbufs_order)
		- (local_read(&buf->commit_count[subbuf_index].cc_sb)
			& chan->commit_count_mask);
	if (reserve_commit_diff == 0) {
		/* Next buffer not corrupted. */
		if (mode == FORCE_ACTIVE
		    && !chan->overwrite
		    && offsets->begin - atomic_long_read(&buf->consumed)
		       >= chan->alloc_size) {
			/*
			 * We do not overwrite non consumed buffers and we are
			 * full : ignore switch while tracing is active.
			 */
			return -1;
		}
	} else {
		/*
		 * Next subbuffer corrupted. Force pushing reader even in normal
		 * mode.
		 */
	}
	offsets->end = offsets->begin;
	return 0;
}
/*
 * Force a sub-buffer switch for a per-cpu buffer. This operation is
 * completely reentrant : can be called while tracing is active with
 * absolutely no lock held.
 *
 * Note, however, that as a local_cmpxchg is used for some atomic
 * operations, this function must be called from the CPU which owns the buffer
 * for an ACTIVE flush.
 */
void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
		enum force_switch_mode mode)
{
	struct ust_channel *chan = buf->chan;
	struct ltt_reserve_switch_offsets offsets;
	u64 tsc;

	offsets.size = 0;

	DBG("Switching (forced) %s_%d", chan->channel_name, buf->cpu);
	/*
	 * Perform retryable operations.
	 */
	do {
		if (ltt_relay_try_switch_slow(mode, chan, buf,
				&offsets, &tsc))
			return;
	} while (local_cmpxchg(&buf->offset, offsets.old,
			offsets.end) != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, tsc);

	/*
	 * Push the reader if necessary
	 */
	if (mode == FORCE_ACTIVE) {
		ltt_reserve_push_reader(chan, buf, offsets.end - 1);
//ust//		ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));
	}

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old) {
//ust//		ltt_clear_noref_flag(rchan, buf, SUBBUF_INDEX(offsets.old - 1, rchan));
		ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc);
	}

	/*
	 * Populate new subbuffer.
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_switch_new_subbuf(chan, buf, &offsets, &tsc);
}
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
		u64 *tsc, unsigned int *rflags, int largest_align)
{
	long reserve_commit_diff;

	offsets->begin = local_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_current = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();
	if (last_tsc_overflow(buf, *tsc))
		*rflags = LTT_RFLAG_ID_SIZE_TSC;

	if (unlikely(SUBBUF_OFFSET(offsets->begin, buf->chan) == 0)) {
		offsets->begin_switch = 1;		/* For offsets->begin */
	} else {
		offsets->size = ust_get_header_size(chan,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) +
			     offsets->size) > buf->chan->subbuf_size)) {
			offsets->end_switch_old = 1;	/* For offsets->old */
			offsets->begin_switch = 1;	/* For offsets->begin */
		}
	}
	if (unlikely(offsets->begin_switch)) {
		long subbuf_index;

		/*
		 * We are typically not filling the previous buffer completely.
		 */
		if (likely(offsets->end_switch_old))
			offsets->begin = SUBBUF_ALIGN(offsets->begin,
						      buf->chan);
		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
		/* Test new buffer integrity */
		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
		reserve_commit_diff =
			(BUFFER_TRUNC(offsets->begin, buf->chan)
			 >> chan->n_subbufs_order)
			- (local_read(&buf->commit_count[subbuf_index].cc_sb)
				& chan->commit_count_mask);
		if (likely(reserve_commit_diff == 0)) {
			/* Next buffer not corrupted. */
			if (unlikely(!chan->overwrite &&
				(SUBBUF_TRUNC(offsets->begin, buf->chan)
				 - SUBBUF_TRUNC(atomic_long_read(
							&buf->consumed),
						buf->chan))
				>= chan->alloc_size)) {
				/*
				 * We do not overwrite non consumed buffers
				 * and we are full : event is lost.
				 */
				local_inc(&buf->events_lost);
				return -1;
			} else {
				/*
				 * next buffer not corrupted, we are either in
				 * overwrite mode or the buffer is not full.
				 * It's safe to write in this new subbuffer.
				 */
			}
		} else {
			/*
			 * Next subbuffer corrupted. Drop event in normal and
			 * overwrite mode. Caused by either a writer OOPS or
			 * too many nested writes over a reserve/commit pair.
			 */
			local_inc(&buf->events_lost);
			return -1;
		}
		offsets->size = ust_get_header_size(chan,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan)
			     + offsets->size) > buf->chan->subbuf_size)) {
			/*
			 * Event too big for subbuffers, report error, don't
			 * complete the sub-buffer switch.
			 */
			local_inc(&buf->events_lost);
			return -1;
		} else {
			/*
			 * We just made a successful buffer switch and the event
			 * fits in the new subbuffer. Let's write.
			 */
		}
	} else {
		/*
		 * Event fits in the current buffer and we are not on a switch
		 * boundary. It's safe to write.
		 */
	}
	offsets->end = offsets->begin + offsets->size;

	if (unlikely((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0)) {
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		offsets->end_switch_current = 1;	/* For offsets->begin */
	}
	return 0;
}
/**
 * ltt_relay_reserve_slot_lockless_slow - Atomic slot reservation in a buffer.
 * @trace: the trace structure to log to.
 * @ltt_channel: channel structure
 * @transport_data: data structure specific to ltt relay
 * @data_size: size of the variable length data to log.
 * @slot_size: pointer to total size of the slot (out)
 * @buf_offset : pointer to reserved buffer offset (out)
 * @tsc: pointer to the tsc at the slot reservation (out)
 *
 * Return : -ENOSPC if not enough space, else returns 0.
 * It will take care of sub-buffer switching.
 */
int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
		struct ust_channel *chan, void **transport_data,
		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
		unsigned int *rflags, int largest_align, int cpu)
{
	struct ust_buffer *buf = chan->buf[cpu];
	struct ltt_reserve_switch_offsets offsets;

	offsets.size = 0;

	do {
		if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets,
				data_size, tsc, rflags, largest_align)))
			return -ENOSPC;
	} while (unlikely(local_cmpxchg(&buf->offset, offsets.old,
			offsets.end) != offsets.old));

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, *tsc);

	/*
	 * Push the reader if necessary
	 */
	ltt_reserve_push_reader(chan, buf, offsets.end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
//ust//	ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));

	/*
	 * Switch old subbuffer if needed.
	 */
	if (unlikely(offsets.end_switch_old)) {
//ust//		ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.old - 1, chan));
		ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc);
		DBG("Switching %s_%d", chan->channel_name, cpu);
	}

	/*
	 * Populate new subbuffer.
	 */
	if (unlikely(offsets.begin_switch))
		ltt_reserve_switch_new_subbuf(chan, buf, &offsets, tsc);

	if (unlikely(offsets.end_switch_current))
		ltt_reserve_end_switch_current(chan, buf, &offsets, tsc);

	*slot_size = offsets.size;
	*buf_offset = offsets.begin + offsets.before_hdr_pad;
	return 0;
}
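/*
 * Added note: the do/while above is the classic lockless reservation loop:
 * compute a tentative [old, end) slot, then try to publish it with
 * local_cmpxchg(&buf->offset, old, end). If another writer on the same CPU
 * reserved in the meantime, the cmpxchg returns a different old value and
 * the computation is redone. Skeleton of the pattern (sketch):
 *
 *	do {
 *		old = local_read(&buf->offset);
 *		end = old + slot_size;	(may also decide to abort)
 *	} while (local_cmpxchg(&buf->offset, old, end) != old);
 */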
static struct ltt_transport ust_relay_transport = {
	.create_channel = ust_buffers_create_channel,
	.finish_channel = ltt_relay_finish_channel,
	.remove_channel = ltt_relay_remove_channel,
	.wakeup_channel = ltt_relay_async_wakeup_chan,
};
static char initialized = 0;

void __attribute__((constructor)) init_ustrelay_transport(void)
{
	if (!initialized) {
		ltt_transport_register(&ust_relay_transport);
		initialized = 1;
	}
}

static void __attribute__((destructor)) ust_buffers_exit(void)
{
	ltt_transport_unregister(&ust_relay_transport);
}
size_t ltt_write_event_header_slow(struct ust_trace *trace,
		struct ust_channel *channel,
		struct ust_buffer *buf, long buf_offset,
		u16 eID, u32 event_size,
		u64 tsc, unsigned int rflags)
{
	struct ltt_event_header header;
	u16 small_size;

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		header.id_time = 29 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID_SIZE:
		header.id_time = 30 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID:
		header.id_time = 31 << LTT_TSC_BITS;
		break;
	}

	header.id_time |= (u32)tsc & LTT_TSC_MASK;
	ust_buffers_write(buf, buf_offset, &header, sizeof(header));
	buf_offset += sizeof(header);

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			&small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ust_buffers_write(buf, buf_offset,
				&event_size, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		buf_offset += ltt_align(buf_offset, sizeof(u64));
		ust_buffers_write(buf, buf_offset,
			&tsc, sizeof(u64));
		buf_offset += sizeof(u64);
		break;
	case LTT_RFLAG_ID_SIZE:
		small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			&small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ust_buffers_write(buf, buf_offset,
				&event_size, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		break;
	case LTT_RFLAG_ID:
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		break;
	}

	return buf_offset;
}