/*
 * LTTng userspace tracer buffering system
 *
 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
 * Copyright (C) 2008-2011 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Note: this code does not support the ref/noref flag and reader-owned
 * subbuffer scheme needed for flight recorder mode.
 */
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#include <ust/clock.h>

/* ust_buffer/ust_channel declarations and tracer helpers */
#include "buffers.h"
#include "channels.h"
#include "tracer.h"
#include "tracercore.h"
#include "usterr_signal_safe.h"
struct ltt_reserve_switch_offsets {
	long begin, end, old;
	long begin_switch, end_switch_current, end_switch_old;
	size_t before_hdr_pad, size;
};

static DEFINE_MUTEX(ust_buffers_channels_mutex);
static CDS_LIST_HEAD(ust_buffers_channels);
static void ltt_force_switch(struct ust_buffer *buf,
		enum force_switch_mode mode);

static int get_n_cpus(void)
{
	int result;
	static int n_cpus = 0;

	if (!n_cpus) {
		/*
		 * On Linux, when some processors are offline,
		 * _SC_NPROCESSORS_CONF counts the offline
		 * processors, whereas _SC_NPROCESSORS_ONLN
		 * does not. If we used _SC_NPROCESSORS_ONLN,
		 * getcpu() could return a value greater than
		 * this sysconf, in which case the arrays
		 * indexed by processor would overflow.
		 */
		result = sysconf(_SC_NPROCESSORS_CONF);
		if (result == -1)
			return -1;

		n_cpus = result;
	}

	return n_cpus;
}
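/*
 * Illustrative sketch (assuming the usual glibc sched_getcpu() helper) of why
 * the _SC_NPROCESSORS_CONF value matters: every per-cpu array in this file is
 * sized with get_n_cpus(), so an index returned by sched_getcpu() cannot run
 * past the end of the array even when some processors are offline.
 *
 *	int n = get_n_cpus();
 *	struct ust_buffer **bufs = zmalloc(n * sizeof(*bufs));
 *	int c = sched_getcpu();	// always < _SC_NPROCESSORS_CONF
 *	if (c >= 0)
 *		bufs[c] = ...;
 */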
/**
 * _ust_buffers_strncpy_fixup - Fix an incomplete string in a ltt_relay buffer.
 * @buf : buffer
 * @offset : offset within the buffer
 * @len : length to write
 * @copied: string actually copied
 * @terminated: does string end with \0
 *
 * Fills string with "X" if incomplete.
 */
void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
				size_t len, size_t copied, int terminated)
{
	size_t buf_offset, cpy;

	if (copied == len) {
		/*
		 * Deal with non-terminated string.
		 */
		assert(!terminated);
		offset += copied - 1;
		buf_offset = BUFFER_OFFSET(offset, buf->chan);
		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		assert(buf_offset
		       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
		ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
		return;
	}

	/*
	 * Deal with incomplete string.
	 * Overwrite string's \0 with X too.
	 */
	cpy = copied - 1;
	assert(terminated);
	len -= cpy;
	offset += cpy;
	copied -= cpy;
	buf_offset = BUFFER_OFFSET(offset, buf->chan);
	/*
	 * Underlying layer should never ask for writes across subbuffers.
	 */
	assert(buf_offset
	       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);

	ust_buffers_do_memset(buf->buf_data + buf_offset,
			      'X', len);

	/*
	 * Overwrite last 'X' with '\0'.
	 */
	offset += len - 1;
	buf_offset = BUFFER_OFFSET(offset, buf->chan);
	/*
	 * Underlying layer should never ask for writes across subbuffers.
	 */
	assert(buf_offset
	       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
	ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
}
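/*
 * Worked example of the fixup above (values assumed): a reservation of
 * len = 6 bytes for which only "abc\0" was copied (copied = 4, terminated = 1)
 * ends up as "abcXX\0" in the buffer, so the slot keeps its reserved size and
 * stays NUL-terminated even though the source string was shorter than expected.
 */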
static void ltt_buffer_begin(struct ust_buffer *buf,
			u64 tsc, unsigned int subbuf_idx)
{
	struct ust_channel *channel = buf->chan;
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ust_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->cycle_count_begin = tsc;
	header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	/*
	 * No memory barrier needed to order data_size/sb_size vs commit count
	 * update, because commit count update contains a compiler barrier that
	 * ensures the order of the writes is OK from a program POV. It only
	 * matters for crash dump recovery which is not executed concurrently,
	 * so memory write order does not matter.
	 */
	ltt_write_trace_header(channel->trace, header);
}
static int map_buf_data(struct ust_buffer *buf, size_t *size)
{
	void *ptr;
	int result;

	*size = PAGE_ALIGN(*size);

	result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
	if (result < 0 && errno == EINVAL) {
		ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
		return -1;
	} else if (result < 0) {
		PERROR("shmget");
		return -1;
	}

	ptr = shmat(buf->shmid, NULL, 0);
	if (ptr == (void *) -1) {
		PERROR("shmat");
		goto destroy_shmem;
	}

	/* Already mark the shared memory for destruction. This will occur only
	 * when all users have detached.
	 */
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if (result < 0) {
		PERROR("shmctl");
		return -1;
	}

	buf->buf_data = ptr;
	buf->buf_size = *size;

	return 0;

destroy_shmem:
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if (result < 0) {
		PERROR("shmctl");
	}

	return -1;
}
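/*
 * Example of raising the SysV shared memory limit when shmget() fails with
 * EINVAL because the requested buffer exceeds it (the value is only an
 * example):
 *
 *	# sysctl -w kernel.shmmax=268435456
 *	# echo 268435456 > /proc/sys/kernel/shmmax
 */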
static int open_buf(struct ust_channel *chan, int cpu)
{
	int result, fds[2];
	unsigned int j;
	struct ust_trace *trace = chan->trace;
	struct ust_buffer *buf = chan->buf[cpu];
	unsigned int n_subbufs = chan->subbuf_cnt;

	result = map_buf_data(buf, &chan->alloc_size);
	if (result < 0)
		return -1;

	buf->commit_count =
		zmalloc(sizeof(*buf->commit_count) * n_subbufs);
	if (!buf->commit_count)
		goto unmap_buf;

	result = pipe(fds);
	if (result < 0) {
		PERROR("pipe");
		goto free_commit_count;
	}
	buf->data_ready_fd_read = fds[0];
	buf->data_ready_fd_write = fds[1];

	buf->cpu = cpu;
	buf->chan = chan;

	uatomic_set(&buf->offset, ltt_subbuffer_header_size());
	uatomic_set(&buf->consumed, 0);
	uatomic_set(&buf->active_readers, 0);
	for (j = 0; j < n_subbufs; j++) {
		uatomic_set(&buf->commit_count[j].cc, 0);
		uatomic_set(&buf->commit_count[j].cc_sb, 0);
	}

	ltt_buffer_begin(buf, trace->start_tsc, 0);

	uatomic_add(&buf->commit_count[0].cc, ltt_subbuffer_header_size());

	uatomic_set(&buf->events_lost, 0);
	uatomic_set(&buf->corrupted_subbuffers, 0);

	memset(buf->commit_seq, 0, sizeof(buf->commit_seq[0]) * n_subbufs);

	return 0;

free_commit_count:
	free(buf->commit_count);

unmap_buf:
	if (shmdt(buf->buf_data) < 0) {
		PERROR("shmdt failed");
	}

	return -1;
}
static void close_buf(struct ust_buffer *buf)
{
	int result;

	result = shmdt(buf->buf_data);
	if (result < 0) {
		PERROR("shmdt");
	}

	result = close(buf->data_ready_fd_read);
	if (result < 0) {
		PERROR("close");
	}

	result = close(buf->data_ready_fd_write);
	if (result < 0 && errno != EBADF) {
		PERROR("close");
	}
}
static int open_channel(struct ust_channel *chan, size_t subbuf_size,
			size_t subbuf_cnt)
{
	int i;
	int result;

	if (subbuf_size == 0 || subbuf_cnt == 0)
		return -1;

	/* Check that the subbuffer size is larger than a page. */
	WARN_ON_ONCE(subbuf_size < PAGE_SIZE);

	/*
	 * Make sure the number of subbuffers and subbuffer size are power of 2.
	 */
	WARN_ON_ONCE(hweight32(subbuf_size) != 1);
	WARN_ON(hweight32(subbuf_cnt) != 1);

	chan->version = UST_CHANNEL_VERSION;
	chan->subbuf_cnt = subbuf_cnt;
	chan->subbuf_size = subbuf_size;
	chan->subbuf_size_order = get_count_order(subbuf_size);
	chan->alloc_size = subbuf_size * subbuf_cnt;

	pthread_mutex_lock(&ust_buffers_channels_mutex);
	for (i = 0; i < chan->n_cpus; i++) {
		result = open_buf(chan, i);
		if (result == -1)
			goto error;
	}
	cds_list_add(&chan->list, &ust_buffers_channels);
	pthread_mutex_unlock(&ust_buffers_channels_mutex);

	return 0;

	/* Error handling */
error:
	for (i--; i >= 0; i--)
		close_buf(chan->buf[i]);

	pthread_mutex_unlock(&ust_buffers_channels_mutex);
	return -1;
}
static void close_channel(struct ust_channel *chan)
{
	unsigned int i;

	pthread_mutex_lock(&ust_buffers_channels_mutex);

	/*
	 * checking for chan->buf[i] being NULL or not is useless in
	 * practice because we allocate buffers for all possible cpus.
	 * However, should we decide to change this and only allocate
	 * for online cpus, this check becomes useful.
	 */
	for (i = 0; i < chan->n_cpus; i++) {
		if (chan->buf[i])
			close_buf(chan->buf[i]);
	}

	cds_list_del(&chan->list);

	pthread_mutex_unlock(&ust_buffers_channels_mutex);
}
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. The lost size is between 0 and subbuf_size-1.
 */
static notrace void ltt_buffer_end(struct ust_buffer *buf,
		u64 tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ust_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);
	u32 data_size = SUBBUF_OFFSET(offset - 1, buf->chan) + 1;

	header->sb_size = PAGE_ALIGN(data_size);
	header->cycle_count_end = tsc;
	header->events_lost = uatomic_read(&buf->events_lost);
	header->subbuf_corrupt = uatomic_read(&buf->corrupted_subbuffers);
	if (unlikely(header->events_lost > 0)) {
		DBG("Some events (%d) were lost in %s_%d", header->events_lost,
		    buf->chan->channel_name, buf->cpu);
	}
	/*
	 * Makes sure the data_size write happens after the write of the rest
	 * of the buffer end data, because data_size is used to identify a
	 * completely written subbuffer in a crash dump.
	 */
	header->data_size = data_size;
}
/*
 * This function should not be called from NMI interrupt context
 */
static notrace void ltt_buf_unfull(struct ust_buffer *buf,
		unsigned int subbuf_idx,
		long offset)
{
}

/*
 * Promote compiler cmm_barrier() to a smp_mb().
 * For the specific LTTng case, this IPI call should be removed if the
 * architecture does not reorder writes. This should eventually be provided by
 * a separate architecture-specific infrastructure.
 */
//ust// static void remote_mb(void *info)
int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
{
	struct ust_channel *channel = buf->chan;
	long consumed_old, consumed_idx, commit_count, write_offset;

	consumed_old = uatomic_read(&buf->consumed);
	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
	commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
	/*
	 * Make sure we read the commit count before reading the buffer
	 * data and the write offset. Correct consumed offset ordering
	 * wrt commit count is ensured by the use of cmpxchg to update
	 * the consumed offset.
	 */
	/*
	 * Local rmb to match the remote wmb to read the commit count before the
	 * buffer data and the write offset.
	 */
	cmm_smp_rmb();

	write_offset = uatomic_read(&buf->offset);
	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */
	if (((commit_count - buf->chan->subbuf_size)
	     & channel->commit_count_mask)
	    - (BUFFER_TRUNC(consumed_old, buf->chan)
	       >> channel->n_subbufs_order)
	    != 0) {
		return -EAGAIN;
	}
	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if ((SUBBUF_TRUNC(write_offset, buf->chan)
	     - SUBBUF_TRUNC(consumed_old, buf->chan))
	    == 0) {
		return -EAGAIN;
	}

	*consumed = consumed_old;

	return 0;
}
int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
{
	long consumed_new, consumed_old;

	consumed_old = uatomic_read(&buf->consumed);
	consumed_old = consumed_old & (~0xFFFFFFFFL);
	consumed_old = consumed_old | uconsumed_old;
	consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);

	if (uatomic_cmpxchg(&buf->consumed, consumed_old,
				consumed_new) != consumed_old) {
		/* We have been pushed by the writer : the last
		 * buffer read _is_ corrupted! It can also
		 * happen if this is a buffer we never got. */
		return -EIO;
	} else {
		/* tell the client that buffer is now unfull */
		int index;
		long data;

		index = SUBBUF_INDEX(consumed_old, buf->chan);
		data = BUFFER_OFFSET(consumed_old, buf->chan);
		ltt_buf_unfull(buf, index, data);
	}

	return 0;
}
static int map_buf_structs(struct ust_channel *chan)
{
	void *ptr;
	int result;
	size_t size;
	int i;

	size = PAGE_ALIGN(1);

	for (i = 0; i < chan->n_cpus; i++) {

		result = chan->buf_struct_shmids[i] = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
		if (result == -1) {
			PERROR("shmget");
			goto destroy_previous;
		}

		ptr = shmat(chan->buf_struct_shmids[i], NULL, 0);
		if (ptr == (void *) -1) {
			PERROR("shmat");
			goto destroy_shm;
		}

		/* Already mark the shared memory for destruction. This will occur only
		 * when all users have detached.
		 */
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if (result == -1) {
			PERROR("shmctl");
			goto destroy_previous;
		}

		chan->buf[i] = ptr;
	}

	return 0;

	/* Jumping inside this loop occurs from within the other loop above with i as
	 * counter, so it unallocates the structures for the cpu = current_i down to
	 * zero. */
	for (; i >= 0; i--) {
destroy_shm:
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if (result == -1) {
			PERROR("shmctl");
		}

destroy_previous:
		continue;
	}

	return -1;
}
static int unmap_buf_structs(struct ust_channel *chan)
{
	int i;

	for (i = 0; i < chan->n_cpus; i++) {
		if (shmdt(chan->buf[i]) < 0) {
			PERROR("shmdt");
		}
	}

	return 0;
}
static int create_channel(const char *trace_name, struct ust_trace *trace,
	const char *channel_name, struct ust_channel *chan,
	unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
{
	int i, result;

	chan->trace = trace;
	chan->overwrite = overwrite;
	chan->n_subbufs_order = get_count_order(n_subbufs);
	chan->commit_count_mask = (~0UL >> chan->n_subbufs_order);
	chan->n_cpus = get_n_cpus();

	/* These mappings should ideally be per-cpu, if somebody can do that
	 * from userspace, that would be cool!
	 */
	chan->buf = (void *) zmalloc(chan->n_cpus * sizeof(void *));
	if (chan->buf == NULL) {
		goto error;
	}
	chan->buf_struct_shmids = (int *) zmalloc(chan->n_cpus * sizeof(int));
	if (chan->buf_struct_shmids == NULL)
		goto free_buf;

	result = map_buf_structs(chan);
	if (result != 0) {
		goto free_buf_struct_shmids;
	}

	result = open_channel(chan, subbuf_size, n_subbufs);
	if (result != 0) {
		ERR("Cannot open channel for trace %s", trace_name);
		goto unmap_buf_structs;
	}

	return 0;

unmap_buf_structs:
	for (i = 0; i < chan->n_cpus; i++) {
		if (shmdt(chan->buf[i]) < 0) {
			PERROR("shmdt bufstruct");
		}
	}

free_buf_struct_shmids:
	free(chan->buf_struct_shmids);

free_buf:
	free(chan->buf);

error:
	return -1;
}
static void remove_channel(struct ust_channel *chan)
{
	unmap_buf_structs(chan);

	free(chan->buf_struct_shmids);

	free(chan->buf);
}
static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
{
}
static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu)
{
	if (channel->buf[cpu]) {
		struct ust_buffer *buf = channel->buf[cpu];

		ltt_force_switch(buf, FORCE_FLUSH);

		/* closing the pipe tells the consumer the buffer is finished */
		close(buf->data_ready_fd_write);
	}
}


static void finish_channel(struct ust_channel *channel)
{
	unsigned int i;

	for (i = 0; i < channel->n_cpus; i++) {
		ltt_relay_finish_buffer(channel, i);
	}
}
/*
 * ltt_reserve_switch_old_subbuf: switch old subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 *
 * Note : offset_old should never be 0 here.
 */
static void ltt_reserve_switch_old_subbuf(
		struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long oldidx = SUBBUF_INDEX(offsets->old - 1, chan);
	long commit_count, padding_size;

	padding_size = chan->subbuf_size
			- (SUBBUF_OFFSET(offsets->old - 1, chan) + 1);
	ltt_buffer_end(buf, *tsc, offsets->old, oldidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its cmm_smp_rmb().
	 */
	cmm_barrier();
	uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
	commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
	ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
	ltt_write_commit_counter(chan, buf, oldidx,
		offsets->old, commit_count, padding_size);
}
/*
 * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
 *
 * This code can be executed unordered : writers may already have written to the
 * sub-buffer before this code gets executed, caution. The commit makes sure
 * that this code is executed before the deliver of this sub-buffer.
 */
static void ltt_reserve_switch_new_subbuf(
		struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long beginidx = SUBBUF_INDEX(offsets->begin, chan);
	long commit_count;

	ltt_buffer_begin(buf, *tsc, beginidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its cmm_smp_rmb().
	 */
	cmm_barrier();
	uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
	commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
	/* Check if the written buffer has to be delivered */
	ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx);
	ltt_write_commit_counter(chan, buf, beginidx,
		offsets->begin, commit_count, ltt_subbuffer_header_size());
}
/*
 * ltt_reserve_end_switch_current: finish switching current subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 */
static void ltt_reserve_end_switch_current(
		struct ust_channel *chan,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long endidx = SUBBUF_INDEX(offsets->end - 1, chan);
	long commit_count, padding_size;

	padding_size = chan->subbuf_size
			- (SUBBUF_OFFSET(offsets->end - 1, chan) + 1);

	ltt_buffer_end(buf, *tsc, offsets->end, endidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its cmm_smp_rmb().
	 */
	cmm_barrier();
	uatomic_add(&buf->commit_count[endidx].cc, padding_size);
	commit_count = uatomic_read(&buf->commit_count[endidx].cc);
	ltt_check_deliver(chan, buf,
		offsets->end - 1, commit_count, endidx);
	ltt_write_commit_counter(chan, buf, endidx,
		offsets->end, commit_count, padding_size);
}
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static int ltt_relay_try_switch_slow(
		enum force_switch_mode mode,
		struct ust_channel *chan,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets,
		u64 *tsc)
{
	long subbuf_index;
	long reserve_commit_diff;

	offsets->begin = uatomic_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
		offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
		offsets->end_switch_old = 1;
	} else {
		/* we do not have to switch : buffer is empty */
		return -1;
	}
	if (mode == FORCE_ACTIVE)
		offsets->begin += ltt_subbuffer_header_size();
	/*
	 * Always begin_switch in FORCE_ACTIVE mode.
	 * Test new buffer integrity
	 */
	subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
	reserve_commit_diff =
		(BUFFER_TRUNC(offsets->begin, buf->chan)
		 >> chan->n_subbufs_order)
		- (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
			& chan->commit_count_mask);
	if (reserve_commit_diff == 0) {
		/* Next buffer not corrupted. */
		if (mode == FORCE_ACTIVE
		    && !chan->overwrite
		    && offsets->begin - uatomic_read(&buf->consumed)
		       >= chan->alloc_size) {
			/*
			 * We do not overwrite non consumed buffers and we are
			 * full : ignore switch while tracing is active.
			 */
			return -1;
		}
	} else {
		/*
		 * Next subbuffer corrupted. Force pushing reader even in normal
		 * mode.
		 */
	}
	offsets->end = offsets->begin;
	return 0;
}
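/*
 * Worked example of the reserve_commit_diff test above (numbers assumed):
 * with 4 subbuffers of 4096 bytes (alloc_size = 16384, n_subbufs_order = 2),
 * a begin offset of 20480 gives BUFFER_TRUNC(20480) = 16384 and
 * 16384 >> 2 = 4096; the target subbuffer (index 1) should therefore show a
 * masked cc_sb of 4096 if its previous pass was fully committed, making the
 * difference 0. A non-zero difference means a previous writer never finished
 * committing that subbuffer, so it is treated as corrupted.
 */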
/*
 * Force a sub-buffer switch for a per-cpu buffer. This operation is
 * completely reentrant : can be called while tracing is active with
 * absolutely no lock held.
 */
void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
		enum force_switch_mode mode)
{
	struct ust_channel *chan = buf->chan;
	struct ltt_reserve_switch_offsets offsets;
	u64 tsc;

	offsets.size = 0;

	DBG("Switching (forced) %s_%d", chan->channel_name, buf->cpu);
	/*
	 * Perform retryable operations.
	 */
	do {
		if (ltt_relay_try_switch_slow(mode, chan, buf,
				&offsets, &tsc))
			return;
	} while (uatomic_cmpxchg(&buf->offset, offsets.old,
				offsets.end) != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, tsc);

	/*
	 * Push the reader if necessary
	 */
	if (mode == FORCE_ACTIVE) {
		ltt_reserve_push_reader(chan, buf, offsets.end - 1);
	}

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old) {
		ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc);
	}

	/*
	 * Populate new subbuffer.
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_switch_new_subbuf(chan, buf, &offsets, &tsc);
}
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
		u64 *tsc, unsigned int *rflags, int largest_align)
{
	long reserve_commit_diff;

	offsets->begin = uatomic_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_current = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();
	if (last_tsc_overflow(buf, *tsc))
		*rflags = LTT_RFLAG_ID_SIZE_TSC;

	if (unlikely(SUBBUF_OFFSET(offsets->begin, buf->chan) == 0)) {
		offsets->begin_switch = 1;		/* For offsets->begin */
	} else {
		offsets->size = ust_get_header_size(chan,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) +
				offsets->size) > buf->chan->subbuf_size)) {
			offsets->end_switch_old = 1;	/* For offsets->old */
			offsets->begin_switch = 1;	/* For offsets->begin */
		}
	}
	if (unlikely(offsets->begin_switch)) {
		long subbuf_index;

		/*
		 * We are typically not filling the previous buffer completely.
		 */
		if (likely(offsets->end_switch_old))
			offsets->begin = SUBBUF_ALIGN(offsets->begin,
						      buf->chan);
		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
		/* Test new buffer integrity */
		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
		reserve_commit_diff =
			(BUFFER_TRUNC(offsets->begin, buf->chan)
			 >> chan->n_subbufs_order)
			- (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
				& chan->commit_count_mask);
		if (likely(reserve_commit_diff == 0)) {
			/* Next buffer not corrupted. */
			if (unlikely(!chan->overwrite &&
				(SUBBUF_TRUNC(offsets->begin, buf->chan)
				 - SUBBUF_TRUNC(uatomic_read(&buf->consumed),
						buf->chan))
				>= chan->alloc_size)) {
				/*
				 * We do not overwrite non consumed buffers
				 * and we are full : event is lost.
				 */
				uatomic_inc(&buf->events_lost);
				return -1;
			} else {
				/*
				 * next buffer not corrupted, we are either in
				 * overwrite mode or the buffer is not full.
				 * It's safe to write in this new subbuffer.
				 */
			}
		} else {
			/*
			 * Next subbuffer corrupted. Drop event in normal and
			 * overwrite mode. Caused by either a writer OOPS or
			 * too many nested writes over a reserve/commit pair.
			 */
			uatomic_inc(&buf->events_lost);
			return -1;
		}
		offsets->size = ust_get_header_size(chan,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan)
				+ offsets->size) > buf->chan->subbuf_size)) {
			/*
			 * Event too big for subbuffers, report error, don't
			 * complete the sub-buffer switch.
			 */
			uatomic_inc(&buf->events_lost);
			return -1;
		} else {
			/*
			 * We just made a successful buffer switch and the event
			 * fits in the new subbuffer. Let's write.
			 */
		}
	} else {
		/*
		 * Event fits in the current buffer and we are not on a switch
		 * boundary. It's safe to write.
		 */
	}
	offsets->end = offsets->begin + offsets->size;

	if (unlikely((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0)) {
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		offsets->end_switch_current = 1;	/* For offsets->begin */
	}
	return 0;
}
/**
 * ltt_reserve_slot_lockless_slow - Atomic slot reservation in a buffer.
 * @trace: the trace structure to log to.
 * @ltt_channel: channel structure
 * @transport_data: data structure specific to ltt relay
 * @data_size: size of the variable length data to log.
 * @slot_size: pointer to total size of the slot (out)
 * @buf_offset : pointer to reserved buffer offset (out)
 * @tsc: pointer to the tsc at the slot reservation (out)
 *
 * Return : -ENOSPC if not enough space, else returns 0.
 * It will take care of sub-buffer switching.
 */
int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
		struct ust_trace *trace, size_t data_size,
		int largest_align, int cpu,
		struct ust_buffer **ret_buf,
		size_t *slot_size, long *buf_offset,
		u64 *tsc, unsigned int *rflags)
{
	struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
	struct ltt_reserve_switch_offsets offsets;

	offsets.size = 0;

	do {
		if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets,
				data_size, tsc, rflags, largest_align)))
			return -ENOSPC;
	} while (unlikely(uatomic_cmpxchg(&buf->offset, offsets.old,
				offsets.end) != offsets.old));

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, *tsc);

	/*
	 * Push the reader if necessary
	 */
	ltt_reserve_push_reader(chan, buf, offsets.end - 1);

	/*
	 * Switch old subbuffer if needed.
	 */
	if (unlikely(offsets.end_switch_old)) {
		ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc);
		DBG("Switching %s_%d", chan->channel_name, cpu);
	}

	/*
	 * Populate new subbuffer.
	 */
	if (unlikely(offsets.begin_switch))
		ltt_reserve_switch_new_subbuf(chan, buf, &offsets, tsc);

	if (unlikely(offsets.end_switch_current))
		ltt_reserve_end_switch_current(chan, buf, &offsets, tsc);

	*slot_size = offsets.size;
	*buf_offset = offsets.begin + offsets.before_hdr_pad;
	return 0;
}
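/*
 * Writer-side sketch of the slow reservation path above (assumed; the fast
 * path, the event header write and the commit step are implemented in the
 * tracer headers, not in this file):
 *
 *	struct ust_buffer *buf;
 *	size_t slot_size;
 *	long buf_offset;
 *	u64 tsc;
 *	unsigned int rflags = 0;
 *
 *	if (!ltt_reserve_slot_lockless_slow(chan, trace, data_size,
 *			largest_align, cpu, &buf, &slot_size,
 *			&buf_offset, &tsc, &rflags)) {
 *		// write the event header and payload at buf_offset,
 *		// then commit slot_size bytes to buf's current subbuffer
 *	}
 */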
static struct ltt_transport ust_relay_transport = {
	.name = "ustrelay",
	.ops = {
		.create_channel = create_channel,
		.finish_channel = finish_channel,
		.remove_channel = remove_channel,
		.wakeup_channel = ltt_relay_async_wakeup_chan,
	},
};

static char initialized = 0;

void __attribute__((constructor)) init_ustrelay_transport(void)
{
	if (!initialized) {
		ltt_transport_register(&ust_relay_transport);
		initialized = 1;
	}
}

static void __attribute__((destructor)) ust_buffers_exit(void)
{
	ltt_transport_unregister(&ust_relay_transport);
}
size_t ltt_write_event_header_slow(struct ust_channel *channel,
		struct ust_buffer *buf, long buf_offset,
		u16 eID, u32 event_size,
		u64 tsc, unsigned int rflags)
{
	struct ltt_event_header header;
	u16 small_size;

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		header.id_time = 29 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID_SIZE:
		header.id_time = 30 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID:
		header.id_time = 31 << LTT_TSC_BITS;
		break;
	}

	header.id_time |= (u32)tsc & LTT_TSC_MASK;
	ust_buffers_write(buf, buf_offset, &header, sizeof(header));
	buf_offset += sizeof(header);

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			&small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ust_buffers_write(buf, buf_offset,
				&event_size, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		buf_offset += ltt_align(buf_offset, sizeof(u64));
		ust_buffers_write(buf, buf_offset,
			&tsc, sizeof(u64));
		buf_offset += sizeof(u64);
		break;
	case LTT_RFLAG_ID_SIZE:
		small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			&small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ust_buffers_write(buf, buf_offset,
				&event_size, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		break;
	case LTT_RFLAG_ID:
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		break;
	}

	return buf_offset;
}
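/*
 * Layout note for the slow header written above (a sketch, assuming the
 * compact header keeps the event ID in the bits above LTT_TSC_BITS): IDs 29,
 * 30 and 31 are escape values rather than real event IDs. 31 means "16-bit
 * event ID follows", 30 additionally appends a 16-bit (possibly extended to
 * 32-bit) size, and 29 also appends a 64-bit TSC after an 8-byte alignment,
 * which is exactly the order of the ust_buffers_write() calls in the switch
 * above.
 */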