/*
 * LTTng userspace tracer buffering system
 *
 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
 * Copyright (C) 2008-2011 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/*
 * Note: this code does not support the ref/noref flag and reader-owned
 * subbuffer scheme. Therefore, flight recorder mode uses a mechanism
 * where the reader can read corrupted data (and detect this), thus
 * returning -EIO.
 */
#include <ust/clock.h>

#include "tracercore.h"
#include "usterr_signal_safe.h"
struct ltt_reserve_switch_offsets {
	long begin, end, old;
	long begin_switch, end_switch_current, end_switch_old;
	size_t before_hdr_pad, size;
};
static DEFINE_MUTEX(ust_buffers_channels_mutex);
static CDS_LIST_HEAD(ust_buffers_channels);
static void ltt_force_switch(struct ust_buffer *buf,
		enum force_switch_mode mode);
static int get_n_cpus(void)
{
	int result;
	static int n_cpus = 0;

	if (!n_cpus) {
		/* On Linux, when some processors are offline
		 * _SC_NPROCESSORS_CONF counts the offline
		 * processors, whereas _SC_NPROCESSORS_ONLN
		 * does not. If we used _SC_NPROCESSORS_ONLN,
		 * getcpu() could return a value greater than
		 * this sysconf, in which case the arrays
		 * indexed by processor would overflow.
		 */
		result = sysconf(_SC_NPROCESSORS_CONF);
		if (result == -1)
			return -1;

		n_cpus = result;
	}

	return n_cpus;
}
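/*
 * Added illustration (not part of the original code): the value returned by
 * get_n_cpus() sizes every per-cpu array in this file, so the current CPU
 * number must never exceed it. A minimal sketch, assuming sched_getcpu() as
 * the CPU lookup (the tracer may use its own wrapper):
 *
 *	int n = get_n_cpus();
 *	struct ust_buffer **bufs = zmalloc(n * sizeof(*bufs));
 *	int cpu = sched_getcpu();		// always < _SC_NPROCESSORS_CONF
 *	struct ust_buffer *buf = bufs[cpu];	// cannot overflow, even with
 *						// some processors offline
 */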
/**
 * _ust_buffers_strncpy_fixup - Fix an incomplete string in a ltt_relay buffer.
 * @buf : buffer
 * @offset : offset within the buffer
 * @len : length to write
 * @copied: string actually copied
 * @terminated: does string end with \0
 *
 * Fills string with "X" if incomplete.
 */
void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
		size_t len, size_t copied, int terminated)
{
	size_t buf_offset, cpy;
	/*
	 * Deal with non-terminated string.
	 */
	offset += copied - 1;
	buf_offset = BUFFER_OFFSET(offset, buf->chan);
	/*
	 * Underlying layer should never ask for writes across
	 * subbuffers.
	 */
	assert(buf_offset
	       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
	ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);

	/*
	 * Deal with incomplete string.
	 * Overwrite string's \0 with X too.
	 */
	buf_offset = BUFFER_OFFSET(offset, buf->chan);
	/*
	 * Underlying layer should never ask for writes across subbuffers.
	 */
	assert(buf_offset
	       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
	ust_buffers_do_memset(buf->buf_data + buf_offset,
			      'X', len - cpy);

	/*
	 * Overwrite last 'X' with '\0'.
	 */
	buf_offset = BUFFER_OFFSET(offset, buf->chan);
	/*
	 * Underlying layer should never ask for writes across subbuffers.
	 */
	assert(buf_offset
	       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
	ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
}
static void ltt_buffer_begin(struct ust_buffer *buf,
		u64 tsc, unsigned int subbuf_idx)
{
	struct ust_channel *channel = buf->chan;
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ust_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->cycle_count_begin = tsc;
	header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	/*
	 * No memory barrier needed to order data_size/sb_size vs commit count
	 * update, because commit count update contains a compiler barrier that
	 * ensures the order of the writes is OK from a program POV. It only
	 * matters for crash dump recovery, which is not executed concurrently,
	 * so memory write order does not matter.
	 */
	ltt_write_trace_header(channel->trace, header);
}
static int map_buf_data(struct ust_buffer *buf, size_t *size)
{
	void *ptr;
	int result;

	*size = PAGE_ALIGN(*size);

	result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
	if (result < 0 && errno == EINVAL) {
		ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
		return -1;
	} else if (result < 0) {
		PERROR("shmget");
		return -1;
	}

	ptr = shmat(buf->shmid, NULL, 0);
	if (ptr == (void *) -1) {
		PERROR("shmat");
		goto destroy_shmem;
	}

	/* Already mark the shared memory for destruction. This will occur only
	 * when all users have detached.
	 */
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if (result < 0)
		PERROR("shmctl");

	buf->buf_data = ptr;
	buf->buf_size = *size;

	return 0;

destroy_shmem:
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if (result < 0)
		PERROR("shmctl");

	return -1;
}
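/*
 * Background sketch (added for clarity, not from the original file): the
 * System V shared memory lifecycle used above. Marking the segment with
 * IPC_RMID right after shmat() does not invalidate existing mappings; the
 * kernel only destroys the segment once the last process detaches, so the
 * buffer cannot leak even if the traced application crashes.
 *
 *	int id = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
 *	void *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	// segment lives on until last shmdt()
 *	...				// a consumer may still attach by id
 *	shmdt(p);			// last detach frees the memory
 */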
static int open_buf(struct ust_channel *chan, int cpu)
{
	int result, fds[2];
	unsigned int j;
	struct ust_trace *trace = chan->trace;
	struct ust_buffer *buf = chan->buf[cpu];
	unsigned int n_subbufs = chan->subbuf_cnt;

	result = map_buf_data(buf, &chan->alloc_size);
	if (result < 0)
		return -1;

	buf->commit_count =
		zmalloc(sizeof(*buf->commit_count) * n_subbufs);
	if (!buf->commit_count)
		goto unmap_buf;

	result = pipe(fds);
	if (result < 0) {
		PERROR("pipe");
		goto free_commit_count;
	}
	buf->data_ready_fd_read = fds[0];
	buf->data_ready_fd_write = fds[1];

	uatomic_set(&buf->offset, ltt_subbuffer_header_size());
	uatomic_set(&buf->consumed, 0);
	uatomic_set(&buf->active_readers, 0);
	for (j = 0; j < n_subbufs; j++) {
		uatomic_set(&buf->commit_count[j].cc, 0);
		uatomic_set(&buf->commit_count[j].cc_sb, 0);
	}

	ltt_buffer_begin(buf, trace->start_tsc, 0);

	uatomic_add(&buf->commit_count[0].cc, ltt_subbuffer_header_size());

	uatomic_set(&buf->events_lost, 0);
	uatomic_set(&buf->corrupted_subbuffers, 0);

	memset(buf->commit_seq, 0, sizeof(buf->commit_seq[0]) * n_subbufs);

	return 0;

free_commit_count:
	free(buf->commit_count);

unmap_buf:
	if (shmdt(buf->buf_data) < 0) {
		PERROR("shmdt failed");
	}

	return -1;
}
static void close_buf(struct ust_buffer *buf)
{
	int result;

	result = shmdt(buf->buf_data);
	if (result < 0)
		PERROR("shmdt");

	result = close(buf->data_ready_fd_read);
	if (result < 0)
		PERROR("close");

	result = close(buf->data_ready_fd_write);
	if (result < 0 && errno != EBADF) {
		PERROR("close");
	}
}
static int open_channel(struct ust_channel *chan, size_t subbuf_size,
		size_t subbuf_cnt)
{
	int i;
	int result;

	if (subbuf_size == 0 || subbuf_cnt == 0)
		return -1;

	/* Check that the subbuffer size is larger than a page. */
	WARN_ON_ONCE(subbuf_size < PAGE_SIZE);

	/*
	 * Make sure the number of subbuffers and subbuffer size are power of 2.
	 */
	WARN_ON_ONCE(hweight32(subbuf_size) != 1);
	WARN_ON(hweight32(subbuf_cnt) != 1);

	chan->version = UST_CHANNEL_VERSION;
	chan->subbuf_cnt = subbuf_cnt;
	chan->subbuf_size = subbuf_size;
	chan->subbuf_size_order = get_count_order(subbuf_size);
	chan->alloc_size = subbuf_size * subbuf_cnt;

	pthread_mutex_lock(&ust_buffers_channels_mutex);
	for (i = 0; i < chan->n_cpus; i++) {
		result = open_buf(chan, i);
		if (result == -1)
			goto error;
	}
	cds_list_add(&chan->list, &ust_buffers_channels);
	pthread_mutex_unlock(&ust_buffers_channels_mutex);

	return 0;

error:
	for (i--; i >= 0; i--)
		close_buf(chan->buf[i]);

	pthread_mutex_unlock(&ust_buffers_channels_mutex);
	return -1;
}
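/*
 * Added note on why the power-of-2 checks above matter (the numbers below are
 * hypothetical): with subbuf_size and subbuf_cnt both powers of two, the
 * offset arithmetic done by the SUBBUF_ and BUFFER_ helper macros (defined in
 * buffers.h) reduces to shifts and masks, e.g. with subbuf_size = 4096
 * (order 12) and subbuf_cnt = 8:
 *
 *	buf_offset   = offset & (subbuf_size * subbuf_cnt - 1);
 *	subbuf_index = buf_offset >> 12;	// 0..7
 *	subbuf_off   = buf_offset & (4096 - 1);
 *
 * Non-power-of-2 values would break this, hence the WARN_ON checks.
 */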
static void close_channel(struct ust_channel *chan)
{
	int i;

	pthread_mutex_lock(&ust_buffers_channels_mutex);
	/*
	 * checking for chan->buf[i] being NULL or not is useless in
	 * practice because we allocate buffers for all possible cpus.
	 * However, should we decide to change this and only allocate
	 * for online cpus, this check becomes useful.
	 */
	for (i = 0; i < chan->n_cpus; i++) {
		if (chan->buf[i])
			close_buf(chan->buf[i]);
	}

	cds_list_del(&chan->list);

	pthread_mutex_unlock(&ust_buffers_channels_mutex);
}
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. The lost size is between 0 and subbuf_size-1.
 */
static notrace void ltt_buffer_end(struct ust_buffer *buf,
		u64 tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ust_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);
	u32 data_size = SUBBUF_OFFSET(offset - 1, buf->chan) + 1;

	header->sb_size = PAGE_ALIGN(data_size);
	header->cycle_count_end = tsc;
	header->events_lost = uatomic_read(&buf->events_lost);
	header->subbuf_corrupt = uatomic_read(&buf->corrupted_subbuffers);
	if (unlikely(header->events_lost > 0)) {
		DBG("Some events (%d) were lost in %s_%d", header->events_lost,
		    buf->chan->channel_name, buf->cpu);
	}
	/*
	 * Makes sure data_size write happens after write of the rest of the
	 * buffer end data, because data_size is used to identify a completely
	 * written subbuffer in a crash dump.
	 */
	header->data_size = data_size;
}
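/*
 * Crash-dump note (added, not in the original): because ltt_buffer_begin()
 * pre-fills data_size/sb_size with 0xFFFFFFFF and ltt_buffer_end() writes the
 * real data_size last, a post-mortem reader can tell finished subbuffers from
 * ones that were still being written when the process died. A minimal sketch
 * of that check (hypothetical reader code):
 *
 *	struct ltt_subbuffer_header *hdr = subbuf_start;
 *	if (hdr->data_size == 0xFFFFFFFF)
 *		;	// subbuffer never completed: salvage partial data
 *	else
 *		;	// hdr->data_size bytes were fully written
 */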
/*
 * This function should not be called from NMI interrupt context
 */
static notrace void ltt_buf_unfull(struct ust_buffer *buf,
		unsigned int subbuf_idx,
		long offset)
{
}
/*
 * Promote compiler cmm_barrier to a smp_mb().
 * For the specific LTTng case, this IPI call should be removed if the
 * architecture does not reorder writes. This should eventually be provided by
 * a separate architecture-specific infrastructure.
 */
//ust// static void remote_mb(void *info)
int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
{
	struct ust_channel *channel = buf->chan;
	long consumed_old, consumed_idx, commit_count, write_offset;

	consumed_old = uatomic_read(&buf->consumed);
	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
	commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
	/*
	 * Make sure we read the commit count before reading the buffer
	 * data and the write offset. Correct consumed offset ordering
	 * wrt commit count is insured by the use of cmpxchg to update
	 * the consumed offset.
	 */
	/*
	 * Local rmb to match the remote wmb to read the commit count before the
	 * buffer data and the write offset.
	 */
	cmm_smp_rmb();

	write_offset = uatomic_read(&buf->offset);
	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */
	if (((commit_count - buf->chan->subbuf_size)
	     & channel->commit_count_mask)
	    - (BUFFER_TRUNC(consumed_old, buf->chan)
	       >> channel->n_subbufs_order)
	    != 0)
		return -EAGAIN;

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if ((SUBBUF_TRUNC(write_offset, buf->chan)
	     - SUBBUF_TRUNC(consumed_old, buf->chan))
	    == 0)
		return -EAGAIN;

	*consumed = consumed_old;

	return 0;
}
int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
{
	long consumed_new, consumed_old;

	consumed_old = uatomic_read(&buf->consumed);
	consumed_old = consumed_old & (~0xFFFFFFFFL);
	consumed_old = consumed_old | uconsumed_old;
	consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);

	if (uatomic_cmpxchg(&buf->consumed, consumed_old,
			    consumed_new) != consumed_old) {
		/* We have been pushed by the writer : the last
		 * buffer read _is_ corrupted! It can also
		 * happen if this is a buffer we never got. */
		return -EIO;
	} else {
		/* tell the client that buffer is now unfull */
		int index;
		long data;
		index = SUBBUF_INDEX(consumed_old, buf->chan);
		data = BUFFER_OFFSET(consumed_old, buf->chan);
		ltt_buf_unfull(buf, index, data);
	}
	return 0;
}
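/*
 * Consumer-side usage sketch (added for clarity; the real consumer code lives
 * outside this file, this only illustrates the calling convention suggested by
 * get_subbuf()/put_subbuf() above):
 *
 *	long consumed;
 *
 *	if (ust_buffers_get_subbuf(buf, &consumed) == 0) {
 *		// copy out the subbuffer at SUBBUF_INDEX(consumed, buf->chan)
 *		if (ust_buffers_put_subbuf(buf, consumed) == -EIO) {
 *			// pushed by the writer in flight recorder mode:
 *			// the data just read must be discarded
 *		}
 *	}	// a non-zero return from get_subbuf(): nothing fully
 *		// committed yet, try again later
 */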
static int map_buf_structs(struct ust_channel *chan)
{
	void *ptr;
	int result;
	size_t size;
	int i;

	size = PAGE_ALIGN(1);

	for (i = 0; i < chan->n_cpus; i++) {
		result = chan->buf_struct_shmids[i] = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
		if (result == -1) {
			PERROR("shmget");
			goto destroy_previous;
		}

		ptr = shmat(chan->buf_struct_shmids[i], NULL, 0);
		if (ptr == (void *) -1) {
			PERROR("shmat");
			goto destroy_shm;
		}

		/* Already mark the shared memory for destruction. This will occur only
		 * when all users have detached.
		 */
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if (result == -1) {
			PERROR("shmctl");
			goto destroy_previous;
		}

		chan->buf[i] = ptr;
	}

	return 0;

	/* Jumping inside this loop occurs from within the other loop above with i as
	 * counter, so it unallocates the structures for the cpu = current_i down to
	 * zero.
	 */
	for (; i >= 0; i--) {
destroy_shm:
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if (result == -1)
			PERROR("shmctl");

destroy_previous:
		continue;
	}

	return -1;
}
static int unmap_buf_structs(struct ust_channel *chan)
{
	int i;

	for (i = 0; i < chan->n_cpus; i++) {
		if (shmdt(chan->buf[i]) < 0) {
			PERROR("shmdt");
		}
	}

	return 0;
}
static int create_channel(const char *trace_name, struct ust_trace *trace,
	const char *channel_name, struct ust_channel *chan,
	unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
{
	int i, result;

	chan->overwrite = overwrite;
	chan->n_subbufs_order = get_count_order(n_subbufs);
	chan->commit_count_mask = (~0UL >> chan->n_subbufs_order);
	chan->n_cpus = get_n_cpus();

	/* These mappings should ideally be per-cpu, if somebody can do that
	 * from userspace, that would be cool!
	 */
	chan->buf = (void *) zmalloc(chan->n_cpus * sizeof(void *));
	if (chan->buf == NULL) {
		goto error;
	}
	chan->buf_struct_shmids = (int *) zmalloc(chan->n_cpus * sizeof(int));
	if (chan->buf_struct_shmids == NULL)
		goto free_buf;

	result = map_buf_structs(chan);
	if (result != 0)
		goto free_buf_struct_shmids;

	result = open_channel(chan, subbuf_size, n_subbufs);
	if (result != 0) {
		ERR("Cannot open channel for trace %s", trace_name);
		goto unmap_buf_structs;
	}

	return 0;

unmap_buf_structs:
	for (i = 0; i < chan->n_cpus; i++) {
		if (shmdt(chan->buf[i]) < 0) {
			PERROR("shmdt bufstruct");
		}
	}

free_buf_struct_shmids:
	free(chan->buf_struct_shmids);

free_buf:
	free(chan->buf);

error:
	return -1;
}
static void remove_channel(struct ust_channel *chan)
{
	unmap_buf_structs(chan);

	free(chan->buf_struct_shmids);

	free(chan->buf);
}
static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
{
}
static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu)
{
	if (channel->buf[cpu]) {
		struct ust_buffer *buf = channel->buf[cpu];
		ltt_force_switch(buf, FORCE_FLUSH);

		/* closing the pipe tells the consumer the buffer is finished */
		close(buf->data_ready_fd_write);
	}
}
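/*
 * Added note: since close(buf->data_ready_fd_write) above is how the end of a
 * buffer is signalled, a consumer blocking on the read side simply observes
 * EOF. A minimal sketch of the receiving end (hypothetical consumer code, not
 * part of this file):
 *
 *	char dummy;
 *	ssize_t n = read(buf->data_ready_fd_read, &dummy, 1);
 *	if (n == 0)
 *		;	// writer closed the pipe: flush and tear down
 *	else if (n == 1)
 *		;	// data-ready notification
 */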
static void finish_channel(struct ust_channel *channel)
{
	unsigned int i;

	for (i = 0; i < channel->n_cpus; i++) {
		ltt_relay_finish_buffer(channel, i);
	}
}
/*
 * ltt_reserve_switch_old_subbuf: switch old subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 *
 * Note : offset_old should never be 0 here.
 */
static void ltt_reserve_switch_old_subbuf(
		struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long oldidx = SUBBUF_INDEX(offsets->old - 1, chan);
	long commit_count, padding_size;

	padding_size = chan->subbuf_size
			- (SUBBUF_OFFSET(offsets->old - 1, chan) + 1);
	ltt_buffer_end(buf, *tsc, offsets->old, oldidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its cmm_smp_rmb().
	 */
	cmm_barrier();
	uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
	commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
	ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
	ltt_write_commit_counter(chan, buf, oldidx,
		offsets->old, commit_count, padding_size);
}
/*
 * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
 *
 * This code can be executed unordered : writers may already have written to the
 * sub-buffer before this code gets executed, caution. The commit makes sure
 * that this code is executed before the deliver of this sub-buffer.
 */
static void ltt_reserve_switch_new_subbuf(
		struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long beginidx = SUBBUF_INDEX(offsets->begin, chan);
	long commit_count;

	ltt_buffer_begin(buf, *tsc, beginidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its cmm_smp_rmb().
	 */
	cmm_barrier();
	uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
	commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
	/* Check if the written buffer has to be delivered */
	ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx);
	ltt_write_commit_counter(chan, buf, beginidx,
		offsets->begin, commit_count, ltt_subbuffer_header_size());
}
/*
 * ltt_reserve_end_switch_current: finish switching current subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 */
static void ltt_reserve_end_switch_current(
		struct ust_channel *chan,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long endidx = SUBBUF_INDEX(offsets->end - 1, chan);
	long commit_count, padding_size;

	padding_size = chan->subbuf_size
			- (SUBBUF_OFFSET(offsets->end - 1, chan) + 1);

	ltt_buffer_end(buf, *tsc, offsets->end, endidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its cmm_smp_rmb().
	 */
	cmm_barrier();
	uatomic_add(&buf->commit_count[endidx].cc, padding_size);
	commit_count = uatomic_read(&buf->commit_count[endidx].cc);
	ltt_check_deliver(chan, buf,
		offsets->end - 1, commit_count, endidx);
	ltt_write_commit_counter(chan, buf, endidx,
		offsets->end, commit_count, padding_size);
}
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static int ltt_relay_try_switch_slow(
		enum force_switch_mode mode,
		struct ust_channel *chan,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets,
		u64 *tsc)
{
	long subbuf_index;
	long reserve_commit_diff;

	offsets->begin = uatomic_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
		offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
		offsets->end_switch_old = 1;
	} else {
		/* we do not have to switch : buffer is empty */
		return -1;
	}
	if (mode == FORCE_ACTIVE)
		offsets->begin += ltt_subbuffer_header_size();
	/*
	 * Always begin_switch in FORCE_ACTIVE mode.
	 * Test new buffer integrity
	 */
	subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
	reserve_commit_diff =
		(BUFFER_TRUNC(offsets->begin, buf->chan)
		 >> chan->n_subbufs_order)
		- (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
		   & chan->commit_count_mask);
	if (reserve_commit_diff == 0) {
		/* Next buffer not corrupted. */
		if (mode == FORCE_ACTIVE
		    && !chan->overwrite
		    && offsets->begin - uatomic_read(&buf->consumed)
		       >= chan->alloc_size) {
			/*
			 * We do not overwrite non consumed buffers and we are
			 * full : ignore switch while tracing is active.
			 */
			return -1;
		}
	} else {
		/*
		 * Next subbuffer corrupted. Force pushing reader even in normal
		 * mode.
		 */
	}
	offsets->end = offsets->begin;
	return 0;
}
/*
 * Force a sub-buffer switch for a per-cpu buffer. This operation is
 * completely reentrant : can be called while tracing is active with
 * absolutely no lock held.
 */
void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
		enum force_switch_mode mode)
{
	struct ust_channel *chan = buf->chan;
	struct ltt_reserve_switch_offsets offsets;
	u64 tsc;

	DBG("Switching (forced) %s_%d", chan->channel_name, buf->cpu);
	/*
	 * Perform retryable operations.
	 */
	do {
		if (ltt_relay_try_switch_slow(mode, chan, buf,
				&offsets, &tsc))
			return;
	} while (uatomic_cmpxchg(&buf->offset, offsets.old,
			offsets.end) != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, tsc);

	/*
	 * Push the reader if necessary
	 */
	if (mode == FORCE_ACTIVE) {
		ltt_reserve_push_reader(chan, buf, offsets.end - 1);
	}

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old) {
		ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc);
	}

	/*
	 * Populate new subbuffer.
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_switch_new_subbuf(chan, buf, &offsets, &tsc);
}
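/*
 * Added usage note: a forced switch is what makes a partially filled
 * subbuffer visible without waiting for it to fill up, e.g. when a buffer is
 * finished (see ltt_relay_finish_buffer() above) or from a hypothetical
 * periodic flush. Roughly:
 *
 *	ltt_force_switch(buf, FORCE_FLUSH);	// close the current subbuffer
 *	ltt_force_switch(buf, FORCE_ACTIVE);	// also open and populate a new
 *						// one so tracing can continue
 */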
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
		u64 *tsc, unsigned int *rflags, int largest_align)
{
	long reserve_commit_diff;

	offsets->begin = uatomic_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_current = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();
	if (last_tsc_overflow(buf, *tsc))
		*rflags = LTT_RFLAG_ID_SIZE_TSC;

	if (unlikely(SUBBUF_OFFSET(offsets->begin, buf->chan) == 0)) {
		offsets->begin_switch = 1;		/* For offsets->begin */
	} else {
		offsets->size = ust_get_header_size(chan,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) +
			     offsets->size) > buf->chan->subbuf_size)) {
			offsets->end_switch_old = 1;	/* For offsets->old */
			offsets->begin_switch = 1;	/* For offsets->begin */
		}
	}
	if (unlikely(offsets->begin_switch)) {
		long subbuf_index;

		/*
		 * We are typically not filling the previous buffer completely.
		 */
		if (likely(offsets->end_switch_old))
			offsets->begin = SUBBUF_ALIGN(offsets->begin,
						      buf->chan);
		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
		/* Test new buffer integrity */
		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
		reserve_commit_diff =
			(BUFFER_TRUNC(offsets->begin, buf->chan)
			 >> chan->n_subbufs_order)
			- (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
			   & chan->commit_count_mask);
		if (likely(reserve_commit_diff == 0)) {
			/* Next buffer not corrupted. */
			if (unlikely(!chan->overwrite &&
				(SUBBUF_TRUNC(offsets->begin, buf->chan)
				 - SUBBUF_TRUNC(uatomic_read(&buf->consumed),
						buf->chan))
				>= chan->alloc_size)) {
				/*
				 * We do not overwrite non consumed buffers
				 * and we are full : event is lost.
				 */
				uatomic_inc(&buf->events_lost);
				return -1;
			} else {
				/*
				 * next buffer not corrupted, we are either in
				 * overwrite mode or the buffer is not full.
				 * It's safe to write in this new subbuffer.
				 */
			}
		} else {
			/*
			 * Next subbuffer corrupted. Drop event in normal and
			 * overwrite mode. Caused by either a writer OOPS or
			 * too many nested writes over a reserve/commit pair.
			 */
			uatomic_inc(&buf->events_lost);
			return -1;
		}
		offsets->size = ust_get_header_size(chan,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan)
			     + offsets->size) > buf->chan->subbuf_size)) {
			/*
			 * Event too big for subbuffers, report error, don't
			 * complete the sub-buffer switch.
			 */
			uatomic_inc(&buf->events_lost);
			return -1;
		} else {
			/*
			 * We just made a successful buffer switch and the event
			 * fits in the new subbuffer. Let's write.
			 */
		}
	} else {
		/*
		 * Event fits in the current buffer and we are not on a switch
		 * boundary. It's safe to write.
		 */
	}
	offsets->end = offsets->begin + offsets->size;

	if (unlikely((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0)) {
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		offsets->end_switch_current = 1;	/* For offsets->begin */
	}
	return 0;
}
/**
 * ltt_relay_reserve_slot_lockless_slow - Atomic slot reservation in a buffer.
 * @trace: the trace structure to log to.
 * @ltt_channel: channel structure
 * @transport_data: data structure specific to ltt relay
 * @data_size: size of the variable length data to log.
 * @slot_size: pointer to total size of the slot (out)
 * @buf_offset : pointer to reserved buffer offset (out)
 * @tsc: pointer to the tsc at the slot reservation (out)
 *
 * Return : -ENOSPC if not enough space, else returns 0.
 * It will take care of sub-buffer switching.
 */
int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
		struct ust_trace *trace, size_t data_size,
		int largest_align, int cpu,
		struct ust_buffer **ret_buf,
		size_t *slot_size, long *buf_offset,
		u64 *tsc, unsigned int *rflags)
{
	struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
	struct ltt_reserve_switch_offsets offsets;

	do {
		if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets,
				data_size, tsc, rflags, largest_align)))
			return -ENOSPC;
	} while (unlikely(uatomic_cmpxchg(&buf->offset, offsets.old,
			offsets.end) != offsets.old));

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, *tsc);

	/*
	 * Push the reader if necessary
	 */
	ltt_reserve_push_reader(chan, buf, offsets.end - 1);

	/*
	 * Switch old subbuffer if needed.
	 */
	if (unlikely(offsets.end_switch_old)) {
		ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc);
		DBG("Switching %s_%d", chan->channel_name, cpu);
	}

	/*
	 * Populate new subbuffer.
	 */
	if (unlikely(offsets.begin_switch))
		ltt_reserve_switch_new_subbuf(chan, buf, &offsets, tsc);

	if (unlikely(offsets.end_switch_current))
		ltt_reserve_end_switch_current(chan, buf, &offsets, tsc);

	*slot_size = offsets.size;
	*buf_offset = offsets.begin + offsets.before_hdr_pad;
	return 0;
}
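/*
 * Added sketch of the probe-side calling convention implied by the slow path
 * above (the fast path and the commit step live in the tracer headers; the
 * surrounding variable names in this sketch are therefore only indicative):
 *
 *	struct ust_buffer *buf;
 *	size_t slot_size;
 *	long buf_offset;
 *	u64 tsc;
 *	unsigned int rflags = 0;
 *
 *	if (ltt_reserve_slot_lockless_slow(chan, trace, data_size,
 *			largest_align, cpu, &buf, &slot_size, &buf_offset,
 *			&tsc, &rflags) < 0)
 *		return;		// -ENOSPC: the event is lost
 *	buf_offset = ltt_write_event_header_slow(chan, buf, buf_offset,
 *			eID, data_size, tsc, rflags);
 *	// ... write the payload at buf_offset, then commit slot_size bytes
 */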
static struct ltt_transport ust_relay_transport = {
	.create_channel = create_channel,
	.finish_channel = finish_channel,
	.remove_channel = remove_channel,
	.wakeup_channel = ltt_relay_async_wakeup_chan,
};
static char initialized = 0;

void __attribute__((constructor)) init_ustrelay_transport(void)
{
	if (!initialized) {
		ltt_transport_register(&ust_relay_transport);
		initialized = 1;
	}
}

static void __attribute__((destructor)) ust_buffers_exit(void)
{
	ltt_transport_unregister(&ust_relay_transport);
}
size_t ltt_write_event_header_slow(struct ust_channel *channel,
		struct ust_buffer *buf, long buf_offset,
		u16 eID, u32 event_size,
		u64 tsc, unsigned int rflags)
{
	struct ltt_event_header header;
	u16 small_size;

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		header.id_time = 29 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID_SIZE:
		header.id_time = 30 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID:
		header.id_time = 31 << LTT_TSC_BITS;
		break;
	}

	header.id_time |= (u32)tsc & LTT_TSC_MASK;
	ust_buffers_write(buf, buf_offset, &header, sizeof(header));
	buf_offset += sizeof(header);

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			&small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ust_buffers_write(buf, buf_offset,
				&event_size, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		buf_offset += ltt_align(buf_offset, sizeof(u64));
		ust_buffers_write(buf, buf_offset,
			&tsc, sizeof(u64));
		buf_offset += sizeof(u64);
		break;
	case LTT_RFLAG_ID_SIZE:
		small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			&small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ust_buffers_write(buf, buf_offset,
				&event_size, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		break;
	case LTT_RFLAG_ID:
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		break;
	}

	return buf_offset;
}
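/*
 * Added worked example of the id_time packing used above (LTT_TSC_BITS and
 * LTT_TSC_MASK come from the tracer headers): the top bits carry the header
 * type (29, 30 or 31 for the extended formats), the low LTT_TSC_BITS carry
 * the truncated timestamp.
 *
 *	u32 id_time = 29 << LTT_TSC_BITS;	// LTT_RFLAG_ID_SIZE_TSC form
 *	id_time |= (u32)tsc & LTT_TSC_MASK;	// truncated TSC in low bits
 *	// a reader recovers both fields with:
 *	u32 type = id_time >> LTT_TSC_BITS;
 *	u32 tsc_lsb = id_time & LTT_TSC_MASK;
 */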