/*
 * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
16 #include <sys/socket.h>
17 #include <sys/types.h>
23 #include <bin/lttng-consumerd/health-consumerd.h>
24 #include <common/common.h>
25 #include <common/kernel-ctl/kernel-ctl.h>
26 #include <common/sessiond-comm/sessiond-comm.h>
27 #include <common/sessiond-comm/relayd.h>
28 #include <common/compat/fcntl.h>
29 #include <common/compat/endian.h>
30 #include <common/pipe.h>
31 #include <common/relayd/relayd.h>
32 #include <common/utils.h>
33 #include <common/consumer/consumer-stream.h>
34 #include <common/index/index.h>
35 #include <common/consumer/consumer-timer.h>
36 #include <common/optional.h>
37 #include <common/buffer-view.h>
38 #include <common/consumer/consumer.h>
39 #include <common/consumer/metadata-bucket.h>
41 #include "kernel-consumer.h"
/*
 * Shared consumer state; defined in the common consumer code (extern here so
 * the kernel-specific entry points below can reference it).
 */
extern struct lttng_consumer_global_data the_consumer_data;
/* Poll timeout (ms) used by the consumer polling threads; defined elsewhere. */
extern int consumer_poll_timeout;
47 * Take a snapshot for a specific fd
49 * Returns 0 on success, < 0 on error
51 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream
*stream
)
54 int infd
= stream
->wait_fd
;
56 ret
= kernctl_snapshot(infd
);
58 * -EAGAIN is not an error, it just means that there is no data to
61 if (ret
!= 0 && ret
!= -EAGAIN
) {
62 PERROR("Getting sub-buffer snapshot.");
69 * Sample consumed and produced positions for a specific fd.
71 * Returns 0 on success, < 0 on error.
73 int lttng_kconsumer_sample_snapshot_positions(
74 struct lttng_consumer_stream
*stream
)
78 return kernctl_snapshot_sample_positions(stream
->wait_fd
);
82 * Get the produced position
84 * Returns 0 on success, < 0 on error
86 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream
*stream
,
90 int infd
= stream
->wait_fd
;
92 ret
= kernctl_snapshot_get_produced(infd
, pos
);
94 PERROR("kernctl_snapshot_get_produced");
101 * Get the consumerd position
103 * Returns 0 on success, < 0 on error
105 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream
*stream
,
109 int infd
= stream
->wait_fd
;
111 ret
= kernctl_snapshot_get_consumed(infd
, pos
);
113 PERROR("kernctl_snapshot_get_consumed");
120 int get_current_subbuf_addr(struct lttng_consumer_stream
*stream
,
124 unsigned long mmap_offset
;
125 const char *mmap_base
= (const char *) stream
->mmap_base
;
127 ret
= kernctl_get_mmap_read_offset(stream
->wait_fd
, &mmap_offset
);
129 PERROR("Failed to get mmap read offset");
133 *addr
= mmap_base
+ mmap_offset
;
139 * Take a snapshot of all the stream of a channel
140 * RCU read-side lock must be held across this function to ensure existence of
141 * channel. The channel lock must be held by the caller.
143 * Returns 0 on success, < 0 on error
145 static int lttng_kconsumer_snapshot_channel(
146 struct lttng_consumer_channel
*channel
,
147 uint64_t key
, char *path
, uint64_t relayd_id
,
148 uint64_t nb_packets_per_stream
,
149 struct lttng_consumer_local_data
*ctx
)
152 struct lttng_consumer_stream
*stream
;
154 DBG("Kernel consumer snapshot channel %" PRIu64
, key
);
158 /* Splice is not supported yet for channel snapshot. */
159 if (channel
->output
!= CONSUMER_CHANNEL_MMAP
) {
160 ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",
166 cds_list_for_each_entry(stream
, &channel
->streams
.head
, send_node
) {
167 unsigned long consumed_pos
, produced_pos
;
169 health_code_update();
172 * Lock stream because we are about to change its state.
174 pthread_mutex_lock(&stream
->lock
);
176 LTTNG_ASSERT(channel
->trace_chunk
);
177 if (!lttng_trace_chunk_get(channel
->trace_chunk
)) {
179 * Can't happen barring an internal error as the channel
180 * holds a reference to the trace chunk.
182 ERR("Failed to acquire reference to channel's trace chunk");
186 LTTNG_ASSERT(!stream
->trace_chunk
);
187 stream
->trace_chunk
= channel
->trace_chunk
;
190 * Assign the received relayd ID so we can use it for streaming. The streams
191 * are not visible to anyone so this is OK to change it.
193 stream
->net_seq_idx
= relayd_id
;
194 channel
->relayd_id
= relayd_id
;
195 if (relayd_id
!= (uint64_t) -1ULL) {
196 ret
= consumer_send_relayd_stream(stream
, path
);
198 ERR("sending stream to relayd");
202 ret
= consumer_stream_create_output_files(stream
,
207 DBG("Kernel consumer snapshot stream (%" PRIu64
")",
211 ret
= kernctl_buffer_flush_empty(stream
->wait_fd
);
214 * Doing a buffer flush which does not take into
215 * account empty packets. This is not perfect
216 * for stream intersection, but required as a
217 * fall-back when "flush_empty" is not
218 * implemented by lttng-modules.
220 ret
= kernctl_buffer_flush(stream
->wait_fd
);
222 ERR("Failed to flush kernel stream");
228 ret
= lttng_kconsumer_take_snapshot(stream
);
230 ERR("Taking kernel snapshot");
234 ret
= lttng_kconsumer_get_produced_snapshot(stream
, &produced_pos
);
236 ERR("Produced kernel snapshot position");
240 ret
= lttng_kconsumer_get_consumed_snapshot(stream
, &consumed_pos
);
242 ERR("Consumerd kernel snapshot position");
246 consumed_pos
= consumer_get_consume_start_pos(consumed_pos
,
247 produced_pos
, nb_packets_per_stream
,
248 stream
->max_sb_size
);
250 while ((long) (consumed_pos
- produced_pos
) < 0) {
252 unsigned long len
, padded_len
;
253 const char *subbuf_addr
;
254 struct lttng_buffer_view subbuf_view
;
256 health_code_update();
257 DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos
);
259 ret
= kernctl_get_subbuf(stream
->wait_fd
, &consumed_pos
);
261 if (ret
!= -EAGAIN
) {
262 PERROR("kernctl_get_subbuf snapshot");
265 DBG("Kernel consumer get subbuf failed. Skipping it.");
266 consumed_pos
+= stream
->max_sb_size
;
267 stream
->chan
->lost_packets
++;
271 ret
= kernctl_get_subbuf_size(stream
->wait_fd
, &len
);
273 ERR("Snapshot kernctl_get_subbuf_size");
274 goto error_put_subbuf
;
277 ret
= kernctl_get_padded_subbuf_size(stream
->wait_fd
, &padded_len
);
279 ERR("Snapshot kernctl_get_padded_subbuf_size");
280 goto error_put_subbuf
;
283 ret
= get_current_subbuf_addr(stream
, &subbuf_addr
);
285 goto error_put_subbuf
;
288 subbuf_view
= lttng_buffer_view_init(
289 subbuf_addr
, 0, padded_len
);
290 read_len
= lttng_consumer_on_read_subbuffer_mmap(
291 stream
, &subbuf_view
,
294 * We write the padded len in local tracefiles but the data len
295 * when using a relay. Display the error but continue processing
296 * to try to release the subbuffer.
298 if (relayd_id
!= (uint64_t) -1ULL) {
299 if (read_len
!= len
) {
300 ERR("Error sending to the relay (ret: %zd != len: %lu)",
304 if (read_len
!= padded_len
) {
305 ERR("Error writing to tracefile (ret: %zd != len: %lu)",
306 read_len
, padded_len
);
310 ret
= kernctl_put_subbuf(stream
->wait_fd
);
312 ERR("Snapshot kernctl_put_subbuf");
315 consumed_pos
+= stream
->max_sb_size
;
318 if (relayd_id
== (uint64_t) -1ULL) {
319 if (stream
->out_fd
>= 0) {
320 ret
= close(stream
->out_fd
);
322 PERROR("Kernel consumer snapshot close out_fd");
328 close_relayd_stream(stream
);
329 stream
->net_seq_idx
= (uint64_t) -1ULL;
331 lttng_trace_chunk_put(stream
->trace_chunk
);
332 stream
->trace_chunk
= NULL
;
333 pthread_mutex_unlock(&stream
->lock
);
341 ret
= kernctl_put_subbuf(stream
->wait_fd
);
343 ERR("Snapshot kernctl_put_subbuf error path");
346 pthread_mutex_unlock(&stream
->lock
);
353 * Read the whole metadata available for a snapshot.
354 * RCU read-side lock must be held across this function to ensure existence of
355 * metadata_channel. The channel lock must be held by the caller.
357 * Returns 0 on success, < 0 on error
359 static int lttng_kconsumer_snapshot_metadata(
360 struct lttng_consumer_channel
*metadata_channel
,
361 uint64_t key
, char *path
, uint64_t relayd_id
,
362 struct lttng_consumer_local_data
*ctx
)
364 int ret
, use_relayd
= 0;
366 struct lttng_consumer_stream
*metadata_stream
;
370 DBG("Kernel consumer snapshot metadata with key %" PRIu64
" at path %s",
375 metadata_stream
= metadata_channel
->metadata_stream
;
376 LTTNG_ASSERT(metadata_stream
);
378 pthread_mutex_lock(&metadata_stream
->lock
);
379 LTTNG_ASSERT(metadata_channel
->trace_chunk
);
380 LTTNG_ASSERT(metadata_stream
->trace_chunk
);
382 /* Flag once that we have a valid relayd for the stream. */
383 if (relayd_id
!= (uint64_t) -1ULL) {
388 ret
= consumer_send_relayd_stream(metadata_stream
, path
);
393 ret
= consumer_stream_create_output_files(metadata_stream
,
401 health_code_update();
403 ret_read
= lttng_consumer_read_subbuffer(metadata_stream
, ctx
, true);
405 ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
410 } while (ret_read
> 0);
413 close_relayd_stream(metadata_stream
);
414 metadata_stream
->net_seq_idx
= (uint64_t) -1ULL;
416 if (metadata_stream
->out_fd
>= 0) {
417 ret
= close(metadata_stream
->out_fd
);
419 PERROR("Kernel consumer snapshot metadata close out_fd");
421 * Don't go on error here since the snapshot was successful at this
422 * point but somehow the close failed.
425 metadata_stream
->out_fd
= -1;
426 lttng_trace_chunk_put(metadata_stream
->trace_chunk
);
427 metadata_stream
->trace_chunk
= NULL
;
433 pthread_mutex_unlock(&metadata_stream
->lock
);
434 cds_list_del(&metadata_stream
->send_node
);
435 consumer_stream_destroy(metadata_stream
, NULL
);
436 metadata_channel
->metadata_stream
= NULL
;
442 * Receive command from session daemon and process it.
444 * Return 1 on success else a negative value or 0.
446 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data
*ctx
,
447 int sock
, struct pollfd
*consumer_sockpoll
)
450 enum lttcomm_return_code ret_code
= LTTCOMM_CONSUMERD_SUCCESS
;
451 struct lttcomm_consumer_msg msg
;
453 health_code_update();
458 ret_recv
= lttcomm_recv_unix_sock(sock
, &msg
, sizeof(msg
));
459 if (ret_recv
!= sizeof(msg
)) {
461 lttng_consumer_send_error(ctx
,
462 LTTCOMM_CONSUMERD_ERROR_RECV_CMD
);
469 health_code_update();
471 /* Deprecated command */
472 LTTNG_ASSERT(msg
.cmd_type
!= LTTNG_CONSUMER_STOP
);
474 health_code_update();
476 /* relayd needs RCU read-side protection */
479 switch (msg
.cmd_type
) {
480 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET
:
482 /* Session daemon status message are handled in the following call. */
483 consumer_add_relayd_socket(msg
.u
.relayd_sock
.net_index
,
484 msg
.u
.relayd_sock
.type
, ctx
, sock
, consumer_sockpoll
,
485 &msg
.u
.relayd_sock
.sock
, msg
.u
.relayd_sock
.session_id
,
486 msg
.u
.relayd_sock
.relayd_session_id
);
489 case LTTNG_CONSUMER_ADD_CHANNEL
:
491 struct lttng_consumer_channel
*new_channel
;
492 int ret_send_status
, ret_add_channel
= 0;
493 const uint64_t chunk_id
= msg
.u
.channel
.chunk_id
.value
;
495 health_code_update();
497 /* First send a status message before receiving the fds. */
498 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
499 if (ret_send_status
< 0) {
500 /* Somehow, the session daemon is not responding anymore. */
504 health_code_update();
506 DBG("consumer_add_channel %" PRIu64
, msg
.u
.channel
.channel_key
);
507 new_channel
= consumer_allocate_channel(msg
.u
.channel
.channel_key
,
508 msg
.u
.channel
.session_id
,
509 msg
.u
.channel
.chunk_id
.is_set
?
511 msg
.u
.channel
.pathname
,
513 msg
.u
.channel
.relayd_id
, msg
.u
.channel
.output
,
514 msg
.u
.channel
.tracefile_size
,
515 msg
.u
.channel
.tracefile_count
, 0,
516 msg
.u
.channel
.monitor
,
517 msg
.u
.channel
.live_timer_interval
,
518 msg
.u
.channel
.is_live
,
520 if (new_channel
== NULL
) {
521 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_OUTFD_ERROR
);
524 new_channel
->nb_init_stream_left
= msg
.u
.channel
.nb_init_streams
;
525 switch (msg
.u
.channel
.output
) {
526 case LTTNG_EVENT_SPLICE
:
527 new_channel
->output
= CONSUMER_CHANNEL_SPLICE
;
529 case LTTNG_EVENT_MMAP
:
530 new_channel
->output
= CONSUMER_CHANNEL_MMAP
;
533 ERR("Channel output unknown %d", msg
.u
.channel
.output
);
537 /* Translate and save channel type. */
538 switch (msg
.u
.channel
.type
) {
539 case CONSUMER_CHANNEL_TYPE_DATA
:
540 case CONSUMER_CHANNEL_TYPE_METADATA
:
541 new_channel
->type
= (consumer_channel_type
) msg
.u
.channel
.type
;
548 health_code_update();
550 if (ctx
->on_recv_channel
!= NULL
) {
551 int ret_recv_channel
=
552 ctx
->on_recv_channel(new_channel
);
553 if (ret_recv_channel
== 0) {
554 ret_add_channel
= consumer_add_channel(
556 } else if (ret_recv_channel
< 0) {
561 consumer_add_channel(new_channel
, ctx
);
563 if (msg
.u
.channel
.type
== CONSUMER_CHANNEL_TYPE_DATA
&&
565 int monitor_start_ret
;
567 DBG("Consumer starting monitor timer");
568 consumer_timer_live_start(new_channel
,
569 msg
.u
.channel
.live_timer_interval
);
570 monitor_start_ret
= consumer_timer_monitor_start(
572 msg
.u
.channel
.monitor_timer_interval
);
573 if (monitor_start_ret
< 0) {
574 ERR("Starting channel monitoring timer failed");
579 health_code_update();
581 /* If we received an error in add_channel, we need to report it. */
582 if (ret_add_channel
< 0) {
583 ret_send_status
= consumer_send_status_msg(
584 sock
, ret_add_channel
);
585 if (ret_send_status
< 0) {
593 case LTTNG_CONSUMER_ADD_STREAM
:
596 struct lttng_pipe
*stream_pipe
;
597 struct lttng_consumer_stream
*new_stream
;
598 struct lttng_consumer_channel
*channel
;
600 int ret_send_status
, ret_poll
, ret_get_max_subbuf_size
;
601 ssize_t ret_pipe_write
, ret_recv
;
604 * Get stream's channel reference. Needed when adding the stream to the
607 channel
= consumer_find_channel(msg
.u
.stream
.channel_key
);
610 * We could not find the channel. Can happen if cpu hotplug
611 * happens while tearing down.
613 ERR("Unable to find channel key %" PRIu64
, msg
.u
.stream
.channel_key
);
614 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
617 health_code_update();
619 /* First send a status message before receiving the fds. */
620 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
621 if (ret_send_status
< 0) {
622 /* Somehow, the session daemon is not responding anymore. */
623 goto error_add_stream_fatal
;
626 health_code_update();
628 if (ret_code
!= LTTCOMM_CONSUMERD_SUCCESS
) {
629 /* Channel was not found. */
630 goto error_add_stream_nosignal
;
635 ret_poll
= lttng_consumer_poll_socket(consumer_sockpoll
);
638 goto error_add_stream_fatal
;
641 health_code_update();
643 /* Get stream file descriptor from socket */
644 ret_recv
= lttcomm_recv_fds_unix_sock(sock
, &fd
, 1);
645 if (ret_recv
!= sizeof(fd
)) {
646 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_ERROR_RECV_FD
);
651 health_code_update();
654 * Send status code to session daemon only if the recv works. If the
655 * above recv() failed, the session daemon is notified through the
656 * error socket and the teardown is eventually done.
658 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
659 if (ret_send_status
< 0) {
660 /* Somehow, the session daemon is not responding anymore. */
661 goto error_add_stream_nosignal
;
664 health_code_update();
666 pthread_mutex_lock(&channel
->lock
);
667 new_stream
= consumer_stream_create(
674 channel
->trace_chunk
,
679 if (new_stream
== NULL
) {
684 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_OUTFD_ERROR
);
687 pthread_mutex_unlock(&channel
->lock
);
688 goto error_add_stream_nosignal
;
691 new_stream
->wait_fd
= fd
;
692 ret_get_max_subbuf_size
= kernctl_get_max_subbuf_size(
693 new_stream
->wait_fd
, &new_stream
->max_sb_size
);
694 if (ret_get_max_subbuf_size
< 0) {
695 pthread_mutex_unlock(&channel
->lock
);
696 ERR("Failed to get kernel maximal subbuffer size");
697 goto error_add_stream_nosignal
;
700 consumer_stream_update_channel_attributes(new_stream
,
704 * We've just assigned the channel to the stream so increment the
705 * refcount right now. We don't need to increment the refcount for
706 * streams in no monitor because we handle manually the cleanup of
707 * those. It is very important to make sure there is NO prior
708 * consumer_del_stream() calls or else the refcount will be unbalanced.
710 if (channel
->monitor
) {
711 uatomic_inc(&new_stream
->chan
->refcount
);
715 * The buffer flush is done on the session daemon side for the kernel
716 * so no need for the stream "hangup_flush_done" variable to be
717 * tracked. This is important for a kernel stream since we don't rely
718 * on the flush state of the stream to read data. It's not the case for
719 * user space tracing.
721 new_stream
->hangup_flush_done
= 0;
723 health_code_update();
725 pthread_mutex_lock(&new_stream
->lock
);
726 if (ctx
->on_recv_stream
) {
727 int ret_recv_stream
= ctx
->on_recv_stream(new_stream
);
728 if (ret_recv_stream
< 0) {
729 pthread_mutex_unlock(&new_stream
->lock
);
730 pthread_mutex_unlock(&channel
->lock
);
731 consumer_stream_free(new_stream
);
732 goto error_add_stream_nosignal
;
735 health_code_update();
737 if (new_stream
->metadata_flag
) {
738 channel
->metadata_stream
= new_stream
;
741 /* Do not monitor this stream. */
742 if (!channel
->monitor
) {
743 DBG("Kernel consumer add stream %s in no monitor mode with "
744 "relayd id %" PRIu64
, new_stream
->name
,
745 new_stream
->net_seq_idx
);
746 cds_list_add(&new_stream
->send_node
, &channel
->streams
.head
);
747 pthread_mutex_unlock(&new_stream
->lock
);
748 pthread_mutex_unlock(&channel
->lock
);
752 /* Send stream to relayd if the stream has an ID. */
753 if (new_stream
->net_seq_idx
!= (uint64_t) -1ULL) {
754 int ret_send_relayd_stream
;
756 ret_send_relayd_stream
= consumer_send_relayd_stream(
757 new_stream
, new_stream
->chan
->pathname
);
758 if (ret_send_relayd_stream
< 0) {
759 pthread_mutex_unlock(&new_stream
->lock
);
760 pthread_mutex_unlock(&channel
->lock
);
761 consumer_stream_free(new_stream
);
762 goto error_add_stream_nosignal
;
766 * If adding an extra stream to an already
767 * existing channel (e.g. cpu hotplug), we need
768 * to send the "streams_sent" command to relayd.
770 if (channel
->streams_sent_to_relayd
) {
771 int ret_send_relayd_streams_sent
;
773 ret_send_relayd_streams_sent
=
774 consumer_send_relayd_streams_sent(
775 new_stream
->net_seq_idx
);
776 if (ret_send_relayd_streams_sent
< 0) {
777 pthread_mutex_unlock(&new_stream
->lock
);
778 pthread_mutex_unlock(&channel
->lock
);
779 goto error_add_stream_nosignal
;
783 pthread_mutex_unlock(&new_stream
->lock
);
784 pthread_mutex_unlock(&channel
->lock
);
786 /* Get the right pipe where the stream will be sent. */
787 if (new_stream
->metadata_flag
) {
788 consumer_add_metadata_stream(new_stream
);
789 stream_pipe
= ctx
->consumer_metadata_pipe
;
791 consumer_add_data_stream(new_stream
);
792 stream_pipe
= ctx
->consumer_data_pipe
;
795 /* Visible to other threads */
796 new_stream
->globally_visible
= 1;
798 health_code_update();
800 ret_pipe_write
= lttng_pipe_write(
801 stream_pipe
, &new_stream
, sizeof(new_stream
));
802 if (ret_pipe_write
< 0) {
803 ERR("Consumer write %s stream to pipe %d",
804 new_stream
->metadata_flag
? "metadata" : "data",
805 lttng_pipe_get_writefd(stream_pipe
));
806 if (new_stream
->metadata_flag
) {
807 consumer_del_stream_for_metadata(new_stream
);
809 consumer_del_stream_for_data(new_stream
);
811 goto error_add_stream_nosignal
;
814 DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64
,
815 new_stream
->name
, fd
, new_stream
->chan
->pathname
, new_stream
->relayd_stream_id
);
818 error_add_stream_nosignal
:
820 error_add_stream_fatal
:
823 case LTTNG_CONSUMER_STREAMS_SENT
:
825 struct lttng_consumer_channel
*channel
;
829 * Get stream's channel reference. Needed when adding the stream to the
832 channel
= consumer_find_channel(msg
.u
.sent_streams
.channel_key
);
835 * We could not find the channel. Can happen if cpu hotplug
836 * happens while tearing down.
838 ERR("Unable to find channel key %" PRIu64
,
839 msg
.u
.sent_streams
.channel_key
);
840 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
843 health_code_update();
846 * Send status code to session daemon.
848 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
849 if (ret_send_status
< 0 ||
850 ret_code
!= LTTCOMM_CONSUMERD_SUCCESS
) {
851 /* Somehow, the session daemon is not responding anymore. */
852 goto error_streams_sent_nosignal
;
855 health_code_update();
858 * We should not send this message if we don't monitor the
859 * streams in this channel.
861 if (!channel
->monitor
) {
862 goto end_error_streams_sent
;
865 health_code_update();
866 /* Send stream to relayd if the stream has an ID. */
867 if (msg
.u
.sent_streams
.net_seq_idx
!= (uint64_t) -1ULL) {
868 int ret_send_relay_streams
;
870 ret_send_relay_streams
= consumer_send_relayd_streams_sent(
871 msg
.u
.sent_streams
.net_seq_idx
);
872 if (ret_send_relay_streams
< 0) {
873 goto error_streams_sent_nosignal
;
875 channel
->streams_sent_to_relayd
= true;
877 end_error_streams_sent
:
879 error_streams_sent_nosignal
:
882 case LTTNG_CONSUMER_UPDATE_STREAM
:
887 case LTTNG_CONSUMER_DESTROY_RELAYD
:
889 uint64_t index
= msg
.u
.destroy_relayd
.net_seq_idx
;
890 struct consumer_relayd_sock_pair
*relayd
;
893 DBG("Kernel consumer destroying relayd %" PRIu64
, index
);
895 /* Get relayd reference if exists. */
896 relayd
= consumer_find_relayd(index
);
897 if (relayd
== NULL
) {
898 DBG("Unable to find relayd %" PRIu64
, index
);
899 ret_code
= LTTCOMM_CONSUMERD_RELAYD_FAIL
;
903 * Each relayd socket pair has a refcount of stream attached to it
904 * which tells if the relayd is still active or not depending on the
907 * This will set the destroy flag of the relayd object and destroy it
908 * if the refcount reaches zero when called.
910 * The destroy can happen either here or when a stream fd hangs up.
913 consumer_flag_relayd_for_destroy(relayd
);
916 health_code_update();
918 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
919 if (ret_send_status
< 0) {
920 /* Somehow, the session daemon is not responding anymore. */
926 case LTTNG_CONSUMER_DATA_PENDING
:
928 int32_t ret_data_pending
;
929 uint64_t id
= msg
.u
.data_pending
.session_id
;
932 DBG("Kernel consumer data pending command for id %" PRIu64
, id
);
934 ret_data_pending
= consumer_data_pending(id
);
936 health_code_update();
938 /* Send back returned value to session daemon */
939 ret_send
= lttcomm_send_unix_sock(sock
, &ret_data_pending
,
940 sizeof(ret_data_pending
));
942 PERROR("send data pending ret code");
947 * No need to send back a status message since the data pending
948 * returned value is the response.
952 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL
:
954 struct lttng_consumer_channel
*channel
;
955 uint64_t key
= msg
.u
.snapshot_channel
.key
;
958 channel
= consumer_find_channel(key
);
960 ERR("Channel %" PRIu64
" not found", key
);
961 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
963 pthread_mutex_lock(&channel
->lock
);
964 if (msg
.u
.snapshot_channel
.metadata
== 1) {
967 ret_snapshot
= lttng_kconsumer_snapshot_metadata(
969 msg
.u
.snapshot_channel
.pathname
,
970 msg
.u
.snapshot_channel
.relayd_id
,
972 if (ret_snapshot
< 0) {
973 ERR("Snapshot metadata failed");
974 ret_code
= LTTCOMM_CONSUMERD_SNAPSHOT_FAILED
;
979 ret_snapshot
= lttng_kconsumer_snapshot_channel(
981 msg
.u
.snapshot_channel
.pathname
,
982 msg
.u
.snapshot_channel
.relayd_id
,
983 msg
.u
.snapshot_channel
984 .nb_packets_per_stream
,
986 if (ret_snapshot
< 0) {
987 ERR("Snapshot channel failed");
988 ret_code
= LTTCOMM_CONSUMERD_SNAPSHOT_FAILED
;
991 pthread_mutex_unlock(&channel
->lock
);
993 health_code_update();
995 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
996 if (ret_send_status
< 0) {
997 /* Somehow, the session daemon is not responding anymore. */
1002 case LTTNG_CONSUMER_DESTROY_CHANNEL
:
1004 uint64_t key
= msg
.u
.destroy_channel
.key
;
1005 struct lttng_consumer_channel
*channel
;
1006 int ret_send_status
;
1008 channel
= consumer_find_channel(key
);
1010 ERR("Kernel consumer destroy channel %" PRIu64
" not found", key
);
1011 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
1014 health_code_update();
1016 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1017 if (ret_send_status
< 0) {
1018 /* Somehow, the session daemon is not responding anymore. */
1019 goto end_destroy_channel
;
1022 health_code_update();
1024 /* Stop right now if no channel was found. */
1026 goto end_destroy_channel
;
1030 * This command should ONLY be issued for channel with streams set in
1033 LTTNG_ASSERT(!channel
->monitor
);
1036 * The refcount should ALWAYS be 0 in the case of a channel in no
1039 LTTNG_ASSERT(!uatomic_sub_return(&channel
->refcount
, 1));
1041 consumer_del_channel(channel
);
1042 end_destroy_channel
:
1045 case LTTNG_CONSUMER_DISCARDED_EVENTS
:
1049 struct lttng_consumer_channel
*channel
;
1050 uint64_t id
= msg
.u
.discarded_events
.session_id
;
1051 uint64_t key
= msg
.u
.discarded_events
.channel_key
;
1053 DBG("Kernel consumer discarded events command for session id %"
1054 PRIu64
", channel key %" PRIu64
, id
, key
);
1056 channel
= consumer_find_channel(key
);
1058 ERR("Kernel consumer discarded events channel %"
1059 PRIu64
" not found", key
);
1062 count
= channel
->discarded_events
;
1065 health_code_update();
1067 /* Send back returned value to session daemon */
1068 ret
= lttcomm_send_unix_sock(sock
, &count
, sizeof(count
));
1070 PERROR("send discarded events");
1076 case LTTNG_CONSUMER_LOST_PACKETS
:
1080 struct lttng_consumer_channel
*channel
;
1081 uint64_t id
= msg
.u
.lost_packets
.session_id
;
1082 uint64_t key
= msg
.u
.lost_packets
.channel_key
;
1084 DBG("Kernel consumer lost packets command for session id %"
1085 PRIu64
", channel key %" PRIu64
, id
, key
);
1087 channel
= consumer_find_channel(key
);
1089 ERR("Kernel consumer lost packets channel %"
1090 PRIu64
" not found", key
);
1093 count
= channel
->lost_packets
;
1096 health_code_update();
1098 /* Send back returned value to session daemon */
1099 ret
= lttcomm_send_unix_sock(sock
, &count
, sizeof(count
));
1101 PERROR("send lost packets");
1107 case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE
:
1109 int channel_monitor_pipe
;
1110 int ret_send_status
, ret_set_channel_monitor_pipe
;
1113 ret_code
= LTTCOMM_CONSUMERD_SUCCESS
;
1114 /* Successfully received the command's type. */
1115 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1116 if (ret_send_status
< 0) {
1120 ret_recv
= lttcomm_recv_fds_unix_sock(
1121 sock
, &channel_monitor_pipe
, 1);
1122 if (ret_recv
!= sizeof(channel_monitor_pipe
)) {
1123 ERR("Failed to receive channel monitor pipe");
1127 DBG("Received channel monitor pipe (%d)", channel_monitor_pipe
);
1128 ret_set_channel_monitor_pipe
=
1129 consumer_timer_thread_set_channel_monitor_pipe(
1130 channel_monitor_pipe
);
1131 if (!ret_set_channel_monitor_pipe
) {
1135 ret_code
= LTTCOMM_CONSUMERD_SUCCESS
;
1136 /* Set the pipe as non-blocking. */
1137 ret_fcntl
= fcntl(channel_monitor_pipe
, F_GETFL
, 0);
1138 if (ret_fcntl
== -1) {
1139 PERROR("fcntl get flags of the channel monitoring pipe");
1144 ret_fcntl
= fcntl(channel_monitor_pipe
, F_SETFL
,
1145 flags
| O_NONBLOCK
);
1146 if (ret_fcntl
== -1) {
1147 PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
1150 DBG("Channel monitor pipe set as non-blocking");
1152 ret_code
= LTTCOMM_CONSUMERD_ALREADY_SET
;
1154 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1155 if (ret_send_status
< 0) {
1160 case LTTNG_CONSUMER_ROTATE_CHANNEL
:
1162 struct lttng_consumer_channel
*channel
;
1163 uint64_t key
= msg
.u
.rotate_channel
.key
;
1164 int ret_send_status
;
1166 DBG("Consumer rotate channel %" PRIu64
, key
);
1168 channel
= consumer_find_channel(key
);
1170 ERR("Channel %" PRIu64
" not found", key
);
1171 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
1174 * Sample the rotate position of all the streams in this channel.
1176 int ret_rotate_channel
;
1178 ret_rotate_channel
= lttng_consumer_rotate_channel(
1180 msg
.u
.rotate_channel
.relayd_id
,
1181 msg
.u
.rotate_channel
.metadata
, ctx
);
1182 if (ret_rotate_channel
< 0) {
1183 ERR("Rotate channel failed");
1184 ret_code
= LTTCOMM_CONSUMERD_ROTATION_FAIL
;
1187 health_code_update();
1190 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1191 if (ret_send_status
< 0) {
1192 /* Somehow, the session daemon is not responding anymore. */
1193 goto error_rotate_channel
;
1196 /* Rotate the streams that are ready right now. */
1199 ret_rotate
= lttng_consumer_rotate_ready_streams(
1201 if (ret_rotate
< 0) {
1202 ERR("Rotate ready streams failed");
1206 error_rotate_channel
:
1209 case LTTNG_CONSUMER_CLEAR_CHANNEL
:
1211 struct lttng_consumer_channel
*channel
;
1212 uint64_t key
= msg
.u
.clear_channel
.key
;
1213 int ret_send_status
;
1215 channel
= consumer_find_channel(key
);
1217 DBG("Channel %" PRIu64
" not found", key
);
1218 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
1220 int ret_clear_channel
;
1223 lttng_consumer_clear_channel(channel
);
1224 if (ret_clear_channel
) {
1225 ERR("Clear channel failed");
1226 ret_code
= (lttcomm_return_code
) ret_clear_channel
;
1229 health_code_update();
1232 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1233 if (ret_send_status
< 0) {
1234 /* Somehow, the session daemon is not responding anymore. */
1240 case LTTNG_CONSUMER_INIT
:
1242 int ret_send_status
;
1244 ret_code
= lttng_consumer_init_command(ctx
,
1245 msg
.u
.init
.sessiond_uuid
);
1246 health_code_update();
1247 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1248 if (ret_send_status
< 0) {
1249 /* Somehow, the session daemon is not responding anymore. */
1254 case LTTNG_CONSUMER_CREATE_TRACE_CHUNK
:
1256 const struct lttng_credentials credentials
= {
1257 .uid
= LTTNG_OPTIONAL_INIT_VALUE(msg
.u
.create_trace_chunk
.credentials
.value
.uid
),
1258 .gid
= LTTNG_OPTIONAL_INIT_VALUE(msg
.u
.create_trace_chunk
.credentials
.value
.gid
),
1260 const bool is_local_trace
=
1261 !msg
.u
.create_trace_chunk
.relayd_id
.is_set
;
1262 const uint64_t relayd_id
=
1263 msg
.u
.create_trace_chunk
.relayd_id
.value
;
1264 const char *chunk_override_name
=
1265 *msg
.u
.create_trace_chunk
.override_name
?
1266 msg
.u
.create_trace_chunk
.override_name
:
1268 struct lttng_directory_handle
*chunk_directory_handle
= NULL
;
1271 * The session daemon will only provide a chunk directory file
1272 * descriptor for local traces.
1274 if (is_local_trace
) {
1276 int ret_send_status
;
1279 /* Acnowledge the reception of the command. */
1280 ret_send_status
= consumer_send_status_msg(
1281 sock
, LTTCOMM_CONSUMERD_SUCCESS
);
1282 if (ret_send_status
< 0) {
1283 /* Somehow, the session daemon is not responding anymore. */
1287 ret_recv
= lttcomm_recv_fds_unix_sock(
1288 sock
, &chunk_dirfd
, 1);
1289 if (ret_recv
!= sizeof(chunk_dirfd
)) {
1290 ERR("Failed to receive trace chunk directory file descriptor");
1294 DBG("Received trace chunk directory fd (%d)",
1296 chunk_directory_handle
= lttng_directory_handle_create_from_dirfd(
1298 if (!chunk_directory_handle
) {
1299 ERR("Failed to initialize chunk directory handle from directory file descriptor");
1300 if (close(chunk_dirfd
)) {
1301 PERROR("Failed to close chunk directory file descriptor");
1307 ret_code
= lttng_consumer_create_trace_chunk(
1308 !is_local_trace
? &relayd_id
: NULL
,
1309 msg
.u
.create_trace_chunk
.session_id
,
1310 msg
.u
.create_trace_chunk
.chunk_id
,
1311 (time_t) msg
.u
.create_trace_chunk
1312 .creation_timestamp
,
1313 chunk_override_name
,
1314 msg
.u
.create_trace_chunk
.credentials
.is_set
?
1317 chunk_directory_handle
);
1318 lttng_directory_handle_put(chunk_directory_handle
);
1319 goto end_msg_sessiond
;
1321 case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK
:
1323 enum lttng_trace_chunk_command_type close_command
=
1324 (lttng_trace_chunk_command_type
) msg
.u
.close_trace_chunk
.close_command
.value
;
1325 const uint64_t relayd_id
=
1326 msg
.u
.close_trace_chunk
.relayd_id
.value
;
1327 struct lttcomm_consumer_close_trace_chunk_reply reply
;
1328 char path
[LTTNG_PATH_MAX
];
1331 ret_code
= lttng_consumer_close_trace_chunk(
1332 msg
.u
.close_trace_chunk
.relayd_id
.is_set
?
1335 msg
.u
.close_trace_chunk
.session_id
,
1336 msg
.u
.close_trace_chunk
.chunk_id
,
1337 (time_t) msg
.u
.close_trace_chunk
.close_timestamp
,
1338 msg
.u
.close_trace_chunk
.close_command
.is_set
?
1341 reply
.ret_code
= ret_code
;
1342 reply
.path_length
= strlen(path
) + 1;
1343 ret_send
= lttcomm_send_unix_sock(sock
, &reply
, sizeof(reply
));
1344 if (ret_send
!= sizeof(reply
)) {
1347 ret_send
= lttcomm_send_unix_sock(
1348 sock
, path
, reply
.path_length
);
1349 if (ret_send
!= reply
.path_length
) {
1354 case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS
:
1356 const uint64_t relayd_id
=
1357 msg
.u
.trace_chunk_exists
.relayd_id
.value
;
1359 ret_code
= lttng_consumer_trace_chunk_exists(
1360 msg
.u
.trace_chunk_exists
.relayd_id
.is_set
?
1362 msg
.u
.trace_chunk_exists
.session_id
,
1363 msg
.u
.trace_chunk_exists
.chunk_id
);
1364 goto end_msg_sessiond
;
1366 case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS
:
1368 const uint64_t key
= msg
.u
.open_channel_packets
.key
;
1369 struct lttng_consumer_channel
*channel
=
1370 consumer_find_channel(key
);
1373 pthread_mutex_lock(&channel
->lock
);
1374 ret_code
= lttng_consumer_open_channel_packets(channel
);
1375 pthread_mutex_unlock(&channel
->lock
);
1377 WARN("Channel %" PRIu64
" not found", key
);
1378 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
1381 health_code_update();
1382 goto end_msg_sessiond
;
1390 * Return 1 to indicate success since the 0 value can be a socket
1391 * shutdown during the recv() or send() call.
1396 /* This will issue a consumer stop. */
1401 * The returned value here is not useful since either way we'll return 1 to
1402 * the caller because the session daemon socket management is done
1403 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
1406 int ret_send_status
;
1408 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1409 if (ret_send_status
< 0) {
1417 health_code_update();
1423 * Sync metadata meaning request them to the session daemon and snapshot to the
1424 * metadata thread can consumer them.
1426 * Metadata stream lock MUST be acquired.
1428 enum sync_metadata_status
lttng_kconsumer_sync_metadata(
1429 struct lttng_consumer_stream
*metadata
)
1432 enum sync_metadata_status status
;
1434 LTTNG_ASSERT(metadata
);
1436 ret
= kernctl_buffer_flush(metadata
->wait_fd
);
1438 ERR("Failed to flush kernel stream");
1439 status
= SYNC_METADATA_STATUS_ERROR
;
1443 ret
= kernctl_snapshot(metadata
->wait_fd
);
1445 if (errno
== EAGAIN
) {
1446 /* No new metadata, exit. */
1447 DBG("Sync metadata, no new kernel metadata");
1448 status
= SYNC_METADATA_STATUS_NO_DATA
;
1450 ERR("Sync metadata, taking kernel snapshot failed.");
1451 status
= SYNC_METADATA_STATUS_ERROR
;
1454 status
= SYNC_METADATA_STATUS_NEW_DATA
;
1462 int extract_common_subbuffer_info(struct lttng_consumer_stream
*stream
,
1463 struct stream_subbuffer
*subbuf
)
1467 ret
= kernctl_get_subbuf_size(
1468 stream
->wait_fd
, &subbuf
->info
.data
.subbuf_size
);
1473 ret
= kernctl_get_padded_subbuf_size(
1474 stream
->wait_fd
, &subbuf
->info
.data
.padded_subbuf_size
);
1484 int extract_metadata_subbuffer_info(struct lttng_consumer_stream
*stream
,
1485 struct stream_subbuffer
*subbuf
)
1489 ret
= extract_common_subbuffer_info(stream
, subbuf
);
1494 ret
= kernctl_get_metadata_version(
1495 stream
->wait_fd
, &subbuf
->info
.metadata
.version
);
1505 int extract_data_subbuffer_info(struct lttng_consumer_stream
*stream
,
1506 struct stream_subbuffer
*subbuf
)
1510 ret
= extract_common_subbuffer_info(stream
, subbuf
);
1515 ret
= kernctl_get_packet_size(
1516 stream
->wait_fd
, &subbuf
->info
.data
.packet_size
);
1518 PERROR("Failed to get sub-buffer packet size");
1522 ret
= kernctl_get_content_size(
1523 stream
->wait_fd
, &subbuf
->info
.data
.content_size
);
1525 PERROR("Failed to get sub-buffer content size");
1529 ret
= kernctl_get_timestamp_begin(
1530 stream
->wait_fd
, &subbuf
->info
.data
.timestamp_begin
);
1532 PERROR("Failed to get sub-buffer begin timestamp");
1536 ret
= kernctl_get_timestamp_end(
1537 stream
->wait_fd
, &subbuf
->info
.data
.timestamp_end
);
1539 PERROR("Failed to get sub-buffer end timestamp");
1543 ret
= kernctl_get_events_discarded(
1544 stream
->wait_fd
, &subbuf
->info
.data
.events_discarded
);
1546 PERROR("Failed to get sub-buffer events discarded count");
1550 ret
= kernctl_get_sequence_number(stream
->wait_fd
,
1551 &subbuf
->info
.data
.sequence_number
.value
);
1553 /* May not be supported by older LTTng-modules. */
1554 if (ret
!= -ENOTTY
) {
1555 PERROR("Failed to get sub-buffer sequence number");
1559 subbuf
->info
.data
.sequence_number
.is_set
= true;
1562 ret
= kernctl_get_stream_id(
1563 stream
->wait_fd
, &subbuf
->info
.data
.stream_id
);
1565 PERROR("Failed to get stream id");
1569 ret
= kernctl_get_instance_id(stream
->wait_fd
,
1570 &subbuf
->info
.data
.stream_instance_id
.value
);
1572 /* May not be supported by older LTTng-modules. */
1573 if (ret
!= -ENOTTY
) {
1574 PERROR("Failed to get stream instance id");
1578 subbuf
->info
.data
.stream_instance_id
.is_set
= true;
1585 enum get_next_subbuffer_status
get_subbuffer_common(
1586 struct lttng_consumer_stream
*stream
,
1587 struct stream_subbuffer
*subbuffer
)
1590 enum get_next_subbuffer_status status
;
1592 ret
= kernctl_get_next_subbuf(stream
->wait_fd
);
1595 status
= GET_NEXT_SUBBUFFER_STATUS_OK
;
1600 * The caller only expects -ENODATA when there is no data to
1601 * read, but the kernel tracer returns -EAGAIN when there is
1602 * currently no data for a non-finalized stream, and -ENODATA
1603 * when there is no data for a finalized stream. Those can be
1604 * combined into a -ENODATA return value.
1606 status
= GET_NEXT_SUBBUFFER_STATUS_NO_DATA
;
1609 status
= GET_NEXT_SUBBUFFER_STATUS_ERROR
;
1613 ret
= stream
->read_subbuffer_ops
.extract_subbuffer_info(
1616 status
= GET_NEXT_SUBBUFFER_STATUS_ERROR
;
1623 enum get_next_subbuffer_status
get_next_subbuffer_splice(
1624 struct lttng_consumer_stream
*stream
,
1625 struct stream_subbuffer
*subbuffer
)
1627 const enum get_next_subbuffer_status status
=
1628 get_subbuffer_common(stream
, subbuffer
);
1630 if (status
!= GET_NEXT_SUBBUFFER_STATUS_OK
) {
1634 subbuffer
->buffer
.fd
= stream
->wait_fd
;
1640 enum get_next_subbuffer_status
get_next_subbuffer_mmap(
1641 struct lttng_consumer_stream
*stream
,
1642 struct stream_subbuffer
*subbuffer
)
1645 enum get_next_subbuffer_status status
;
1648 status
= get_subbuffer_common(stream
, subbuffer
);
1649 if (status
!= GET_NEXT_SUBBUFFER_STATUS_OK
) {
1653 ret
= get_current_subbuf_addr(stream
, &addr
);
1655 status
= GET_NEXT_SUBBUFFER_STATUS_ERROR
;
1659 subbuffer
->buffer
.buffer
= lttng_buffer_view_init(
1660 addr
, 0, subbuffer
->info
.data
.padded_subbuf_size
);
1666 enum get_next_subbuffer_status
get_next_subbuffer_metadata_check(struct lttng_consumer_stream
*stream
,
1667 struct stream_subbuffer
*subbuffer
)
1672 enum get_next_subbuffer_status status
;
1674 ret
= kernctl_get_next_subbuf_metadata_check(stream
->wait_fd
,
1680 ret
= stream
->read_subbuffer_ops
.extract_subbuffer_info(
1686 LTTNG_OPTIONAL_SET(&subbuffer
->info
.metadata
.coherent
, coherent
);
1688 ret
= get_current_subbuf_addr(stream
, &addr
);
1693 subbuffer
->buffer
.buffer
= lttng_buffer_view_init(
1694 addr
, 0, subbuffer
->info
.data
.padded_subbuf_size
);
1695 DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s",
1696 subbuffer
->info
.metadata
.padded_subbuf_size
,
1697 coherent
? "true" : "false");
1700 * The caller only expects -ENODATA when there is no data to read, but
1701 * the kernel tracer returns -EAGAIN when there is currently no data
1702 * for a non-finalized stream, and -ENODATA when there is no data for a
1703 * finalized stream. Those can be combined into a -ENODATA return value.
1707 status
= GET_NEXT_SUBBUFFER_STATUS_OK
;
1712 * The caller only expects -ENODATA when there is no data to
1713 * read, but the kernel tracer returns -EAGAIN when there is
1714 * currently no data for a non-finalized stream, and -ENODATA
1715 * when there is no data for a finalized stream. Those can be
1716 * combined into a -ENODATA return value.
1718 status
= GET_NEXT_SUBBUFFER_STATUS_NO_DATA
;
1721 status
= GET_NEXT_SUBBUFFER_STATUS_ERROR
;
1729 int put_next_subbuffer(struct lttng_consumer_stream
*stream
,
1730 struct stream_subbuffer
*subbuffer
)
1732 const int ret
= kernctl_put_next_subbuf(stream
->wait_fd
);
1735 if (ret
== -EFAULT
) {
1736 PERROR("Error in unreserving sub buffer");
1737 } else if (ret
== -EIO
) {
1738 /* Should never happen with newer LTTng versions */
1739 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
/*
 * Probe whether the kernel tracer supports the
 * get_next_subbuf_metadata_check operation: -ENOTTY indicates an older
 * LTTng-modules that does not implement the ioctl.
 */
static bool is_get_next_check_metadata_available(int tracer_fd)
{
	const int ret = kernctl_get_next_subbuf_metadata_check(tracer_fd, NULL);
	const bool available = ret != -ENOTTY;

	if (ret == 0) {
		/* get succeeded, make sure to put the subbuffer. */
		kernctl_put_subbuf(tracer_fd);
	}

	return available;
}
1761 int signal_metadata(struct lttng_consumer_stream
*stream
,
1762 struct lttng_consumer_local_data
*ctx
)
1764 ASSERT_LOCKED(stream
->metadata_rdv_lock
);
1765 return pthread_cond_broadcast(&stream
->metadata_rdv
) ? -errno
: 0;
1769 int lttng_kconsumer_set_stream_ops(
1770 struct lttng_consumer_stream
*stream
)
1774 if (stream
->metadata_flag
&& stream
->chan
->is_live
) {
1775 DBG("Attempting to enable metadata bucketization for live consumers");
1776 if (is_get_next_check_metadata_available(stream
->wait_fd
)) {
1777 DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached");
1778 stream
->read_subbuffer_ops
.get_next_subbuffer
=
1779 get_next_subbuffer_metadata_check
;
1780 ret
= consumer_stream_enable_metadata_bucketization(
1787 * The kernel tracer version is too old to indicate
1788 * when the metadata stream has reached a "coherent"
1789 * (parseable) point.
1791 * This means that a live viewer may see an incoherent
1792 * sequence of metadata and fail to parse it.
1794 WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream");
1795 metadata_bucket_destroy(stream
->metadata_bucket
);
1796 stream
->metadata_bucket
= NULL
;
1799 stream
->read_subbuffer_ops
.on_sleep
= signal_metadata
;
1802 if (!stream
->read_subbuffer_ops
.get_next_subbuffer
) {
1803 if (stream
->chan
->output
== CONSUMER_CHANNEL_MMAP
) {
1804 stream
->read_subbuffer_ops
.get_next_subbuffer
=
1805 get_next_subbuffer_mmap
;
1807 stream
->read_subbuffer_ops
.get_next_subbuffer
=
1808 get_next_subbuffer_splice
;
1812 if (stream
->metadata_flag
) {
1813 stream
->read_subbuffer_ops
.extract_subbuffer_info
=
1814 extract_metadata_subbuffer_info
;
1816 stream
->read_subbuffer_ops
.extract_subbuffer_info
=
1817 extract_data_subbuffer_info
;
1818 if (stream
->chan
->is_live
) {
1819 stream
->read_subbuffer_ops
.send_live_beacon
=
1820 consumer_flush_kernel_index
;
1824 stream
->read_subbuffer_ops
.put_next_subbuffer
= put_next_subbuffer
;
1829 int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream
*stream
)
1833 LTTNG_ASSERT(stream
);
1836 * Don't create anything if this is set for streaming or if there is
1837 * no current trace chunk on the parent channel.
1839 if (stream
->net_seq_idx
== (uint64_t) -1ULL && stream
->chan
->monitor
&&
1840 stream
->chan
->trace_chunk
) {
1841 ret
= consumer_stream_create_output_files(stream
, true);
1847 if (stream
->output
== LTTNG_EVENT_MMAP
) {
1848 /* get the len of the mmap region */
1849 unsigned long mmap_len
;
1851 ret
= kernctl_get_mmap_len(stream
->wait_fd
, &mmap_len
);
1853 PERROR("kernctl_get_mmap_len");
1854 goto error_close_fd
;
1856 stream
->mmap_len
= (size_t) mmap_len
;
1858 stream
->mmap_base
= mmap(NULL
, stream
->mmap_len
, PROT_READ
,
1859 MAP_PRIVATE
, stream
->wait_fd
, 0);
1860 if (stream
->mmap_base
== MAP_FAILED
) {
1861 PERROR("Error mmaping");
1863 goto error_close_fd
;
1867 ret
= lttng_kconsumer_set_stream_ops(stream
);
1869 goto error_close_fd
;
1872 /* we return 0 to let the library handle the FD internally */
1876 if (stream
->out_fd
>= 0) {
1879 err
= close(stream
->out_fd
);
1881 stream
->out_fd
= -1;
1888 * Check if data is still being extracted from the buffers for a specific
1889 * stream. Consumer data lock MUST be acquired before calling this function
1890 * and the stream lock.
1892 * Return 1 if the traced data are still getting read else 0 meaning that the
1893 * data is available for trace viewer reading.
1895 int lttng_kconsumer_data_pending(struct lttng_consumer_stream
*stream
)
1899 LTTNG_ASSERT(stream
);
1901 if (stream
->endpoint_status
!= CONSUMER_ENDPOINT_ACTIVE
) {
1906 ret
= kernctl_get_next_subbuf(stream
->wait_fd
);
1908 /* There is still data so let's put back this subbuffer. */
1909 ret
= kernctl_put_subbuf(stream
->wait_fd
);
1910 LTTNG_ASSERT(ret
== 0);
1911 ret
= 1; /* Data is pending */
1915 /* Data is NOT pending and ready to be read. */