/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
16 #include <sys/socket.h>
17 #include <sys/types.h>
23 #include <bin/lttng-consumerd/health-consumerd.hpp>
24 #include <common/common.hpp>
25 #include <common/kernel-ctl/kernel-ctl.hpp>
26 #include <common/sessiond-comm/sessiond-comm.hpp>
27 #include <common/sessiond-comm/relayd.hpp>
28 #include <common/compat/fcntl.hpp>
29 #include <common/compat/endian.hpp>
30 #include <common/pipe.hpp>
31 #include <common/relayd/relayd.hpp>
32 #include <common/utils.hpp>
33 #include <common/consumer/consumer-stream.hpp>
34 #include <common/index/index.hpp>
35 #include <common/consumer/consumer-timer.hpp>
36 #include <common/optional.hpp>
37 #include <common/buffer-view.hpp>
38 #include <common/consumer/consumer.hpp>
39 #include <common/consumer/metadata-bucket.hpp>
41 #include "kernel-consumer.hpp"
43 extern struct lttng_consumer_global_data the_consumer_data
;
44 extern int consumer_poll_timeout
;
47 * Take a snapshot for a specific fd
49 * Returns 0 on success, < 0 on error
51 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream
*stream
)
54 int infd
= stream
->wait_fd
;
56 ret
= kernctl_snapshot(infd
);
58 * -EAGAIN is not an error, it just means that there is no data to
61 if (ret
!= 0 && ret
!= -EAGAIN
) {
62 PERROR("Getting sub-buffer snapshot.");
69 * Sample consumed and produced positions for a specific fd.
71 * Returns 0 on success, < 0 on error.
73 int lttng_kconsumer_sample_snapshot_positions(
74 struct lttng_consumer_stream
*stream
)
78 return kernctl_snapshot_sample_positions(stream
->wait_fd
);
82 * Get the produced position
84 * Returns 0 on success, < 0 on error
86 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream
*stream
,
90 int infd
= stream
->wait_fd
;
92 ret
= kernctl_snapshot_get_produced(infd
, pos
);
94 PERROR("kernctl_snapshot_get_produced");
101 * Get the consumerd position
103 * Returns 0 on success, < 0 on error
105 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream
*stream
,
109 int infd
= stream
->wait_fd
;
111 ret
= kernctl_snapshot_get_consumed(infd
, pos
);
113 PERROR("kernctl_snapshot_get_consumed");
120 int get_current_subbuf_addr(struct lttng_consumer_stream
*stream
,
124 unsigned long mmap_offset
;
125 const char *mmap_base
= (const char *) stream
->mmap_base
;
127 ret
= kernctl_get_mmap_read_offset(stream
->wait_fd
, &mmap_offset
);
129 PERROR("Failed to get mmap read offset");
133 *addr
= mmap_base
+ mmap_offset
;
139 * Take a snapshot of all the stream of a channel
140 * RCU read-side lock must be held across this function to ensure existence of
143 * Returns 0 on success, < 0 on error
145 static int lttng_kconsumer_snapshot_channel(
146 struct lttng_consumer_channel
*channel
,
147 uint64_t key
, char *path
, uint64_t relayd_id
,
148 uint64_t nb_packets_per_stream
)
151 struct lttng_consumer_stream
*stream
;
153 DBG("Kernel consumer snapshot channel %" PRIu64
, key
);
155 /* Prevent channel modifications while we perform the snapshot.*/
156 pthread_mutex_lock(&channel
->lock
);
160 /* Splice is not supported yet for channel snapshot. */
161 if (channel
->output
!= CONSUMER_CHANNEL_MMAP
) {
162 ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",
168 cds_list_for_each_entry(stream
, &channel
->streams
.head
, send_node
) {
169 unsigned long consumed_pos
, produced_pos
;
171 health_code_update();
174 * Lock stream because we are about to change its state.
176 pthread_mutex_lock(&stream
->lock
);
178 LTTNG_ASSERT(channel
->trace_chunk
);
179 if (!lttng_trace_chunk_get(channel
->trace_chunk
)) {
181 * Can't happen barring an internal error as the channel
182 * holds a reference to the trace chunk.
184 ERR("Failed to acquire reference to channel's trace chunk");
188 LTTNG_ASSERT(!stream
->trace_chunk
);
189 stream
->trace_chunk
= channel
->trace_chunk
;
192 * Assign the received relayd ID so we can use it for streaming. The streams
193 * are not visible to anyone so this is OK to change it.
195 stream
->net_seq_idx
= relayd_id
;
196 channel
->relayd_id
= relayd_id
;
197 if (relayd_id
!= (uint64_t) -1ULL) {
198 ret
= consumer_send_relayd_stream(stream
, path
);
200 ERR("sending stream to relayd");
204 ret
= consumer_stream_create_output_files(stream
,
209 DBG("Kernel consumer snapshot stream (%" PRIu64
")",
213 ret
= kernctl_buffer_flush_empty(stream
->wait_fd
);
216 * Doing a buffer flush which does not take into
217 * account empty packets. This is not perfect
218 * for stream intersection, but required as a
219 * fall-back when "flush_empty" is not
220 * implemented by lttng-modules.
222 ret
= kernctl_buffer_flush(stream
->wait_fd
);
224 ERR("Failed to flush kernel stream");
230 ret
= lttng_kconsumer_take_snapshot(stream
);
232 ERR("Taking kernel snapshot");
236 ret
= lttng_kconsumer_get_produced_snapshot(stream
, &produced_pos
);
238 ERR("Produced kernel snapshot position");
242 ret
= lttng_kconsumer_get_consumed_snapshot(stream
, &consumed_pos
);
244 ERR("Consumerd kernel snapshot position");
248 consumed_pos
= consumer_get_consume_start_pos(consumed_pos
,
249 produced_pos
, nb_packets_per_stream
,
250 stream
->max_sb_size
);
252 while ((long) (consumed_pos
- produced_pos
) < 0) {
254 unsigned long len
, padded_len
;
255 const char *subbuf_addr
;
256 struct lttng_buffer_view subbuf_view
;
258 health_code_update();
259 DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos
);
261 ret
= kernctl_get_subbuf(stream
->wait_fd
, &consumed_pos
);
263 if (ret
!= -EAGAIN
) {
264 PERROR("kernctl_get_subbuf snapshot");
267 DBG("Kernel consumer get subbuf failed. Skipping it.");
268 consumed_pos
+= stream
->max_sb_size
;
269 stream
->chan
->lost_packets
++;
273 ret
= kernctl_get_subbuf_size(stream
->wait_fd
, &len
);
275 ERR("Snapshot kernctl_get_subbuf_size");
276 goto error_put_subbuf
;
279 ret
= kernctl_get_padded_subbuf_size(stream
->wait_fd
, &padded_len
);
281 ERR("Snapshot kernctl_get_padded_subbuf_size");
282 goto error_put_subbuf
;
285 ret
= get_current_subbuf_addr(stream
, &subbuf_addr
);
287 goto error_put_subbuf
;
290 subbuf_view
= lttng_buffer_view_init(
291 subbuf_addr
, 0, padded_len
);
292 read_len
= lttng_consumer_on_read_subbuffer_mmap(
293 stream
, &subbuf_view
,
296 * We write the padded len in local tracefiles but the data len
297 * when using a relay. Display the error but continue processing
298 * to try to release the subbuffer.
300 if (relayd_id
!= (uint64_t) -1ULL) {
301 if (read_len
!= len
) {
302 ERR("Error sending to the relay (ret: %zd != len: %lu)",
306 if (read_len
!= padded_len
) {
307 ERR("Error writing to tracefile (ret: %zd != len: %lu)",
308 read_len
, padded_len
);
312 ret
= kernctl_put_subbuf(stream
->wait_fd
);
314 ERR("Snapshot kernctl_put_subbuf");
317 consumed_pos
+= stream
->max_sb_size
;
320 if (relayd_id
== (uint64_t) -1ULL) {
321 if (stream
->out_fd
>= 0) {
322 ret
= close(stream
->out_fd
);
324 PERROR("Kernel consumer snapshot close out_fd");
330 close_relayd_stream(stream
);
331 stream
->net_seq_idx
= (uint64_t) -1ULL;
333 lttng_trace_chunk_put(stream
->trace_chunk
);
334 stream
->trace_chunk
= NULL
;
335 pthread_mutex_unlock(&stream
->lock
);
343 ret
= kernctl_put_subbuf(stream
->wait_fd
);
345 ERR("Snapshot kernctl_put_subbuf error path");
348 pthread_mutex_unlock(&stream
->lock
);
351 pthread_mutex_unlock(&channel
->lock
);
356 * Read the whole metadata available for a snapshot.
357 * RCU read-side lock must be held across this function to ensure existence of
360 * Returns 0 on success, < 0 on error
362 static int lttng_kconsumer_snapshot_metadata(
363 struct lttng_consumer_channel
*metadata_channel
,
364 uint64_t key
, char *path
, uint64_t relayd_id
,
365 struct lttng_consumer_local_data
*ctx
)
367 int ret
, use_relayd
= 0;
369 struct lttng_consumer_stream
*metadata_stream
;
373 DBG("Kernel consumer snapshot metadata with key %" PRIu64
" at path %s",
378 metadata_stream
= metadata_channel
->metadata_stream
;
379 LTTNG_ASSERT(metadata_stream
);
381 metadata_stream
->read_subbuffer_ops
.lock(metadata_stream
);
382 LTTNG_ASSERT(metadata_channel
->trace_chunk
);
383 LTTNG_ASSERT(metadata_stream
->trace_chunk
);
385 /* Flag once that we have a valid relayd for the stream. */
386 if (relayd_id
!= (uint64_t) -1ULL) {
391 ret
= consumer_send_relayd_stream(metadata_stream
, path
);
396 ret
= consumer_stream_create_output_files(metadata_stream
,
404 health_code_update();
406 ret_read
= lttng_consumer_read_subbuffer(metadata_stream
, ctx
, true);
408 ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
413 } while (ret_read
> 0);
416 close_relayd_stream(metadata_stream
);
417 metadata_stream
->net_seq_idx
= (uint64_t) -1ULL;
419 if (metadata_stream
->out_fd
>= 0) {
420 ret
= close(metadata_stream
->out_fd
);
422 PERROR("Kernel consumer snapshot metadata close out_fd");
424 * Don't go on error here since the snapshot was successful at this
425 * point but somehow the close failed.
428 metadata_stream
->out_fd
= -1;
429 lttng_trace_chunk_put(metadata_stream
->trace_chunk
);
430 metadata_stream
->trace_chunk
= NULL
;
436 metadata_stream
->read_subbuffer_ops
.unlock(metadata_stream
);
437 consumer_stream_destroy(metadata_stream
, NULL
);
438 metadata_channel
->metadata_stream
= NULL
;
444 * Receive command from session daemon and process it.
446 * Return 1 on success else a negative value or 0.
448 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data
*ctx
,
449 int sock
, struct pollfd
*consumer_sockpoll
)
452 enum lttcomm_return_code ret_code
= LTTCOMM_CONSUMERD_SUCCESS
;
453 struct lttcomm_consumer_msg msg
;
455 health_code_update();
460 ret_recv
= lttcomm_recv_unix_sock(sock
, &msg
, sizeof(msg
));
461 if (ret_recv
!= sizeof(msg
)) {
463 lttng_consumer_send_error(ctx
,
464 LTTCOMM_CONSUMERD_ERROR_RECV_CMD
);
471 health_code_update();
473 /* Deprecated command */
474 LTTNG_ASSERT(msg
.cmd_type
!= LTTNG_CONSUMER_STOP
);
476 health_code_update();
478 /* relayd needs RCU read-side protection */
481 switch (msg
.cmd_type
) {
482 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET
:
484 uint32_t major
= msg
.u
.relayd_sock
.major
;
485 uint32_t minor
= msg
.u
.relayd_sock
.minor
;
486 enum lttcomm_sock_proto protocol
= (enum lttcomm_sock_proto
)
487 msg
.u
.relayd_sock
.relayd_socket_protocol
;
489 /* Session daemon status message are handled in the following call. */
490 consumer_add_relayd_socket(msg
.u
.relayd_sock
.net_index
,
491 msg
.u
.relayd_sock
.type
, ctx
, sock
,
492 consumer_sockpoll
, msg
.u
.relayd_sock
.session_id
,
493 msg
.u
.relayd_sock
.relayd_session_id
, major
,
497 case LTTNG_CONSUMER_ADD_CHANNEL
:
499 struct lttng_consumer_channel
*new_channel
;
500 int ret_send_status
, ret_add_channel
= 0;
501 const uint64_t chunk_id
= msg
.u
.channel
.chunk_id
.value
;
503 health_code_update();
505 /* First send a status message before receiving the fds. */
506 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
507 if (ret_send_status
< 0) {
508 /* Somehow, the session daemon is not responding anymore. */
512 health_code_update();
514 DBG("consumer_add_channel %" PRIu64
, msg
.u
.channel
.channel_key
);
515 new_channel
= consumer_allocate_channel(msg
.u
.channel
.channel_key
,
516 msg
.u
.channel
.session_id
,
517 msg
.u
.channel
.chunk_id
.is_set
?
519 msg
.u
.channel
.pathname
,
521 msg
.u
.channel
.relayd_id
, msg
.u
.channel
.output
,
522 msg
.u
.channel
.tracefile_size
,
523 msg
.u
.channel
.tracefile_count
, 0,
524 msg
.u
.channel
.monitor
,
525 msg
.u
.channel
.live_timer_interval
,
526 msg
.u
.channel
.is_live
,
528 if (new_channel
== NULL
) {
529 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_OUTFD_ERROR
);
532 new_channel
->nb_init_stream_left
= msg
.u
.channel
.nb_init_streams
;
533 switch (msg
.u
.channel
.output
) {
534 case LTTNG_EVENT_SPLICE
:
535 new_channel
->output
= CONSUMER_CHANNEL_SPLICE
;
537 case LTTNG_EVENT_MMAP
:
538 new_channel
->output
= CONSUMER_CHANNEL_MMAP
;
541 ERR("Channel output unknown %d", msg
.u
.channel
.output
);
545 /* Translate and save channel type. */
546 switch (msg
.u
.channel
.type
) {
547 case CONSUMER_CHANNEL_TYPE_DATA
:
548 case CONSUMER_CHANNEL_TYPE_METADATA
:
549 new_channel
->type
= (consumer_channel_type
) msg
.u
.channel
.type
;
556 health_code_update();
558 if (ctx
->on_recv_channel
!= NULL
) {
559 int ret_recv_channel
=
560 ctx
->on_recv_channel(new_channel
);
561 if (ret_recv_channel
== 0) {
562 ret_add_channel
= consumer_add_channel(
564 } else if (ret_recv_channel
< 0) {
569 consumer_add_channel(new_channel
, ctx
);
571 if (msg
.u
.channel
.type
== CONSUMER_CHANNEL_TYPE_DATA
&&
573 int monitor_start_ret
;
575 DBG("Consumer starting monitor timer");
576 consumer_timer_live_start(new_channel
,
577 msg
.u
.channel
.live_timer_interval
);
578 monitor_start_ret
= consumer_timer_monitor_start(
580 msg
.u
.channel
.monitor_timer_interval
);
581 if (monitor_start_ret
< 0) {
582 ERR("Starting channel monitoring timer failed");
587 health_code_update();
589 /* If we received an error in add_channel, we need to report it. */
590 if (ret_add_channel
< 0) {
591 ret_send_status
= consumer_send_status_msg(
592 sock
, ret_add_channel
);
593 if (ret_send_status
< 0) {
601 case LTTNG_CONSUMER_ADD_STREAM
:
604 struct lttng_pipe
*stream_pipe
;
605 struct lttng_consumer_stream
*new_stream
;
606 struct lttng_consumer_channel
*channel
;
608 int ret_send_status
, ret_poll
, ret_get_max_subbuf_size
;
609 ssize_t ret_pipe_write
, ret_recv
;
612 * Get stream's channel reference. Needed when adding the stream to the
615 channel
= consumer_find_channel(msg
.u
.stream
.channel_key
);
618 * We could not find the channel. Can happen if cpu hotplug
619 * happens while tearing down.
621 ERR("Unable to find channel key %" PRIu64
, msg
.u
.stream
.channel_key
);
622 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
625 health_code_update();
627 /* First send a status message before receiving the fds. */
628 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
629 if (ret_send_status
< 0) {
630 /* Somehow, the session daemon is not responding anymore. */
631 goto error_add_stream_fatal
;
634 health_code_update();
636 if (ret_code
!= LTTCOMM_CONSUMERD_SUCCESS
) {
637 /* Channel was not found. */
638 goto error_add_stream_nosignal
;
643 ret_poll
= lttng_consumer_poll_socket(consumer_sockpoll
);
646 goto error_add_stream_fatal
;
649 health_code_update();
651 /* Get stream file descriptor from socket */
652 ret_recv
= lttcomm_recv_fds_unix_sock(sock
, &fd
, 1);
653 if (ret_recv
!= sizeof(fd
)) {
654 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_ERROR_RECV_FD
);
659 health_code_update();
662 * Send status code to session daemon only if the recv works. If the
663 * above recv() failed, the session daemon is notified through the
664 * error socket and the teardown is eventually done.
666 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
667 if (ret_send_status
< 0) {
668 /* Somehow, the session daemon is not responding anymore. */
669 goto error_add_stream_nosignal
;
672 health_code_update();
674 pthread_mutex_lock(&channel
->lock
);
675 new_stream
= consumer_stream_create(
682 channel
->trace_chunk
,
687 if (new_stream
== NULL
) {
692 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_OUTFD_ERROR
);
695 pthread_mutex_unlock(&channel
->lock
);
696 goto error_add_stream_nosignal
;
699 new_stream
->wait_fd
= fd
;
700 ret_get_max_subbuf_size
= kernctl_get_max_subbuf_size(
701 new_stream
->wait_fd
, &new_stream
->max_sb_size
);
702 if (ret_get_max_subbuf_size
< 0) {
703 pthread_mutex_unlock(&channel
->lock
);
704 ERR("Failed to get kernel maximal subbuffer size");
705 goto error_add_stream_nosignal
;
708 consumer_stream_update_channel_attributes(new_stream
,
712 * We've just assigned the channel to the stream so increment the
713 * refcount right now. We don't need to increment the refcount for
714 * streams in no monitor because we handle manually the cleanup of
715 * those. It is very important to make sure there is NO prior
716 * consumer_del_stream() calls or else the refcount will be unbalanced.
718 if (channel
->monitor
) {
719 uatomic_inc(&new_stream
->chan
->refcount
);
723 * The buffer flush is done on the session daemon side for the kernel
724 * so no need for the stream "hangup_flush_done" variable to be
725 * tracked. This is important for a kernel stream since we don't rely
726 * on the flush state of the stream to read data. It's not the case for
727 * user space tracing.
729 new_stream
->hangup_flush_done
= 0;
731 health_code_update();
733 pthread_mutex_lock(&new_stream
->lock
);
734 if (ctx
->on_recv_stream
) {
735 int ret_recv_stream
= ctx
->on_recv_stream(new_stream
);
736 if (ret_recv_stream
< 0) {
737 pthread_mutex_unlock(&new_stream
->lock
);
738 pthread_mutex_unlock(&channel
->lock
);
739 consumer_stream_free(new_stream
);
740 goto error_add_stream_nosignal
;
743 health_code_update();
745 if (new_stream
->metadata_flag
) {
746 channel
->metadata_stream
= new_stream
;
749 /* Do not monitor this stream. */
750 if (!channel
->monitor
) {
751 DBG("Kernel consumer add stream %s in no monitor mode with "
752 "relayd id %" PRIu64
, new_stream
->name
,
753 new_stream
->net_seq_idx
);
754 cds_list_add(&new_stream
->send_node
, &channel
->streams
.head
);
755 pthread_mutex_unlock(&new_stream
->lock
);
756 pthread_mutex_unlock(&channel
->lock
);
760 /* Send stream to relayd if the stream has an ID. */
761 if (new_stream
->net_seq_idx
!= (uint64_t) -1ULL) {
762 int ret_send_relayd_stream
;
764 ret_send_relayd_stream
= consumer_send_relayd_stream(
765 new_stream
, new_stream
->chan
->pathname
);
766 if (ret_send_relayd_stream
< 0) {
767 pthread_mutex_unlock(&new_stream
->lock
);
768 pthread_mutex_unlock(&channel
->lock
);
769 consumer_stream_free(new_stream
);
770 goto error_add_stream_nosignal
;
774 * If adding an extra stream to an already
775 * existing channel (e.g. cpu hotplug), we need
776 * to send the "streams_sent" command to relayd.
778 if (channel
->streams_sent_to_relayd
) {
779 int ret_send_relayd_streams_sent
;
781 ret_send_relayd_streams_sent
=
782 consumer_send_relayd_streams_sent(
783 new_stream
->net_seq_idx
);
784 if (ret_send_relayd_streams_sent
< 0) {
785 pthread_mutex_unlock(&new_stream
->lock
);
786 pthread_mutex_unlock(&channel
->lock
);
787 goto error_add_stream_nosignal
;
791 pthread_mutex_unlock(&new_stream
->lock
);
792 pthread_mutex_unlock(&channel
->lock
);
794 /* Get the right pipe where the stream will be sent. */
795 if (new_stream
->metadata_flag
) {
796 consumer_add_metadata_stream(new_stream
);
797 stream_pipe
= ctx
->consumer_metadata_pipe
;
799 consumer_add_data_stream(new_stream
);
800 stream_pipe
= ctx
->consumer_data_pipe
;
803 /* Visible to other threads */
804 new_stream
->globally_visible
= 1;
806 health_code_update();
808 ret_pipe_write
= lttng_pipe_write(
809 stream_pipe
, &new_stream
, sizeof(new_stream
));
810 if (ret_pipe_write
< 0) {
811 ERR("Consumer write %s stream to pipe %d",
812 new_stream
->metadata_flag
? "metadata" : "data",
813 lttng_pipe_get_writefd(stream_pipe
));
814 if (new_stream
->metadata_flag
) {
815 consumer_del_stream_for_metadata(new_stream
);
817 consumer_del_stream_for_data(new_stream
);
819 goto error_add_stream_nosignal
;
822 DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64
,
823 new_stream
->name
, fd
, new_stream
->chan
->pathname
, new_stream
->relayd_stream_id
);
826 error_add_stream_nosignal
:
828 error_add_stream_fatal
:
831 case LTTNG_CONSUMER_STREAMS_SENT
:
833 struct lttng_consumer_channel
*channel
;
837 * Get stream's channel reference. Needed when adding the stream to the
840 channel
= consumer_find_channel(msg
.u
.sent_streams
.channel_key
);
843 * We could not find the channel. Can happen if cpu hotplug
844 * happens while tearing down.
846 ERR("Unable to find channel key %" PRIu64
,
847 msg
.u
.sent_streams
.channel_key
);
848 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
851 health_code_update();
854 * Send status code to session daemon.
856 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
857 if (ret_send_status
< 0 ||
858 ret_code
!= LTTCOMM_CONSUMERD_SUCCESS
) {
859 /* Somehow, the session daemon is not responding anymore. */
860 goto error_streams_sent_nosignal
;
863 health_code_update();
866 * We should not send this message if we don't monitor the
867 * streams in this channel.
869 if (!channel
->monitor
) {
870 goto end_error_streams_sent
;
873 health_code_update();
874 /* Send stream to relayd if the stream has an ID. */
875 if (msg
.u
.sent_streams
.net_seq_idx
!= (uint64_t) -1ULL) {
876 int ret_send_relay_streams
;
878 ret_send_relay_streams
= consumer_send_relayd_streams_sent(
879 msg
.u
.sent_streams
.net_seq_idx
);
880 if (ret_send_relay_streams
< 0) {
881 goto error_streams_sent_nosignal
;
883 channel
->streams_sent_to_relayd
= true;
885 end_error_streams_sent
:
887 error_streams_sent_nosignal
:
890 case LTTNG_CONSUMER_UPDATE_STREAM
:
895 case LTTNG_CONSUMER_DESTROY_RELAYD
:
897 uint64_t index
= msg
.u
.destroy_relayd
.net_seq_idx
;
898 struct consumer_relayd_sock_pair
*relayd
;
901 DBG("Kernel consumer destroying relayd %" PRIu64
, index
);
903 /* Get relayd reference if exists. */
904 relayd
= consumer_find_relayd(index
);
905 if (relayd
== NULL
) {
906 DBG("Unable to find relayd %" PRIu64
, index
);
907 ret_code
= LTTCOMM_CONSUMERD_RELAYD_FAIL
;
911 * Each relayd socket pair has a refcount of stream attached to it
912 * which tells if the relayd is still active or not depending on the
915 * This will set the destroy flag of the relayd object and destroy it
916 * if the refcount reaches zero when called.
918 * The destroy can happen either here or when a stream fd hangs up.
921 consumer_flag_relayd_for_destroy(relayd
);
924 health_code_update();
926 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
927 if (ret_send_status
< 0) {
928 /* Somehow, the session daemon is not responding anymore. */
934 case LTTNG_CONSUMER_DATA_PENDING
:
936 int32_t ret_data_pending
;
937 uint64_t id
= msg
.u
.data_pending
.session_id
;
940 DBG("Kernel consumer data pending command for id %" PRIu64
, id
);
942 ret_data_pending
= consumer_data_pending(id
);
944 health_code_update();
946 /* Send back returned value to session daemon */
947 ret_send
= lttcomm_send_unix_sock(sock
, &ret_data_pending
,
948 sizeof(ret_data_pending
));
950 PERROR("send data pending ret code");
955 * No need to send back a status message since the data pending
956 * returned value is the response.
960 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL
:
962 struct lttng_consumer_channel
*channel
;
963 uint64_t key
= msg
.u
.snapshot_channel
.key
;
966 channel
= consumer_find_channel(key
);
968 ERR("Channel %" PRIu64
" not found", key
);
969 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
971 if (msg
.u
.snapshot_channel
.metadata
== 1) {
974 ret_snapshot
= lttng_kconsumer_snapshot_metadata(
976 msg
.u
.snapshot_channel
.pathname
,
977 msg
.u
.snapshot_channel
.relayd_id
,
979 if (ret_snapshot
< 0) {
980 ERR("Snapshot metadata failed");
981 ret_code
= LTTCOMM_CONSUMERD_SNAPSHOT_FAILED
;
986 ret_snapshot
= lttng_kconsumer_snapshot_channel(
988 msg
.u
.snapshot_channel
.pathname
,
989 msg
.u
.snapshot_channel
.relayd_id
,
990 msg
.u
.snapshot_channel
991 .nb_packets_per_stream
);
992 if (ret_snapshot
< 0) {
993 ERR("Snapshot channel failed");
994 ret_code
= LTTCOMM_CONSUMERD_SNAPSHOT_FAILED
;
998 health_code_update();
1000 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1001 if (ret_send_status
< 0) {
1002 /* Somehow, the session daemon is not responding anymore. */
1007 case LTTNG_CONSUMER_DESTROY_CHANNEL
:
1009 uint64_t key
= msg
.u
.destroy_channel
.key
;
1010 struct lttng_consumer_channel
*channel
;
1011 int ret_send_status
;
1013 channel
= consumer_find_channel(key
);
1015 ERR("Kernel consumer destroy channel %" PRIu64
" not found", key
);
1016 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
1019 health_code_update();
1021 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1022 if (ret_send_status
< 0) {
1023 /* Somehow, the session daemon is not responding anymore. */
1024 goto end_destroy_channel
;
1027 health_code_update();
1029 /* Stop right now if no channel was found. */
1031 goto end_destroy_channel
;
1035 * This command should ONLY be issued for channel with streams set in
1038 LTTNG_ASSERT(!channel
->monitor
);
1041 * The refcount should ALWAYS be 0 in the case of a channel in no
1044 LTTNG_ASSERT(!uatomic_sub_return(&channel
->refcount
, 1));
1046 consumer_del_channel(channel
);
1047 end_destroy_channel
:
1050 case LTTNG_CONSUMER_DISCARDED_EVENTS
:
1054 struct lttng_consumer_channel
*channel
;
1055 uint64_t id
= msg
.u
.discarded_events
.session_id
;
1056 uint64_t key
= msg
.u
.discarded_events
.channel_key
;
1058 DBG("Kernel consumer discarded events command for session id %"
1059 PRIu64
", channel key %" PRIu64
, id
, key
);
1061 channel
= consumer_find_channel(key
);
1063 ERR("Kernel consumer discarded events channel %"
1064 PRIu64
" not found", key
);
1067 count
= channel
->discarded_events
;
1070 health_code_update();
1072 /* Send back returned value to session daemon */
1073 ret
= lttcomm_send_unix_sock(sock
, &count
, sizeof(count
));
1075 PERROR("send discarded events");
1081 case LTTNG_CONSUMER_LOST_PACKETS
:
1085 struct lttng_consumer_channel
*channel
;
1086 uint64_t id
= msg
.u
.lost_packets
.session_id
;
1087 uint64_t key
= msg
.u
.lost_packets
.channel_key
;
1089 DBG("Kernel consumer lost packets command for session id %"
1090 PRIu64
", channel key %" PRIu64
, id
, key
);
1092 channel
= consumer_find_channel(key
);
1094 ERR("Kernel consumer lost packets channel %"
1095 PRIu64
" not found", key
);
1098 count
= channel
->lost_packets
;
1101 health_code_update();
1103 /* Send back returned value to session daemon */
1104 ret
= lttcomm_send_unix_sock(sock
, &count
, sizeof(count
));
1106 PERROR("send lost packets");
1112 case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE
:
1114 int channel_monitor_pipe
;
1115 int ret_send_status
, ret_set_channel_monitor_pipe
;
1118 ret_code
= LTTCOMM_CONSUMERD_SUCCESS
;
1119 /* Successfully received the command's type. */
1120 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1121 if (ret_send_status
< 0) {
1125 ret_recv
= lttcomm_recv_fds_unix_sock(
1126 sock
, &channel_monitor_pipe
, 1);
1127 if (ret_recv
!= sizeof(channel_monitor_pipe
)) {
1128 ERR("Failed to receive channel monitor pipe");
1132 DBG("Received channel monitor pipe (%d)", channel_monitor_pipe
);
1133 ret_set_channel_monitor_pipe
=
1134 consumer_timer_thread_set_channel_monitor_pipe(
1135 channel_monitor_pipe
);
1136 if (!ret_set_channel_monitor_pipe
) {
1140 ret_code
= LTTCOMM_CONSUMERD_SUCCESS
;
1141 /* Set the pipe as non-blocking. */
1142 ret_fcntl
= fcntl(channel_monitor_pipe
, F_GETFL
, 0);
1143 if (ret_fcntl
== -1) {
1144 PERROR("fcntl get flags of the channel monitoring pipe");
1149 ret_fcntl
= fcntl(channel_monitor_pipe
, F_SETFL
,
1150 flags
| O_NONBLOCK
);
1151 if (ret_fcntl
== -1) {
1152 PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
1155 DBG("Channel monitor pipe set as non-blocking");
1157 ret_code
= LTTCOMM_CONSUMERD_ALREADY_SET
;
1159 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1160 if (ret_send_status
< 0) {
1165 case LTTNG_CONSUMER_ROTATE_CHANNEL
:
1167 struct lttng_consumer_channel
*channel
;
1168 uint64_t key
= msg
.u
.rotate_channel
.key
;
1169 int ret_send_status
;
1171 DBG("Consumer rotate channel %" PRIu64
, key
);
1173 channel
= consumer_find_channel(key
);
1175 ERR("Channel %" PRIu64
" not found", key
);
1176 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
1179 * Sample the rotate position of all the streams in this channel.
1181 int ret_rotate_channel
;
1183 ret_rotate_channel
= lttng_consumer_rotate_channel(
1185 msg
.u
.rotate_channel
.relayd_id
);
1186 if (ret_rotate_channel
< 0) {
1187 ERR("Rotate channel failed");
1188 ret_code
= LTTCOMM_CONSUMERD_ROTATION_FAIL
;
1191 health_code_update();
1194 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1195 if (ret_send_status
< 0) {
1196 /* Somehow, the session daemon is not responding anymore. */
1197 goto error_rotate_channel
;
1200 /* Rotate the streams that are ready right now. */
1203 ret_rotate
= lttng_consumer_rotate_ready_streams(
1205 if (ret_rotate
< 0) {
1206 ERR("Rotate ready streams failed");
1210 error_rotate_channel
:
1213 case LTTNG_CONSUMER_CLEAR_CHANNEL
:
1215 struct lttng_consumer_channel
*channel
;
1216 uint64_t key
= msg
.u
.clear_channel
.key
;
1217 int ret_send_status
;
1219 channel
= consumer_find_channel(key
);
1221 DBG("Channel %" PRIu64
" not found", key
);
1222 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
1224 int ret_clear_channel
;
1227 lttng_consumer_clear_channel(channel
);
1228 if (ret_clear_channel
) {
1229 ERR("Clear channel failed");
1230 ret_code
= (lttcomm_return_code
) ret_clear_channel
;
1233 health_code_update();
1236 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1237 if (ret_send_status
< 0) {
1238 /* Somehow, the session daemon is not responding anymore. */
1244 case LTTNG_CONSUMER_INIT
:
1246 int ret_send_status
;
1247 lttng_uuid sessiond_uuid
;
1249 std::copy(std::begin(msg
.u
.init
.sessiond_uuid
), std::end(msg
.u
.init
.sessiond_uuid
),
1250 sessiond_uuid
.begin());
1252 ret_code
= lttng_consumer_init_command(ctx
,
1254 health_code_update();
1255 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1256 if (ret_send_status
< 0) {
1257 /* Somehow, the session daemon is not responding anymore. */
1262 case LTTNG_CONSUMER_CREATE_TRACE_CHUNK
:
1264 const struct lttng_credentials credentials
= {
1265 .uid
= LTTNG_OPTIONAL_INIT_VALUE(msg
.u
.create_trace_chunk
.credentials
.value
.uid
),
1266 .gid
= LTTNG_OPTIONAL_INIT_VALUE(msg
.u
.create_trace_chunk
.credentials
.value
.gid
),
1268 const bool is_local_trace
=
1269 !msg
.u
.create_trace_chunk
.relayd_id
.is_set
;
1270 const uint64_t relayd_id
=
1271 msg
.u
.create_trace_chunk
.relayd_id
.value
;
1272 const char *chunk_override_name
=
1273 *msg
.u
.create_trace_chunk
.override_name
?
1274 msg
.u
.create_trace_chunk
.override_name
:
1276 struct lttng_directory_handle
*chunk_directory_handle
= NULL
;
1279 * The session daemon will only provide a chunk directory file
1280 * descriptor for local traces.
1282 if (is_local_trace
) {
1284 int ret_send_status
;
1287 /* Acnowledge the reception of the command. */
1288 ret_send_status
= consumer_send_status_msg(
1289 sock
, LTTCOMM_CONSUMERD_SUCCESS
);
1290 if (ret_send_status
< 0) {
1291 /* Somehow, the session daemon is not responding anymore. */
1295 ret_recv
= lttcomm_recv_fds_unix_sock(
1296 sock
, &chunk_dirfd
, 1);
1297 if (ret_recv
!= sizeof(chunk_dirfd
)) {
1298 ERR("Failed to receive trace chunk directory file descriptor");
1302 DBG("Received trace chunk directory fd (%d)",
1304 chunk_directory_handle
= lttng_directory_handle_create_from_dirfd(
1306 if (!chunk_directory_handle
) {
1307 ERR("Failed to initialize chunk directory handle from directory file descriptor");
1308 if (close(chunk_dirfd
)) {
1309 PERROR("Failed to close chunk directory file descriptor");
1315 ret_code
= lttng_consumer_create_trace_chunk(
1316 !is_local_trace
? &relayd_id
: NULL
,
1317 msg
.u
.create_trace_chunk
.session_id
,
1318 msg
.u
.create_trace_chunk
.chunk_id
,
1319 (time_t) msg
.u
.create_trace_chunk
1320 .creation_timestamp
,
1321 chunk_override_name
,
1322 msg
.u
.create_trace_chunk
.credentials
.is_set
?
1325 chunk_directory_handle
);
1326 lttng_directory_handle_put(chunk_directory_handle
);
1327 goto end_msg_sessiond
;
1329 case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK
:
1331 enum lttng_trace_chunk_command_type close_command
=
1332 (lttng_trace_chunk_command_type
) msg
.u
.close_trace_chunk
.close_command
.value
;
1333 const uint64_t relayd_id
=
1334 msg
.u
.close_trace_chunk
.relayd_id
.value
;
1335 struct lttcomm_consumer_close_trace_chunk_reply reply
;
1336 char path
[LTTNG_PATH_MAX
];
1339 ret_code
= lttng_consumer_close_trace_chunk(
1340 msg
.u
.close_trace_chunk
.relayd_id
.is_set
?
1343 msg
.u
.close_trace_chunk
.session_id
,
1344 msg
.u
.close_trace_chunk
.chunk_id
,
1345 (time_t) msg
.u
.close_trace_chunk
.close_timestamp
,
1346 msg
.u
.close_trace_chunk
.close_command
.is_set
?
1349 reply
.ret_code
= ret_code
;
1350 reply
.path_length
= strlen(path
) + 1;
1351 ret_send
= lttcomm_send_unix_sock(sock
, &reply
, sizeof(reply
));
1352 if (ret_send
!= sizeof(reply
)) {
1355 ret_send
= lttcomm_send_unix_sock(
1356 sock
, path
, reply
.path_length
);
1357 if (ret_send
!= reply
.path_length
) {
1362 case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS
:
1364 const uint64_t relayd_id
=
1365 msg
.u
.trace_chunk_exists
.relayd_id
.value
;
1367 ret_code
= lttng_consumer_trace_chunk_exists(
1368 msg
.u
.trace_chunk_exists
.relayd_id
.is_set
?
1370 msg
.u
.trace_chunk_exists
.session_id
,
1371 msg
.u
.trace_chunk_exists
.chunk_id
);
1372 goto end_msg_sessiond
;
1374 case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS
:
1376 const uint64_t key
= msg
.u
.open_channel_packets
.key
;
1377 struct lttng_consumer_channel
*channel
=
1378 consumer_find_channel(key
);
1381 pthread_mutex_lock(&channel
->lock
);
1382 ret_code
= lttng_consumer_open_channel_packets(channel
);
1383 pthread_mutex_unlock(&channel
->lock
);
1385 WARN("Channel %" PRIu64
" not found", key
);
1386 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
1389 health_code_update();
1390 goto end_msg_sessiond
;
1398 * Return 1 to indicate success since the 0 value can be a socket
1399 * shutdown during the recv() or send() call.
1404 /* This will issue a consumer stop. */
1409 * The returned value here is not useful since either way we'll return 1 to
1410 * the caller because the session daemon socket management is done
1411 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
1414 int ret_send_status
;
1416 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1417 if (ret_send_status
< 0) {
1425 health_code_update();
1431 * Sync metadata meaning request them to the session daemon and snapshot to the
1432 * metadata thread can consumer them.
1434 * Metadata stream lock MUST be acquired.
1436 enum sync_metadata_status
lttng_kconsumer_sync_metadata(
1437 struct lttng_consumer_stream
*metadata
)
1440 enum sync_metadata_status status
;
1442 LTTNG_ASSERT(metadata
);
1444 ret
= kernctl_buffer_flush(metadata
->wait_fd
);
1446 ERR("Failed to flush kernel stream");
1447 status
= SYNC_METADATA_STATUS_ERROR
;
1451 ret
= kernctl_snapshot(metadata
->wait_fd
);
1453 if (errno
== EAGAIN
) {
1454 /* No new metadata, exit. */
1455 DBG("Sync metadata, no new kernel metadata");
1456 status
= SYNC_METADATA_STATUS_NO_DATA
;
1458 ERR("Sync metadata, taking kernel snapshot failed.");
1459 status
= SYNC_METADATA_STATUS_ERROR
;
1462 status
= SYNC_METADATA_STATUS_NEW_DATA
;
1470 int extract_common_subbuffer_info(struct lttng_consumer_stream
*stream
,
1471 struct stream_subbuffer
*subbuf
)
1475 ret
= kernctl_get_subbuf_size(
1476 stream
->wait_fd
, &subbuf
->info
.data
.subbuf_size
);
1481 ret
= kernctl_get_padded_subbuf_size(
1482 stream
->wait_fd
, &subbuf
->info
.data
.padded_subbuf_size
);
1492 int extract_metadata_subbuffer_info(struct lttng_consumer_stream
*stream
,
1493 struct stream_subbuffer
*subbuf
)
1497 ret
= extract_common_subbuffer_info(stream
, subbuf
);
1502 ret
= kernctl_get_metadata_version(
1503 stream
->wait_fd
, &subbuf
->info
.metadata
.version
);
1513 int extract_data_subbuffer_info(struct lttng_consumer_stream
*stream
,
1514 struct stream_subbuffer
*subbuf
)
1518 ret
= extract_common_subbuffer_info(stream
, subbuf
);
1523 ret
= kernctl_get_packet_size(
1524 stream
->wait_fd
, &subbuf
->info
.data
.packet_size
);
1526 PERROR("Failed to get sub-buffer packet size");
1530 ret
= kernctl_get_content_size(
1531 stream
->wait_fd
, &subbuf
->info
.data
.content_size
);
1533 PERROR("Failed to get sub-buffer content size");
1537 ret
= kernctl_get_timestamp_begin(
1538 stream
->wait_fd
, &subbuf
->info
.data
.timestamp_begin
);
1540 PERROR("Failed to get sub-buffer begin timestamp");
1544 ret
= kernctl_get_timestamp_end(
1545 stream
->wait_fd
, &subbuf
->info
.data
.timestamp_end
);
1547 PERROR("Failed to get sub-buffer end timestamp");
1551 ret
= kernctl_get_events_discarded(
1552 stream
->wait_fd
, &subbuf
->info
.data
.events_discarded
);
1554 PERROR("Failed to get sub-buffer events discarded count");
1558 ret
= kernctl_get_sequence_number(stream
->wait_fd
,
1559 &subbuf
->info
.data
.sequence_number
.value
);
1561 /* May not be supported by older LTTng-modules. */
1562 if (ret
!= -ENOTTY
) {
1563 PERROR("Failed to get sub-buffer sequence number");
1567 subbuf
->info
.data
.sequence_number
.is_set
= true;
1570 ret
= kernctl_get_stream_id(
1571 stream
->wait_fd
, &subbuf
->info
.data
.stream_id
);
1573 PERROR("Failed to get stream id");
1577 ret
= kernctl_get_instance_id(stream
->wait_fd
,
1578 &subbuf
->info
.data
.stream_instance_id
.value
);
1580 /* May not be supported by older LTTng-modules. */
1581 if (ret
!= -ENOTTY
) {
1582 PERROR("Failed to get stream instance id");
1586 subbuf
->info
.data
.stream_instance_id
.is_set
= true;
1593 enum get_next_subbuffer_status
get_subbuffer_common(
1594 struct lttng_consumer_stream
*stream
,
1595 struct stream_subbuffer
*subbuffer
)
1598 enum get_next_subbuffer_status status
;
1600 ret
= kernctl_get_next_subbuf(stream
->wait_fd
);
1603 status
= GET_NEXT_SUBBUFFER_STATUS_OK
;
1608 * The caller only expects -ENODATA when there is no data to
1609 * read, but the kernel tracer returns -EAGAIN when there is
1610 * currently no data for a non-finalized stream, and -ENODATA
1611 * when there is no data for a finalized stream. Those can be
1612 * combined into a -ENODATA return value.
1614 status
= GET_NEXT_SUBBUFFER_STATUS_NO_DATA
;
1617 status
= GET_NEXT_SUBBUFFER_STATUS_ERROR
;
1621 ret
= stream
->read_subbuffer_ops
.extract_subbuffer_info(
1624 status
= GET_NEXT_SUBBUFFER_STATUS_ERROR
;
1631 enum get_next_subbuffer_status
get_next_subbuffer_splice(
1632 struct lttng_consumer_stream
*stream
,
1633 struct stream_subbuffer
*subbuffer
)
1635 const enum get_next_subbuffer_status status
=
1636 get_subbuffer_common(stream
, subbuffer
);
1638 if (status
!= GET_NEXT_SUBBUFFER_STATUS_OK
) {
1642 subbuffer
->buffer
.fd
= stream
->wait_fd
;
1648 enum get_next_subbuffer_status
get_next_subbuffer_mmap(
1649 struct lttng_consumer_stream
*stream
,
1650 struct stream_subbuffer
*subbuffer
)
1653 enum get_next_subbuffer_status status
;
1656 status
= get_subbuffer_common(stream
, subbuffer
);
1657 if (status
!= GET_NEXT_SUBBUFFER_STATUS_OK
) {
1661 ret
= get_current_subbuf_addr(stream
, &addr
);
1663 status
= GET_NEXT_SUBBUFFER_STATUS_ERROR
;
1667 subbuffer
->buffer
.buffer
= lttng_buffer_view_init(
1668 addr
, 0, subbuffer
->info
.data
.padded_subbuf_size
);
1674 enum get_next_subbuffer_status
get_next_subbuffer_metadata_check(struct lttng_consumer_stream
*stream
,
1675 struct stream_subbuffer
*subbuffer
)
1680 enum get_next_subbuffer_status status
;
1682 ret
= kernctl_get_next_subbuf_metadata_check(stream
->wait_fd
,
1688 ret
= stream
->read_subbuffer_ops
.extract_subbuffer_info(
1694 LTTNG_OPTIONAL_SET(&subbuffer
->info
.metadata
.coherent
, coherent
);
1696 ret
= get_current_subbuf_addr(stream
, &addr
);
1701 subbuffer
->buffer
.buffer
= lttng_buffer_view_init(
1702 addr
, 0, subbuffer
->info
.data
.padded_subbuf_size
);
1703 DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s",
1704 subbuffer
->info
.metadata
.padded_subbuf_size
,
1705 coherent
? "true" : "false");
1708 * The caller only expects -ENODATA when there is no data to read, but
1709 * the kernel tracer returns -EAGAIN when there is currently no data
1710 * for a non-finalized stream, and -ENODATA when there is no data for a
1711 * finalized stream. Those can be combined into a -ENODATA return value.
1715 status
= GET_NEXT_SUBBUFFER_STATUS_OK
;
1720 * The caller only expects -ENODATA when there is no data to
1721 * read, but the kernel tracer returns -EAGAIN when there is
1722 * currently no data for a non-finalized stream, and -ENODATA
1723 * when there is no data for a finalized stream. Those can be
1724 * combined into a -ENODATA return value.
1726 status
= GET_NEXT_SUBBUFFER_STATUS_NO_DATA
;
1729 status
= GET_NEXT_SUBBUFFER_STATUS_ERROR
;
1737 int put_next_subbuffer(struct lttng_consumer_stream
*stream
,
1738 struct stream_subbuffer
*subbuffer
__attribute__((unused
)))
1740 const int ret
= kernctl_put_next_subbuf(stream
->wait_fd
);
1743 if (ret
== -EFAULT
) {
1744 PERROR("Error in unreserving sub buffer");
1745 } else if (ret
== -EIO
) {
1746 /* Should never happen with newer LTTng versions */
1747 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
1755 bool is_get_next_check_metadata_available(int tracer_fd
)
1757 const int ret
= kernctl_get_next_subbuf_metadata_check(tracer_fd
, NULL
);
1758 const bool available
= ret
!= -ENOTTY
;
1761 /* get succeeded, make sure to put the subbuffer. */
1762 kernctl_put_subbuf(tracer_fd
);
1769 int signal_metadata(struct lttng_consumer_stream
*stream
,
1770 struct lttng_consumer_local_data
*ctx
__attribute__((unused
)))
1772 ASSERT_LOCKED(stream
->metadata_rdv_lock
);
1773 return pthread_cond_broadcast(&stream
->metadata_rdv
) ? -errno
: 0;
1777 int lttng_kconsumer_set_stream_ops(
1778 struct lttng_consumer_stream
*stream
)
1782 if (stream
->metadata_flag
&& stream
->chan
->is_live
) {
1783 DBG("Attempting to enable metadata bucketization for live consumers");
1784 if (is_get_next_check_metadata_available(stream
->wait_fd
)) {
1785 DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached");
1786 stream
->read_subbuffer_ops
.get_next_subbuffer
=
1787 get_next_subbuffer_metadata_check
;
1788 ret
= consumer_stream_enable_metadata_bucketization(
1795 * The kernel tracer version is too old to indicate
1796 * when the metadata stream has reached a "coherent"
1797 * (parseable) point.
1799 * This means that a live viewer may see an incoherent
1800 * sequence of metadata and fail to parse it.
1802 WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream");
1803 metadata_bucket_destroy(stream
->metadata_bucket
);
1804 stream
->metadata_bucket
= NULL
;
1807 stream
->read_subbuffer_ops
.on_sleep
= signal_metadata
;
1810 if (!stream
->read_subbuffer_ops
.get_next_subbuffer
) {
1811 if (stream
->chan
->output
== CONSUMER_CHANNEL_MMAP
) {
1812 stream
->read_subbuffer_ops
.get_next_subbuffer
=
1813 get_next_subbuffer_mmap
;
1815 stream
->read_subbuffer_ops
.get_next_subbuffer
=
1816 get_next_subbuffer_splice
;
1820 if (stream
->metadata_flag
) {
1821 stream
->read_subbuffer_ops
.extract_subbuffer_info
=
1822 extract_metadata_subbuffer_info
;
1824 stream
->read_subbuffer_ops
.extract_subbuffer_info
=
1825 extract_data_subbuffer_info
;
1826 if (stream
->chan
->is_live
) {
1827 stream
->read_subbuffer_ops
.send_live_beacon
=
1828 consumer_flush_kernel_index
;
1832 stream
->read_subbuffer_ops
.put_next_subbuffer
= put_next_subbuffer
;
1837 int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream
*stream
)
1841 LTTNG_ASSERT(stream
);
1844 * Don't create anything if this is set for streaming or if there is
1845 * no current trace chunk on the parent channel.
1847 if (stream
->net_seq_idx
== (uint64_t) -1ULL && stream
->chan
->monitor
&&
1848 stream
->chan
->trace_chunk
) {
1849 ret
= consumer_stream_create_output_files(stream
, true);
1855 if (stream
->output
== LTTNG_EVENT_MMAP
) {
1856 /* get the len of the mmap region */
1857 unsigned long mmap_len
;
1859 ret
= kernctl_get_mmap_len(stream
->wait_fd
, &mmap_len
);
1861 PERROR("kernctl_get_mmap_len");
1862 goto error_close_fd
;
1864 stream
->mmap_len
= (size_t) mmap_len
;
1866 stream
->mmap_base
= mmap(NULL
, stream
->mmap_len
, PROT_READ
,
1867 MAP_PRIVATE
, stream
->wait_fd
, 0);
1868 if (stream
->mmap_base
== MAP_FAILED
) {
1869 PERROR("Error mmaping");
1871 goto error_close_fd
;
1875 ret
= lttng_kconsumer_set_stream_ops(stream
);
1877 goto error_close_fd
;
1880 /* we return 0 to let the library handle the FD internally */
1884 if (stream
->out_fd
>= 0) {
1887 err
= close(stream
->out_fd
);
1889 stream
->out_fd
= -1;
1896 * Check if data is still being extracted from the buffers for a specific
1897 * stream. Consumer data lock MUST be acquired before calling this function
1898 * and the stream lock.
1900 * Return 1 if the traced data are still getting read else 0 meaning that the
1901 * data is available for trace viewer reading.
1903 int lttng_kconsumer_data_pending(struct lttng_consumer_stream
*stream
)
1907 LTTNG_ASSERT(stream
);
1909 if (stream
->endpoint_status
!= CONSUMER_ENDPOINT_ACTIVE
) {
1914 ret
= kernctl_get_next_subbuf(stream
->wait_fd
);
1916 /* There is still data so let's put back this subbuffer. */
1917 ret
= kernctl_put_subbuf(stream
->wait_fd
);
1918 LTTNG_ASSERT(ret
== 0);
1919 ret
= 1; /* Data is pending */
1923 /* Data is NOT pending and ready to be read. */