2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
6 * SPDX-License-Identifier: GPL-2.0-only
17 #include <sys/socket.h>
18 #include <sys/types.h>
24 #include <bin/lttng-consumerd/health-consumerd.h>
25 #include <common/common.h>
26 #include <common/kernel-ctl/kernel-ctl.h>
27 #include <common/sessiond-comm/sessiond-comm.h>
28 #include <common/sessiond-comm/relayd.h>
29 #include <common/compat/fcntl.h>
30 #include <common/compat/endian.h>
31 #include <common/pipe.h>
32 #include <common/relayd/relayd.h>
33 #include <common/utils.h>
34 #include <common/consumer/consumer-stream.h>
35 #include <common/index/index.h>
36 #include <common/consumer/consumer-timer.h>
37 #include <common/optional.h>
38 #include <common/buffer-view.h>
39 #include <common/consumer/consumer.h>
40 #include <common/consumer/metadata-bucket.h>
42 #include "kernel-consumer.h"
44 extern struct lttng_consumer_global_data the_consumer_data
;
45 extern int consumer_poll_timeout
;
48 * Take a snapshot for a specific fd
50 * Returns 0 on success, < 0 on error
52 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream
*stream
)
55 int infd
= stream
->wait_fd
;
57 ret
= kernctl_snapshot(infd
);
59 * -EAGAIN is not an error, it just means that there is no data to
62 if (ret
!= 0 && ret
!= -EAGAIN
) {
63 PERROR("Getting sub-buffer snapshot.");
70 * Sample consumed and produced positions for a specific fd.
72 * Returns 0 on success, < 0 on error.
74 int lttng_kconsumer_sample_snapshot_positions(
75 struct lttng_consumer_stream
*stream
)
79 return kernctl_snapshot_sample_positions(stream
->wait_fd
);
83 * Get the produced position
85 * Returns 0 on success, < 0 on error
87 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream
*stream
,
91 int infd
= stream
->wait_fd
;
93 ret
= kernctl_snapshot_get_produced(infd
, pos
);
95 PERROR("kernctl_snapshot_get_produced");
102 * Get the consumerd position
104 * Returns 0 on success, < 0 on error
106 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream
*stream
,
110 int infd
= stream
->wait_fd
;
112 ret
= kernctl_snapshot_get_consumed(infd
, pos
);
114 PERROR("kernctl_snapshot_get_consumed");
121 int get_current_subbuf_addr(struct lttng_consumer_stream
*stream
,
125 unsigned long mmap_offset
;
126 const char *mmap_base
= stream
->mmap_base
;
128 ret
= kernctl_get_mmap_read_offset(stream
->wait_fd
, &mmap_offset
);
130 PERROR("Failed to get mmap read offset");
134 *addr
= mmap_base
+ mmap_offset
;
139 static void finalize_snapshot_stream(
140 struct lttng_consumer_stream
*stream
, uint64_t relayd_id
)
142 ASSERT_LOCKED(stream
->lock
);
144 if (relayd_id
== (uint64_t) -1ULL) {
145 if (stream
->out_fd
>= 0) {
146 const int ret
= close(stream
->out_fd
);
149 PERROR("Failed to close stream snapshot output file descriptor");
155 close_relayd_stream(stream
);
156 stream
->net_seq_idx
= (uint64_t) -1ULL;
159 lttng_trace_chunk_put(stream
->trace_chunk
);
160 stream
->trace_chunk
= NULL
;
164 * Take a snapshot of all the stream of a channel
165 * RCU read-side lock must be held across this function to ensure existence of
168 * Returns 0 on success, < 0 on error
170 static int lttng_kconsumer_snapshot_channel(
171 struct lttng_consumer_channel
*channel
,
172 uint64_t key
, char *path
, uint64_t relayd_id
,
173 uint64_t nb_packets_per_stream
,
174 struct lttng_consumer_local_data
*ctx
)
177 struct lttng_consumer_stream
*stream
;
179 DBG("Kernel consumer snapshot channel %" PRIu64
, key
);
181 /* Prevent channel modifications while we perform the snapshot.*/
182 pthread_mutex_lock(&channel
->lock
);
186 /* Splice is not supported yet for channel snapshot. */
187 if (channel
->output
!= CONSUMER_CHANNEL_MMAP
) {
188 ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",
194 cds_list_for_each_entry(stream
, &channel
->streams
.head
, send_node
) {
195 unsigned long consumed_pos
, produced_pos
;
197 health_code_update();
200 * Lock stream because we are about to change its state.
202 pthread_mutex_lock(&stream
->lock
);
204 assert(channel
->trace_chunk
);
205 if (!lttng_trace_chunk_get(channel
->trace_chunk
)) {
207 * Can't happen barring an internal error as the channel
208 * holds a reference to the trace chunk.
210 ERR("Failed to acquire reference to channel's trace chunk");
214 assert(!stream
->trace_chunk
);
215 stream
->trace_chunk
= channel
->trace_chunk
;
218 * Assign the received relayd ID so we can use it for streaming. The streams
219 * are not visible to anyone so this is OK to change it.
221 stream
->net_seq_idx
= relayd_id
;
222 channel
->relayd_id
= relayd_id
;
223 if (relayd_id
!= (uint64_t) -1ULL) {
224 ret
= consumer_send_relayd_stream(stream
, path
);
226 ERR("sending stream to relayd");
227 goto error_finalize_stream
;
230 ret
= consumer_stream_create_output_files(stream
,
233 goto error_finalize_stream
;
235 DBG("Kernel consumer snapshot stream (%" PRIu64
")",
239 ret
= kernctl_buffer_flush_empty(stream
->wait_fd
);
242 * Doing a buffer flush which does not take into
243 * account empty packets. This is not perfect
244 * for stream intersection, but required as a
245 * fall-back when "flush_empty" is not
246 * implemented by lttng-modules.
248 ret
= kernctl_buffer_flush(stream
->wait_fd
);
250 ERR("Failed to flush kernel stream");
251 goto error_finalize_stream
;
256 ret
= lttng_kconsumer_take_snapshot(stream
);
258 ERR("Taking kernel snapshot");
259 goto error_finalize_stream
;
262 ret
= lttng_kconsumer_get_produced_snapshot(stream
, &produced_pos
);
264 ERR("Produced kernel snapshot position");
265 goto error_finalize_stream
;
268 ret
= lttng_kconsumer_get_consumed_snapshot(stream
, &consumed_pos
);
270 ERR("Consumerd kernel snapshot position");
271 goto error_finalize_stream
;
274 consumed_pos
= consumer_get_consume_start_pos(consumed_pos
,
275 produced_pos
, nb_packets_per_stream
,
276 stream
->max_sb_size
);
278 while ((long) (consumed_pos
- produced_pos
) < 0) {
280 unsigned long len
, padded_len
;
281 const char *subbuf_addr
;
282 struct lttng_buffer_view subbuf_view
;
284 health_code_update();
285 DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos
);
287 ret
= kernctl_get_subbuf(stream
->wait_fd
, &consumed_pos
);
289 if (ret
!= -EAGAIN
) {
290 PERROR("kernctl_get_subbuf snapshot");
291 goto error_finalize_stream
;
293 DBG("Kernel consumer get subbuf failed. Skipping it.");
294 consumed_pos
+= stream
->max_sb_size
;
295 stream
->chan
->lost_packets
++;
299 ret
= kernctl_get_subbuf_size(stream
->wait_fd
, &len
);
301 ERR("Snapshot kernctl_get_subbuf_size");
302 goto error_put_subbuf
;
305 ret
= kernctl_get_padded_subbuf_size(stream
->wait_fd
, &padded_len
);
307 ERR("Snapshot kernctl_get_padded_subbuf_size");
308 goto error_put_subbuf
;
311 ret
= get_current_subbuf_addr(stream
, &subbuf_addr
);
313 goto error_put_subbuf
;
316 subbuf_view
= lttng_buffer_view_init(
317 subbuf_addr
, 0, padded_len
);
318 read_len
= lttng_consumer_on_read_subbuffer_mmap(
319 stream
, &subbuf_view
,
322 * We write the padded len in local tracefiles but the data len
323 * when using a relay. Display the error but continue processing
324 * to try to release the subbuffer.
326 if (relayd_id
!= (uint64_t) -1ULL) {
327 if (read_len
!= len
) {
328 ERR("Error sending to the relay (ret: %zd != len: %lu)",
332 if (read_len
!= padded_len
) {
333 ERR("Error writing to tracefile (ret: %zd != len: %lu)",
334 read_len
, padded_len
);
338 ret
= kernctl_put_subbuf(stream
->wait_fd
);
340 ERR("Snapshot kernctl_put_subbuf");
341 goto error_finalize_stream
;
343 consumed_pos
+= stream
->max_sb_size
;
346 finalize_snapshot_stream(stream
, relayd_id
);
347 pthread_mutex_unlock(&stream
->lock
);
355 ret
= kernctl_put_subbuf(stream
->wait_fd
);
357 ERR("Snapshot kernctl_put_subbuf error path");
359 error_finalize_stream
:
360 finalize_snapshot_stream(stream
, relayd_id
);
362 pthread_mutex_unlock(&stream
->lock
);
365 pthread_mutex_unlock(&channel
->lock
);
370 * Read the whole metadata available for a snapshot.
371 * RCU read-side lock must be held across this function to ensure existence of
374 * Returns 0 on success, < 0 on error
376 static int lttng_kconsumer_snapshot_metadata(
377 struct lttng_consumer_channel
*metadata_channel
,
378 uint64_t key
, char *path
, uint64_t relayd_id
,
379 struct lttng_consumer_local_data
*ctx
)
381 int ret
, use_relayd
= 0;
383 struct lttng_consumer_stream
*metadata_stream
;
387 DBG("Kernel consumer snapshot metadata with key %" PRIu64
" at path %s",
392 metadata_stream
= metadata_channel
->metadata_stream
;
393 assert(metadata_stream
);
395 metadata_stream
->read_subbuffer_ops
.lock(metadata_stream
);
396 assert(metadata_channel
->trace_chunk
);
397 assert(metadata_stream
->trace_chunk
);
399 /* Flag once that we have a valid relayd for the stream. */
400 if (relayd_id
!= (uint64_t) -1ULL) {
405 ret
= consumer_send_relayd_stream(metadata_stream
, path
);
410 ret
= consumer_stream_create_output_files(metadata_stream
,
418 health_code_update();
420 ret_read
= lttng_consumer_read_subbuffer(metadata_stream
, ctx
, true);
422 ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
427 } while (ret_read
> 0);
430 close_relayd_stream(metadata_stream
);
431 metadata_stream
->net_seq_idx
= (uint64_t) -1ULL;
433 if (metadata_stream
->out_fd
>= 0) {
434 ret
= close(metadata_stream
->out_fd
);
436 PERROR("Kernel consumer snapshot metadata close out_fd");
438 * Don't go on error here since the snapshot was successful at this
439 * point but somehow the close failed.
442 metadata_stream
->out_fd
= -1;
443 lttng_trace_chunk_put(metadata_stream
->trace_chunk
);
444 metadata_stream
->trace_chunk
= NULL
;
450 metadata_stream
->read_subbuffer_ops
.unlock(metadata_stream
);
451 consumer_stream_destroy(metadata_stream
, NULL
);
452 metadata_channel
->metadata_stream
= NULL
;
458 * Receive command from session daemon and process it.
460 * Return 1 on success else a negative value or 0.
462 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data
*ctx
,
463 int sock
, struct pollfd
*consumer_sockpoll
)
466 enum lttcomm_return_code ret_code
= LTTCOMM_CONSUMERD_SUCCESS
;
467 struct lttcomm_consumer_msg msg
;
469 health_code_update();
474 ret_recv
= lttcomm_recv_unix_sock(sock
, &msg
, sizeof(msg
));
475 if (ret_recv
!= sizeof(msg
)) {
477 lttng_consumer_send_error(ctx
,
478 LTTCOMM_CONSUMERD_ERROR_RECV_CMD
);
485 health_code_update();
487 /* Deprecated command */
488 assert(msg
.cmd_type
!= LTTNG_CONSUMER_STOP
);
490 health_code_update();
492 /* relayd needs RCU read-side protection */
495 switch (msg
.cmd_type
) {
496 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET
:
498 uint32_t major
= msg
.u
.relayd_sock
.major
;
499 uint32_t minor
= msg
.u
.relayd_sock
.minor
;
500 enum lttcomm_sock_proto protocol
= (enum lttcomm_sock_proto
)
501 msg
.u
.relayd_sock
.relayd_socket_protocol
;
503 /* Session daemon status message are handled in the following call. */
504 consumer_add_relayd_socket(msg
.u
.relayd_sock
.net_index
,
505 msg
.u
.relayd_sock
.type
, ctx
, sock
,
506 consumer_sockpoll
, msg
.u
.relayd_sock
.session_id
,
507 msg
.u
.relayd_sock
.relayd_session_id
, major
,
511 case LTTNG_CONSUMER_ADD_CHANNEL
:
513 struct lttng_consumer_channel
*new_channel
;
514 int ret_send_status
, ret_add_channel
= 0;
515 const uint64_t chunk_id
= msg
.u
.channel
.chunk_id
.value
;
517 health_code_update();
519 /* First send a status message before receiving the fds. */
520 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
521 if (ret_send_status
< 0) {
522 /* Somehow, the session daemon is not responding anymore. */
526 health_code_update();
528 DBG("consumer_add_channel %" PRIu64
, msg
.u
.channel
.channel_key
);
529 new_channel
= consumer_allocate_channel(msg
.u
.channel
.channel_key
,
530 msg
.u
.channel
.session_id
,
531 msg
.u
.channel
.chunk_id
.is_set
?
533 msg
.u
.channel
.pathname
,
535 msg
.u
.channel
.relayd_id
, msg
.u
.channel
.output
,
536 msg
.u
.channel
.tracefile_size
,
537 msg
.u
.channel
.tracefile_count
, 0,
538 msg
.u
.channel
.monitor
,
539 msg
.u
.channel
.live_timer_interval
,
540 msg
.u
.channel
.is_live
,
542 if (new_channel
== NULL
) {
543 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_OUTFD_ERROR
);
546 new_channel
->nb_init_stream_left
= msg
.u
.channel
.nb_init_streams
;
547 switch (msg
.u
.channel
.output
) {
548 case LTTNG_EVENT_SPLICE
:
549 new_channel
->output
= CONSUMER_CHANNEL_SPLICE
;
551 case LTTNG_EVENT_MMAP
:
552 new_channel
->output
= CONSUMER_CHANNEL_MMAP
;
555 ERR("Channel output unknown %d", msg
.u
.channel
.output
);
559 /* Translate and save channel type. */
560 switch (msg
.u
.channel
.type
) {
561 case CONSUMER_CHANNEL_TYPE_DATA
:
562 case CONSUMER_CHANNEL_TYPE_METADATA
:
563 new_channel
->type
= msg
.u
.channel
.type
;
570 health_code_update();
572 if (ctx
->on_recv_channel
!= NULL
) {
573 int ret_recv_channel
=
574 ctx
->on_recv_channel(new_channel
);
575 if (ret_recv_channel
== 0) {
576 ret_add_channel
= consumer_add_channel(
578 } else if (ret_recv_channel
< 0) {
583 consumer_add_channel(new_channel
, ctx
);
585 if (msg
.u
.channel
.type
== CONSUMER_CHANNEL_TYPE_DATA
&&
587 int monitor_start_ret
;
589 DBG("Consumer starting monitor timer");
590 consumer_timer_live_start(new_channel
,
591 msg
.u
.channel
.live_timer_interval
);
592 monitor_start_ret
= consumer_timer_monitor_start(
594 msg
.u
.channel
.monitor_timer_interval
);
595 if (monitor_start_ret
< 0) {
596 ERR("Starting channel monitoring timer failed");
601 health_code_update();
603 /* If we received an error in add_channel, we need to report it. */
604 if (ret_add_channel
< 0) {
605 ret_send_status
= consumer_send_status_msg(
606 sock
, ret_add_channel
);
607 if (ret_send_status
< 0) {
615 case LTTNG_CONSUMER_ADD_STREAM
:
618 struct lttng_pipe
*stream_pipe
;
619 struct lttng_consumer_stream
*new_stream
;
620 struct lttng_consumer_channel
*channel
;
622 int ret_send_status
, ret_poll
, ret_get_max_subbuf_size
;
623 ssize_t ret_pipe_write
, ret_recv
;
626 * Get stream's channel reference. Needed when adding the stream to the
629 channel
= consumer_find_channel(msg
.u
.stream
.channel_key
);
632 * We could not find the channel. Can happen if cpu hotplug
633 * happens while tearing down.
635 ERR("Unable to find channel key %" PRIu64
, msg
.u
.stream
.channel_key
);
636 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
639 health_code_update();
641 /* First send a status message before receiving the fds. */
642 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
643 if (ret_send_status
< 0) {
644 /* Somehow, the session daemon is not responding anymore. */
645 goto error_add_stream_fatal
;
648 health_code_update();
650 if (ret_code
!= LTTCOMM_CONSUMERD_SUCCESS
) {
651 /* Channel was not found. */
652 goto error_add_stream_nosignal
;
657 ret_poll
= lttng_consumer_poll_socket(consumer_sockpoll
);
660 goto error_add_stream_fatal
;
663 health_code_update();
665 /* Get stream file descriptor from socket */
666 ret_recv
= lttcomm_recv_fds_unix_sock(sock
, &fd
, 1);
667 if (ret_recv
!= sizeof(fd
)) {
668 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_ERROR_RECV_FD
);
673 health_code_update();
676 * Send status code to session daemon only if the recv works. If the
677 * above recv() failed, the session daemon is notified through the
678 * error socket and the teardown is eventually done.
680 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
681 if (ret_send_status
< 0) {
682 /* Somehow, the session daemon is not responding anymore. */
683 goto error_add_stream_nosignal
;
686 health_code_update();
688 pthread_mutex_lock(&channel
->lock
);
689 new_stream
= consumer_stream_create(
696 channel
->trace_chunk
,
701 if (new_stream
== NULL
) {
706 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_OUTFD_ERROR
);
709 pthread_mutex_unlock(&channel
->lock
);
710 goto error_add_stream_nosignal
;
713 new_stream
->wait_fd
= fd
;
714 ret_get_max_subbuf_size
= kernctl_get_max_subbuf_size(
715 new_stream
->wait_fd
, &new_stream
->max_sb_size
);
716 if (ret_get_max_subbuf_size
< 0) {
717 pthread_mutex_unlock(&channel
->lock
);
718 ERR("Failed to get kernel maximal subbuffer size");
719 goto error_add_stream_nosignal
;
722 consumer_stream_update_channel_attributes(new_stream
,
726 * We've just assigned the channel to the stream so increment the
727 * refcount right now. We don't need to increment the refcount for
728 * streams in no monitor because we handle manually the cleanup of
729 * those. It is very important to make sure there is NO prior
730 * consumer_del_stream() calls or else the refcount will be unbalanced.
732 if (channel
->monitor
) {
733 uatomic_inc(&new_stream
->chan
->refcount
);
737 * The buffer flush is done on the session daemon side for the kernel
738 * so no need for the stream "hangup_flush_done" variable to be
739 * tracked. This is important for a kernel stream since we don't rely
740 * on the flush state of the stream to read data. It's not the case for
741 * user space tracing.
743 new_stream
->hangup_flush_done
= 0;
745 health_code_update();
747 pthread_mutex_lock(&new_stream
->lock
);
748 if (ctx
->on_recv_stream
) {
749 int ret_recv_stream
= ctx
->on_recv_stream(new_stream
);
750 if (ret_recv_stream
< 0) {
751 pthread_mutex_unlock(&new_stream
->lock
);
752 pthread_mutex_unlock(&channel
->lock
);
753 consumer_stream_free(new_stream
);
754 goto error_add_stream_nosignal
;
757 health_code_update();
759 if (new_stream
->metadata_flag
) {
760 channel
->metadata_stream
= new_stream
;
763 /* Do not monitor this stream. */
764 if (!channel
->monitor
) {
765 DBG("Kernel consumer add stream %s in no monitor mode with "
766 "relayd id %" PRIu64
, new_stream
->name
,
767 new_stream
->net_seq_idx
);
768 cds_list_add(&new_stream
->send_node
, &channel
->streams
.head
);
769 pthread_mutex_unlock(&new_stream
->lock
);
770 pthread_mutex_unlock(&channel
->lock
);
774 /* Send stream to relayd if the stream has an ID. */
775 if (new_stream
->net_seq_idx
!= (uint64_t) -1ULL) {
776 int ret_send_relayd_stream
;
778 ret_send_relayd_stream
= consumer_send_relayd_stream(
779 new_stream
, new_stream
->chan
->pathname
);
780 if (ret_send_relayd_stream
< 0) {
781 pthread_mutex_unlock(&new_stream
->lock
);
782 pthread_mutex_unlock(&channel
->lock
);
783 consumer_stream_free(new_stream
);
784 goto error_add_stream_nosignal
;
788 * If adding an extra stream to an already
789 * existing channel (e.g. cpu hotplug), we need
790 * to send the "streams_sent" command to relayd.
792 if (channel
->streams_sent_to_relayd
) {
793 int ret_send_relayd_streams_sent
;
795 ret_send_relayd_streams_sent
=
796 consumer_send_relayd_streams_sent(
797 new_stream
->net_seq_idx
);
798 if (ret_send_relayd_streams_sent
< 0) {
799 pthread_mutex_unlock(&new_stream
->lock
);
800 pthread_mutex_unlock(&channel
->lock
);
801 goto error_add_stream_nosignal
;
805 pthread_mutex_unlock(&new_stream
->lock
);
806 pthread_mutex_unlock(&channel
->lock
);
808 /* Get the right pipe where the stream will be sent. */
809 if (new_stream
->metadata_flag
) {
810 consumer_add_metadata_stream(new_stream
);
811 stream_pipe
= ctx
->consumer_metadata_pipe
;
813 consumer_add_data_stream(new_stream
);
814 stream_pipe
= ctx
->consumer_data_pipe
;
817 /* Visible to other threads */
818 new_stream
->globally_visible
= 1;
820 health_code_update();
822 ret_pipe_write
= lttng_pipe_write(
823 stream_pipe
, &new_stream
, sizeof(new_stream
));
824 if (ret_pipe_write
< 0) {
825 ERR("Consumer write %s stream to pipe %d",
826 new_stream
->metadata_flag
? "metadata" : "data",
827 lttng_pipe_get_writefd(stream_pipe
));
828 if (new_stream
->metadata_flag
) {
829 consumer_del_stream_for_metadata(new_stream
);
831 consumer_del_stream_for_data(new_stream
);
833 goto error_add_stream_nosignal
;
836 DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64
,
837 new_stream
->name
, fd
, new_stream
->chan
->pathname
, new_stream
->relayd_stream_id
);
840 error_add_stream_nosignal
:
842 error_add_stream_fatal
:
845 case LTTNG_CONSUMER_STREAMS_SENT
:
847 struct lttng_consumer_channel
*channel
;
851 * Get stream's channel reference. Needed when adding the stream to the
854 channel
= consumer_find_channel(msg
.u
.sent_streams
.channel_key
);
857 * We could not find the channel. Can happen if cpu hotplug
858 * happens while tearing down.
860 ERR("Unable to find channel key %" PRIu64
,
861 msg
.u
.sent_streams
.channel_key
);
862 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
865 health_code_update();
868 * Send status code to session daemon.
870 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
871 if (ret_send_status
< 0 ||
872 ret_code
!= LTTCOMM_CONSUMERD_SUCCESS
) {
873 /* Somehow, the session daemon is not responding anymore. */
874 goto error_streams_sent_nosignal
;
877 health_code_update();
880 * We should not send this message if we don't monitor the
881 * streams in this channel.
883 if (!channel
->monitor
) {
884 goto end_error_streams_sent
;
887 health_code_update();
888 /* Send stream to relayd if the stream has an ID. */
889 if (msg
.u
.sent_streams
.net_seq_idx
!= (uint64_t) -1ULL) {
890 int ret_send_relay_streams
;
892 ret_send_relay_streams
= consumer_send_relayd_streams_sent(
893 msg
.u
.sent_streams
.net_seq_idx
);
894 if (ret_send_relay_streams
< 0) {
895 goto error_streams_sent_nosignal
;
897 channel
->streams_sent_to_relayd
= true;
899 end_error_streams_sent
:
901 error_streams_sent_nosignal
:
904 case LTTNG_CONSUMER_UPDATE_STREAM
:
909 case LTTNG_CONSUMER_DESTROY_RELAYD
:
911 uint64_t index
= msg
.u
.destroy_relayd
.net_seq_idx
;
912 struct consumer_relayd_sock_pair
*relayd
;
915 DBG("Kernel consumer destroying relayd %" PRIu64
, index
);
917 /* Get relayd reference if exists. */
918 relayd
= consumer_find_relayd(index
);
919 if (relayd
== NULL
) {
920 DBG("Unable to find relayd %" PRIu64
, index
);
921 ret_code
= LTTCOMM_CONSUMERD_RELAYD_FAIL
;
925 * Each relayd socket pair has a refcount of stream attached to it
926 * which tells if the relayd is still active or not depending on the
929 * This will set the destroy flag of the relayd object and destroy it
930 * if the refcount reaches zero when called.
932 * The destroy can happen either here or when a stream fd hangs up.
935 consumer_flag_relayd_for_destroy(relayd
);
938 health_code_update();
940 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
941 if (ret_send_status
< 0) {
942 /* Somehow, the session daemon is not responding anymore. */
948 case LTTNG_CONSUMER_DATA_PENDING
:
950 int32_t ret_data_pending
;
951 uint64_t id
= msg
.u
.data_pending
.session_id
;
954 DBG("Kernel consumer data pending command for id %" PRIu64
, id
);
956 ret_data_pending
= consumer_data_pending(id
);
958 health_code_update();
960 /* Send back returned value to session daemon */
961 ret_send
= lttcomm_send_unix_sock(sock
, &ret_data_pending
,
962 sizeof(ret_data_pending
));
964 PERROR("send data pending ret code");
969 * No need to send back a status message since the data pending
970 * returned value is the response.
974 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL
:
976 struct lttng_consumer_channel
*channel
;
977 uint64_t key
= msg
.u
.snapshot_channel
.key
;
980 channel
= consumer_find_channel(key
);
982 ERR("Channel %" PRIu64
" not found", key
);
983 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
985 if (msg
.u
.snapshot_channel
.metadata
== 1) {
988 ret_snapshot
= lttng_kconsumer_snapshot_metadata(
990 msg
.u
.snapshot_channel
.pathname
,
991 msg
.u
.snapshot_channel
.relayd_id
,
993 if (ret_snapshot
< 0) {
994 ERR("Snapshot metadata failed");
995 ret_code
= LTTCOMM_CONSUMERD_SNAPSHOT_FAILED
;
1000 ret_snapshot
= lttng_kconsumer_snapshot_channel(
1002 msg
.u
.snapshot_channel
.pathname
,
1003 msg
.u
.snapshot_channel
.relayd_id
,
1004 msg
.u
.snapshot_channel
1005 .nb_packets_per_stream
,
1007 if (ret_snapshot
< 0) {
1008 ERR("Snapshot channel failed");
1009 ret_code
= LTTCOMM_CONSUMERD_SNAPSHOT_FAILED
;
1013 health_code_update();
1015 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1016 if (ret_send_status
< 0) {
1017 /* Somehow, the session daemon is not responding anymore. */
1022 case LTTNG_CONSUMER_DESTROY_CHANNEL
:
1024 uint64_t key
= msg
.u
.destroy_channel
.key
;
1025 struct lttng_consumer_channel
*channel
;
1026 int ret_send_status
;
1028 channel
= consumer_find_channel(key
);
1030 ERR("Kernel consumer destroy channel %" PRIu64
" not found", key
);
1031 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
1034 health_code_update();
1036 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1037 if (ret_send_status
< 0) {
1038 /* Somehow, the session daemon is not responding anymore. */
1039 goto end_destroy_channel
;
1042 health_code_update();
1044 /* Stop right now if no channel was found. */
1046 goto end_destroy_channel
;
1050 * This command should ONLY be issued for channel with streams set in
1053 assert(!channel
->monitor
);
1056 * The refcount should ALWAYS be 0 in the case of a channel in no
1059 assert(!uatomic_sub_return(&channel
->refcount
, 1));
1061 consumer_del_channel(channel
);
1062 end_destroy_channel
:
1065 case LTTNG_CONSUMER_DISCARDED_EVENTS
:
1069 struct lttng_consumer_channel
*channel
;
1070 uint64_t id
= msg
.u
.discarded_events
.session_id
;
1071 uint64_t key
= msg
.u
.discarded_events
.channel_key
;
1073 DBG("Kernel consumer discarded events command for session id %"
1074 PRIu64
", channel key %" PRIu64
, id
, key
);
1076 channel
= consumer_find_channel(key
);
1078 ERR("Kernel consumer discarded events channel %"
1079 PRIu64
" not found", key
);
1082 count
= channel
->discarded_events
;
1085 health_code_update();
1087 /* Send back returned value to session daemon */
1088 ret
= lttcomm_send_unix_sock(sock
, &count
, sizeof(count
));
1090 PERROR("send discarded events");
1096 case LTTNG_CONSUMER_LOST_PACKETS
:
1100 struct lttng_consumer_channel
*channel
;
1101 uint64_t id
= msg
.u
.lost_packets
.session_id
;
1102 uint64_t key
= msg
.u
.lost_packets
.channel_key
;
1104 DBG("Kernel consumer lost packets command for session id %"
1105 PRIu64
", channel key %" PRIu64
, id
, key
);
1107 channel
= consumer_find_channel(key
);
1109 ERR("Kernel consumer lost packets channel %"
1110 PRIu64
" not found", key
);
1113 count
= channel
->lost_packets
;
1116 health_code_update();
1118 /* Send back returned value to session daemon */
1119 ret
= lttcomm_send_unix_sock(sock
, &count
, sizeof(count
));
1121 PERROR("send lost packets");
1127 case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE
:
1129 int channel_monitor_pipe
;
1130 int ret_send_status
, ret_set_channel_monitor_pipe
;
1133 ret_code
= LTTCOMM_CONSUMERD_SUCCESS
;
1134 /* Successfully received the command's type. */
1135 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1136 if (ret_send_status
< 0) {
1140 ret_recv
= lttcomm_recv_fds_unix_sock(
1141 sock
, &channel_monitor_pipe
, 1);
1142 if (ret_recv
!= sizeof(channel_monitor_pipe
)) {
1143 ERR("Failed to receive channel monitor pipe");
1147 DBG("Received channel monitor pipe (%d)", channel_monitor_pipe
);
1148 ret_set_channel_monitor_pipe
=
1149 consumer_timer_thread_set_channel_monitor_pipe(
1150 channel_monitor_pipe
);
1151 if (!ret_set_channel_monitor_pipe
) {
1155 ret_code
= LTTCOMM_CONSUMERD_SUCCESS
;
1156 /* Set the pipe as non-blocking. */
1157 ret_fcntl
= fcntl(channel_monitor_pipe
, F_GETFL
, 0);
1158 if (ret_fcntl
== -1) {
1159 PERROR("fcntl get flags of the channel monitoring pipe");
1164 ret_fcntl
= fcntl(channel_monitor_pipe
, F_SETFL
,
1165 flags
| O_NONBLOCK
);
1166 if (ret_fcntl
== -1) {
1167 PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
1170 DBG("Channel monitor pipe set as non-blocking");
1172 ret_code
= LTTCOMM_CONSUMERD_ALREADY_SET
;
1174 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1175 if (ret_send_status
< 0) {
1180 case LTTNG_CONSUMER_ROTATE_CHANNEL
:
1182 struct lttng_consumer_channel
*channel
;
1183 uint64_t key
= msg
.u
.rotate_channel
.key
;
1184 int ret_send_status
;
1186 DBG("Consumer rotate channel %" PRIu64
, key
);
1188 channel
= consumer_find_channel(key
);
1190 ERR("Channel %" PRIu64
" not found", key
);
1191 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
1194 * Sample the rotate position of all the streams in this channel.
1196 int ret_rotate_channel
;
1198 ret_rotate_channel
= lttng_consumer_rotate_channel(
1200 msg
.u
.rotate_channel
.relayd_id
,
1201 msg
.u
.rotate_channel
.metadata
, ctx
);
1202 if (ret_rotate_channel
< 0) {
1203 ERR("Rotate channel failed");
1204 ret_code
= LTTCOMM_CONSUMERD_ROTATION_FAIL
;
1207 health_code_update();
1210 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1211 if (ret_send_status
< 0) {
1212 /* Somehow, the session daemon is not responding anymore. */
1213 goto error_rotate_channel
;
1216 /* Rotate the streams that are ready right now. */
1219 ret_rotate
= lttng_consumer_rotate_ready_streams(
1221 if (ret_rotate
< 0) {
1222 ERR("Rotate ready streams failed");
1226 error_rotate_channel
:
1229 case LTTNG_CONSUMER_CLEAR_CHANNEL
:
1231 struct lttng_consumer_channel
*channel
;
1232 uint64_t key
= msg
.u
.clear_channel
.key
;
1233 int ret_send_status
;
1235 channel
= consumer_find_channel(key
);
1237 DBG("Channel %" PRIu64
" not found", key
);
1238 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
1240 int ret_clear_channel
;
1243 lttng_consumer_clear_channel(channel
);
1244 if (ret_clear_channel
) {
1245 ERR("Clear channel failed");
1246 ret_code
= ret_clear_channel
;
1249 health_code_update();
1252 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1253 if (ret_send_status
< 0) {
1254 /* Somehow, the session daemon is not responding anymore. */
1260 case LTTNG_CONSUMER_INIT
:
1262 int ret_send_status
;
1264 ret_code
= lttng_consumer_init_command(ctx
,
1265 msg
.u
.init
.sessiond_uuid
);
1266 health_code_update();
1267 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1268 if (ret_send_status
< 0) {
1269 /* Somehow, the session daemon is not responding anymore. */
1274 case LTTNG_CONSUMER_CREATE_TRACE_CHUNK
:
1276 const struct lttng_credentials credentials
= {
1277 .uid
= LTTNG_OPTIONAL_INIT_VALUE(msg
.u
.create_trace_chunk
.credentials
.value
.uid
),
1278 .gid
= LTTNG_OPTIONAL_INIT_VALUE(msg
.u
.create_trace_chunk
.credentials
.value
.gid
),
1280 const bool is_local_trace
=
1281 !msg
.u
.create_trace_chunk
.relayd_id
.is_set
;
1282 const uint64_t relayd_id
=
1283 msg
.u
.create_trace_chunk
.relayd_id
.value
;
1284 const char *chunk_override_name
=
1285 *msg
.u
.create_trace_chunk
.override_name
?
1286 msg
.u
.create_trace_chunk
.override_name
:
1288 struct lttng_directory_handle
*chunk_directory_handle
= NULL
;
1291 * The session daemon will only provide a chunk directory file
1292 * descriptor for local traces.
1294 if (is_local_trace
) {
1296 int ret_send_status
;
1299 /* Acnowledge the reception of the command. */
1300 ret_send_status
= consumer_send_status_msg(
1301 sock
, LTTCOMM_CONSUMERD_SUCCESS
);
1302 if (ret_send_status
< 0) {
1303 /* Somehow, the session daemon is not responding anymore. */
1307 ret_recv
= lttcomm_recv_fds_unix_sock(
1308 sock
, &chunk_dirfd
, 1);
1309 if (ret_recv
!= sizeof(chunk_dirfd
)) {
1310 ERR("Failed to receive trace chunk directory file descriptor");
1314 DBG("Received trace chunk directory fd (%d)",
1316 chunk_directory_handle
= lttng_directory_handle_create_from_dirfd(
1318 if (!chunk_directory_handle
) {
1319 ERR("Failed to initialize chunk directory handle from directory file descriptor");
1320 if (close(chunk_dirfd
)) {
1321 PERROR("Failed to close chunk directory file descriptor");
1327 ret_code
= lttng_consumer_create_trace_chunk(
1328 !is_local_trace
? &relayd_id
: NULL
,
1329 msg
.u
.create_trace_chunk
.session_id
,
1330 msg
.u
.create_trace_chunk
.chunk_id
,
1331 (time_t) msg
.u
.create_trace_chunk
1332 .creation_timestamp
,
1333 chunk_override_name
,
1334 msg
.u
.create_trace_chunk
.credentials
.is_set
?
1337 chunk_directory_handle
);
1338 lttng_directory_handle_put(chunk_directory_handle
);
1339 goto end_msg_sessiond
;
1341 case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK
:
1343 enum lttng_trace_chunk_command_type close_command
=
1344 msg
.u
.close_trace_chunk
.close_command
.value
;
1345 const uint64_t relayd_id
=
1346 msg
.u
.close_trace_chunk
.relayd_id
.value
;
1347 struct lttcomm_consumer_close_trace_chunk_reply reply
;
1348 char path
[LTTNG_PATH_MAX
];
1351 ret_code
= lttng_consumer_close_trace_chunk(
1352 msg
.u
.close_trace_chunk
.relayd_id
.is_set
?
1355 msg
.u
.close_trace_chunk
.session_id
,
1356 msg
.u
.close_trace_chunk
.chunk_id
,
1357 (time_t) msg
.u
.close_trace_chunk
.close_timestamp
,
1358 msg
.u
.close_trace_chunk
.close_command
.is_set
?
1361 reply
.ret_code
= ret_code
;
1362 reply
.path_length
= strlen(path
) + 1;
1363 ret_send
= lttcomm_send_unix_sock(sock
, &reply
, sizeof(reply
));
1364 if (ret_send
!= sizeof(reply
)) {
1367 ret_send
= lttcomm_send_unix_sock(
1368 sock
, path
, reply
.path_length
);
1369 if (ret_send
!= reply
.path_length
) {
1374 case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS
:
1376 const uint64_t relayd_id
=
1377 msg
.u
.trace_chunk_exists
.relayd_id
.value
;
1379 ret_code
= lttng_consumer_trace_chunk_exists(
1380 msg
.u
.trace_chunk_exists
.relayd_id
.is_set
?
1382 msg
.u
.trace_chunk_exists
.session_id
,
1383 msg
.u
.trace_chunk_exists
.chunk_id
);
1384 goto end_msg_sessiond
;
1386 case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS
:
1388 const uint64_t key
= msg
.u
.open_channel_packets
.key
;
1389 struct lttng_consumer_channel
*channel
=
1390 consumer_find_channel(key
);
1393 pthread_mutex_lock(&channel
->lock
);
1394 ret_code
= lttng_consumer_open_channel_packets(channel
);
1395 pthread_mutex_unlock(&channel
->lock
);
1397 WARN("Channel %" PRIu64
" not found", key
);
1398 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
1401 health_code_update();
1402 goto end_msg_sessiond
;
1410 * Return 1 to indicate success since the 0 value can be a socket
1411 * shutdown during the recv() or send() call.
1416 /* This will issue a consumer stop. */
1421 * The returned value here is not useful since either way we'll return 1 to
1422 * the caller because the session daemon socket management is done
1423 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
1426 int ret_send_status
;
1428 ret_send_status
= consumer_send_status_msg(sock
, ret_code
);
1429 if (ret_send_status
< 0) {
1437 health_code_update();
/*
 * Sync metadata, meaning request it from the session daemon and take a
 * snapshot so the metadata thread can consume it.
 *
 * The metadata stream lock MUST be acquired by the caller.
 *
 * Returns SYNC_METADATA_STATUS_NEW_DATA when a new snapshot was taken,
 * SYNC_METADATA_STATUS_NO_DATA when there is no new metadata, and
 * SYNC_METADATA_STATUS_ERROR on failure.
 */
enum sync_metadata_status lttng_kconsumer_sync_metadata(
		struct lttng_consumer_stream *metadata)
{
	int ret;
	enum sync_metadata_status status;

	assert(metadata);
	assert(metadata->chan);

	/* Flush pending metadata to the buffers so the snapshot sees it. */
	ret = kernctl_buffer_flush(metadata->wait_fd);
	if (ret < 0) {
		ERR("Failed to flush kernel stream");
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	}

	ret = kernctl_snapshot(metadata->wait_fd);
	if (ret < 0) {
		/*
		 * EAGAIN is not an error here: the tracer reports it when
		 * there is simply nothing new to snapshot.
		 */
		if (errno == EAGAIN) {
			/* No new metadata, exit. */
			DBG("Sync metadata, no new kernel metadata");
			status = SYNC_METADATA_STATUS_NO_DATA;
		} else {
			ERR("Sync metadata, taking kernel snapshot failed.");
			status = SYNC_METADATA_STATUS_ERROR;
		}
		goto end;
	}

	status = SYNC_METADATA_STATUS_NEW_DATA;
end:
	return status;
}
1482 int extract_common_subbuffer_info(struct lttng_consumer_stream
*stream
,
1483 struct stream_subbuffer
*subbuf
)
1487 ret
= kernctl_get_subbuf_size(
1488 stream
->wait_fd
, &subbuf
->info
.data
.subbuf_size
);
1493 ret
= kernctl_get_padded_subbuf_size(
1494 stream
->wait_fd
, &subbuf
->info
.data
.padded_subbuf_size
);
1504 int extract_metadata_subbuffer_info(struct lttng_consumer_stream
*stream
,
1505 struct stream_subbuffer
*subbuf
)
1509 ret
= extract_common_subbuffer_info(stream
, subbuf
);
1514 ret
= kernctl_get_metadata_version(
1515 stream
->wait_fd
, &subbuf
->info
.metadata
.version
);
/*
 * Fill the data-specific sub-buffer information: common sizes, packet and
 * content sizes, begin/end timestamps, discarded event count, and the
 * optional sequence number / stream instance id.
 *
 * The sequence number and instance id ioctls may be missing from older
 * LTTng-modules; -ENOTTY from those is tolerated and the corresponding
 * optional fields are simply left unset.
 *
 * Returns 0 on success, a negative kernctl error code otherwise.
 */
static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	ret = kernctl_get_packet_size(
			stream->wait_fd, &subbuf->info.data.packet_size);
	if (ret) {
		PERROR("Failed to get sub-buffer packet size");
		goto end;
	}

	ret = kernctl_get_content_size(
			stream->wait_fd, &subbuf->info.data.content_size);
	if (ret) {
		PERROR("Failed to get sub-buffer content size");
		goto end;
	}

	ret = kernctl_get_timestamp_begin(
			stream->wait_fd, &subbuf->info.data.timestamp_begin);
	if (ret) {
		PERROR("Failed to get sub-buffer begin timestamp");
		goto end;
	}

	ret = kernctl_get_timestamp_end(
			stream->wait_fd, &subbuf->info.data.timestamp_end);
	if (ret) {
		PERROR("Failed to get sub-buffer end timestamp");
		goto end;
	}

	ret = kernctl_get_events_discarded(
			stream->wait_fd, &subbuf->info.data.events_discarded);
	if (ret) {
		PERROR("Failed to get sub-buffer events discarded count");
		goto end;
	}

	ret = kernctl_get_sequence_number(stream->wait_fd,
			&subbuf->info.data.sequence_number.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get sub-buffer sequence number");
			goto end;
		}
	} else {
		subbuf->info.data.sequence_number.is_set = true;
	}

	ret = kernctl_get_stream_id(
			stream->wait_fd, &subbuf->info.data.stream_id);
	if (ret < 0) {
		PERROR("Failed to get stream id");
		goto end;
	}

	ret = kernctl_get_instance_id(stream->wait_fd,
			&subbuf->info.data.stream_instance_id.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get stream instance id");
			goto end;
		}
	} else {
		subbuf->info.data.stream_instance_id.is_set = true;
	}
	/* Clear a tolerated -ENOTTY so the caller sees success. */
	ret = 0;
end:
	return ret;
}
/*
 * Reserve the next sub-buffer of `stream` and extract its per-type
 * information via the stream's extract_subbuffer_info operation.
 *
 * Maps the kernel tracer's return codes onto the consumer's
 * get_next_subbuffer_status values.
 */
static enum get_next_subbuffer_status get_subbuffer_common(
		struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	enum get_next_subbuffer_status status;

	ret = kernctl_get_next_subbuf(stream->wait_fd);
	switch (ret) {
	case 0:
		status = GET_NEXT_SUBBUFFER_STATUS_OK;
		break;
	case -ENODATA:
	case -EAGAIN:
		/*
		 * The caller only expects -ENODATA when there is no data to
		 * read, but the kernel tracer returns -EAGAIN when there is
		 * currently no data for a non-finalized stream, and -ENODATA
		 * when there is no data for a finalized stream. Those can be
		 * combined into a -ENODATA return value.
		 */
		status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
		goto end;
	default:
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = stream->read_subbuffer_ops.extract_subbuffer_info(
			stream, subbuffer);
	if (ret) {
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
	}
end:
	return status;
}
1643 enum get_next_subbuffer_status
get_next_subbuffer_splice(
1644 struct lttng_consumer_stream
*stream
,
1645 struct stream_subbuffer
*subbuffer
)
1647 const enum get_next_subbuffer_status status
=
1648 get_subbuffer_common(stream
, subbuffer
);
1650 if (status
!= GET_NEXT_SUBBUFFER_STATUS_OK
) {
1654 subbuffer
->buffer
.fd
= stream
->wait_fd
;
1660 enum get_next_subbuffer_status
get_next_subbuffer_mmap(
1661 struct lttng_consumer_stream
*stream
,
1662 struct stream_subbuffer
*subbuffer
)
1665 enum get_next_subbuffer_status status
;
1668 status
= get_subbuffer_common(stream
, subbuffer
);
1669 if (status
!= GET_NEXT_SUBBUFFER_STATUS_OK
) {
1673 ret
= get_current_subbuf_addr(stream
, &addr
);
1675 status
= GET_NEXT_SUBBUFFER_STATUS_ERROR
;
1679 subbuffer
->buffer
.buffer
= lttng_buffer_view_init(
1680 addr
, 0, subbuffer
->info
.data
.padded_subbuf_size
);
/*
 * Metadata flavor of get_next_subbuffer that also queries the tracer's
 * coherency flag: whether the metadata accumulated so far forms a
 * parseable unit. Used for live sessions so viewers never see a
 * half-written metadata stream.
 */
static enum get_next_subbuffer_status get_next_subbuffer_metadata_check(
		struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	const char *addr;
	bool coherent;
	enum get_next_subbuffer_status status;

	ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd,
			&coherent);
	if (ret) {
		goto end;
	}

	ret = stream->read_subbuffer_ops.extract_subbuffer_info(
			stream, subbuffer);
	if (ret) {
		goto end;
	}

	LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);

	ret = get_current_subbuf_addr(stream, &addr);
	if (ret) {
		goto end;
	}

	subbuffer->buffer.buffer = lttng_buffer_view_init(
			addr, 0, subbuffer->info.data.padded_subbuf_size);
	DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s",
			subbuffer->info.metadata.padded_subbuf_size,
			coherent ? "true" : "false");
end:
	/*
	 * The caller only expects -ENODATA when there is no data to read, but
	 * the kernel tracer returns -EAGAIN when there is currently no data
	 * for a non-finalized stream, and -ENODATA when there is no data for a
	 * finalized stream. Those can be combined into a -ENODATA return value.
	 */
	switch (ret) {
	case 0:
		status = GET_NEXT_SUBBUFFER_STATUS_OK;
		break;
	case -ENODATA:
	case -EAGAIN:
		/*
		 * The caller only expects -ENODATA when there is no data to
		 * read, but the kernel tracer returns -EAGAIN when there is
		 * currently no data for a non-finalized stream, and -ENODATA
		 * when there is no data for a finalized stream. Those can be
		 * combined into a -ENODATA return value.
		 */
		status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
		break;
	default:
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		break;
	}

	return status;
}
1749 int put_next_subbuffer(struct lttng_consumer_stream
*stream
,
1750 struct stream_subbuffer
*subbuffer
)
1752 const int ret
= kernctl_put_next_subbuf(stream
->wait_fd
);
1755 if (ret
== -EFAULT
) {
1756 PERROR("Error in unreserving sub buffer");
1757 } else if (ret
== -EIO
) {
1758 /* Should never happen with newer LTTng versions */
1759 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
/*
 * Probe whether the kernel tracer supports the
 * get_next_subbuf_metadata_check ioctl on `tracer_fd`. A -ENOTTY reply
 * means the ioctl is unknown to this tracer version.
 */
static bool is_get_next_check_metadata_available(int tracer_fd)
{
	const int probe_ret =
			kernctl_get_next_subbuf_metadata_check(tracer_fd, NULL);

	if (probe_ret == 0) {
		/* get succeeded, make sure to put the subbuffer. */
		kernctl_put_subbuf(tracer_fd);
	}

	return probe_ret != -ENOTTY;
}
1781 int signal_metadata(struct lttng_consumer_stream
*stream
,
1782 struct lttng_consumer_local_data
*ctx
)
1784 ASSERT_LOCKED(stream
->metadata_rdv_lock
);
1785 return pthread_cond_broadcast(&stream
->metadata_rdv
) ? -errno
: 0;
/*
 * Wire the read_subbuffer operation callbacks of `stream` according to its
 * type (data vs metadata), its channel output mode (mmap vs splice), and
 * whether the session is live.
 *
 * For live metadata streams, metadata bucketization is enabled when the
 * tracer supports coherency checking; otherwise a warning is emitted and
 * any existing bucket is torn down.
 *
 * Returns 0 on success, a negative value on error.
 */
static int lttng_kconsumer_set_stream_ops(
		struct lttng_consumer_stream *stream)
{
	int ret = 0;

	if (stream->metadata_flag && stream->chan->is_live) {
		DBG("Attempting to enable metadata bucketization for live consumers");
		if (is_get_next_check_metadata_available(stream->wait_fd)) {
			DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached");
			stream->read_subbuffer_ops.get_next_subbuffer =
					get_next_subbuffer_metadata_check;
			ret = consumer_stream_enable_metadata_bucketization(
					stream);
			if (ret) {
				goto end;
			}
		} else {
			/*
			 * The kernel tracer version is too old to indicate
			 * when the metadata stream has reached a "coherent"
			 * (parseable) point.
			 *
			 * This means that a live viewer may see an incoherent
			 * sequence of metadata and fail to parse it.
			 */
			WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream");
			metadata_bucket_destroy(stream->metadata_bucket);
			stream->metadata_bucket = NULL;
		}

		stream->read_subbuffer_ops.on_sleep = signal_metadata;
	}

	/* Only set if not already chosen by the bucketization path above. */
	if (!stream->read_subbuffer_ops.get_next_subbuffer) {
		if (stream->chan->output == CONSUMER_CHANNEL_MMAP) {
			stream->read_subbuffer_ops.get_next_subbuffer =
					get_next_subbuffer_mmap;
		} else {
			stream->read_subbuffer_ops.get_next_subbuffer =
					get_next_subbuffer_splice;
		}
	}

	if (stream->metadata_flag) {
		stream->read_subbuffer_ops.extract_subbuffer_info =
				extract_metadata_subbuffer_info;
	} else {
		stream->read_subbuffer_ops.extract_subbuffer_info =
				extract_data_subbuffer_info;
		if (stream->chan->is_live) {
			/* Live data streams periodically flush their index. */
			stream->read_subbuffer_ops.send_live_beacon =
					consumer_flush_kernel_index;
		}
	}

	stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
end:
	return ret;
}
/*
 * Prepare a newly received kernel stream for consumption: create its
 * output files (local monitored streams only), map its buffers when the
 * output mode is mmap, and install the read_subbuffer operations.
 *
 * Returns 0 on success so the caller handles the stream fd internally;
 * a negative value on error.
 */
int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Don't create anything if this is set for streaming or if there is
	 * no current trace chunk on the parent channel.
	 */
	if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
			stream->chan->trace_chunk) {
		ret = consumer_stream_create_output_files(stream, true);
		if (ret) {
			goto error;
		}
	}

	if (stream->output == LTTNG_EVENT_MMAP) {
		/* get the len of the mmap region */
		unsigned long mmap_len;

		ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len);
		if (ret != 0) {
			PERROR("kernctl_get_mmap_len");
			goto error_close_fd;
		}
		stream->mmap_len = (size_t) mmap_len;

		/* Read-only, private mapping of the ring buffer. */
		stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ,
				MAP_PRIVATE, stream->wait_fd, 0);
		if (stream->mmap_base == MAP_FAILED) {
			PERROR("Error mmaping");
			ret = -1;
			goto error_close_fd;
		}
	}

	ret = lttng_kconsumer_set_stream_ops(stream);
	if (ret) {
		goto error_close_fd;
	}

	/* we return 0 to let the library handle the FD internally */
	return 0;

error_close_fd:
	if (stream->out_fd >= 0) {
		int err;

		err = close(stream->out_fd);
		assert(!err);
		stream->out_fd = -1;
	}
error:
	return ret;
}
1908 * Check if data is still being extracted from the buffers for a specific
1909 * stream. Consumer data lock MUST be acquired before calling this function
1910 * and the stream lock.
1912 * Return 1 if the traced data are still getting read else 0 meaning that the
1913 * data is available for trace viewer reading.
1915 int lttng_kconsumer_data_pending(struct lttng_consumer_stream
*stream
)
1921 if (stream
->endpoint_status
!= CONSUMER_ENDPOINT_ACTIVE
) {
1926 ret
= kernctl_get_next_subbuf(stream
->wait_fd
);
1928 /* There is still data so let's put back this subbuffer. */
1929 ret
= kernctl_put_subbuf(stream
->wait_fd
);
1931 ret
= 1; /* Data is pending */
1935 /* Data is NOT pending and ready to be read. */