/*
 * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
#include <lttng/ust-ctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <urcu/list.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/relayd/relayd.h>
#include <common/compat/fcntl.h>
#include <common/compat/endian.h>
#include <common/consumer/consumer-metadata-cache.h>
#include <common/consumer/consumer-stream.h>
#include <common/consumer/consumer-timer.h>
#include <common/utils.h>
#include <common/index/index.h>
#include <common/consumer/consumer.h>
#include <common/shm.h>
#include <common/optional.h>

#include "ust-consumer.h"
#define INT_MAX_STR_LEN 12	/* includes \0 */
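/*
 * Worked example: INT32_MIN prints as "-2147483648", i.e. 11 characters,
 * so 12 bytes are enough for any int in decimal plus the terminating NUL.
 */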
extern struct lttng_consumer_global_data the_consumer_data;
extern int consumer_poll_timeout;
/*
 * Free channel object and all streams associated with it. This MUST be used
 * if and only if the channel has _NEVER_ been added to the global channel
 * hash table.
 */
static void destroy_channel(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);

	DBG("UST consumer cleaning stream list");

	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		lttng_ust_ctl_destroy_stream(stream->ustream);
		lttng_trace_chunk_put(stream->trace_chunk);
		free(stream);
	}

	/*
	 * If a channel is available, meaning that it was created before the
	 * streams were, delete it.
	 */
	if (channel->uchan) {
		lttng_ustconsumer_del_channel(channel);
		lttng_ustconsumer_free_channel(channel);
	}

	if (channel->trace_chunk) {
		lttng_trace_chunk_put(channel->trace_chunk);
	}

	free(channel);
}
/*
 * Add channel to internal consumer state.
 *
 * Returns 0 on success or else a negative value.
 */
static int add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;

	assert(channel);
	assert(ctx);

	if (ctx->on_recv_channel != NULL) {
		ret = ctx->on_recv_channel(channel);
		if (ret == 0) {
			ret = consumer_add_channel(channel, ctx);
		} else if (ret < 0) {
			/* Most likely an ENOMEM. */
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			goto error;
		}
	} else {
		ret = consumer_add_channel(channel, ctx);
	}

	DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);

error:
	return ret;
}
/*
 * Allocate and return a consumer stream object. If _alloc_ret is not NULL,
 * the error value, if applicable, is set in it; otherwise it is left
 * untouched.
 *
 * Return NULL on error else the newly allocated stream object.
 */
static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *_alloc_ret)
{
	int alloc_ret;
	struct lttng_consumer_stream *stream = NULL;

	assert(channel);
	assert(ctx);

	stream = consumer_stream_create(
			channel,
			channel->key,
			key,
			channel->name,
			channel->relayd_id,
			channel->session_id,
			channel->trace_chunk,
			cpu,
			&alloc_ret,
			channel->type,
			channel->monitor);
	if (stream == NULL) {
		switch (alloc_ret) {
		case -ENOENT:
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			DBG3("Could not find channel");
			break;
		case -ENOMEM:
		case -EINVAL:
		default:
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			break;
		}
		goto error;
	}

	consumer_stream_update_channel_attributes(stream, channel);

error:
	if (_alloc_ret != NULL) {
		*_alloc_ret = alloc_ret;
	}
	return stream;
}
/*
 * Send the given stream pointer to the corresponding thread.
 *
 * Returns 0 on success else a negative value.
 */
static int send_stream_to_thread(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_pipe *stream_pipe;

	/* Get the right pipe where the stream will be sent. */
	if (stream->metadata_flag) {
		consumer_add_metadata_stream(stream);
		stream_pipe = ctx->consumer_metadata_pipe;
	} else {
		consumer_add_data_stream(stream);
		stream_pipe = ctx->consumer_data_pipe;
	}

	/*
	 * From this point on, the stream's ownership has been moved away from
	 * the channel and it becomes globally visible. Hence, remove it from
	 * the local stream list to prevent the stream from being both local and
	 * global.
	 */
	stream->globally_visible = 1;
	cds_list_del(&stream->send_node);

	ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
	if (ret < 0) {
		ERR("Consumer write %s stream to pipe %d",
				stream->metadata_flag ? "metadata" : "data",
				lttng_pipe_get_writefd(stream_pipe));
		if (stream->metadata_flag) {
			consumer_del_stream_for_metadata(stream);
		} else {
			consumer_del_stream_for_data(stream);
		}
		goto error;
	}

error:
	return ret;
}
static int get_stream_shm_path(char *stream_shm_path, const char *shm_path, int cpu)
{
	int ret;
	char cpu_nr[INT_MAX_STR_LEN];	/* int max len */

	strncpy(stream_shm_path, shm_path, PATH_MAX);
	stream_shm_path[PATH_MAX - 1] = '\0';
	ret = snprintf(cpu_nr, INT_MAX_STR_LEN, "%i", cpu);
	if (ret < 0) {
		PERROR("snprintf");
		goto end;
	}
	strncat(stream_shm_path, cpu_nr,
			PATH_MAX - strlen(stream_shm_path) - 1);
	ret = 0;

end:
	return ret;
}
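/*
 * For instance, with shm_path "/lttng-ust/chan" and cpu 2, stream_shm_path
 * becomes "/lttng-ust/chan2" (strncat keeps the result within PATH_MAX).
 */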
/*
 * Create streams for the given channel using liblttng-ust-ctl.
 * The channel lock must be acquired by the caller.
 *
 * Return 0 on success else a negative value.
 */
static int create_ust_streams(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret, cpu = 0;
	struct lttng_ust_ctl_consumer_stream *ustream;
	struct lttng_consumer_stream *stream;
	pthread_mutex_t *current_stream_lock = NULL;

	assert(channel);
	assert(ctx);

	/*
	 * Loop while a stream is available from ustctl. When NULL is returned,
	 * we've reached the end of the possible streams for the channel.
	 */
	while ((ustream = lttng_ust_ctl_create_stream(channel->uchan, cpu))) {
		int wait_fd;
		int ust_metadata_pipe[2];

		health_code_update();

		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
			ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
			if (ret < 0) {
				ERR("Create ust metadata poll pipe");
				goto error;
			}
			wait_fd = ust_metadata_pipe[0];
		} else {
			wait_fd = lttng_ust_ctl_stream_get_wait_fd(ustream);
		}

		/* Allocate consumer stream object. */
		stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
		if (!stream) {
			goto error_alloc;
		}
		stream->ustream = ustream;
		/*
		 * Store it so we can save multiple function calls afterwards since
		 * this value is used heavily in the stream threads. This is UST
		 * specific so this is why it's done after allocation.
		 */
		stream->wait_fd = wait_fd;

		/*
		 * Increment channel refcount since the channel reference has now been
		 * assigned in the allocation process above.
		 */
		if (stream->chan->monitor) {
			uatomic_inc(&stream->chan->refcount);
		}

		pthread_mutex_lock(&stream->lock);
		current_stream_lock = &stream->lock;
		/*
		 * Order is important this is why a list is used. On error, the caller
		 * should clean this list.
		 */
		cds_list_add_tail(&stream->send_node, &channel->streams.head);

		ret = lttng_ust_ctl_get_max_subbuf_size(stream->ustream,
				&stream->max_sb_size);
		if (ret < 0) {
			ERR("lttng_ust_ctl_get_max_subbuf_size failed for stream %s",
					stream->name);
			goto error;
		}

		/* Do actions once stream has been received. */
		if (ctx->on_recv_stream) {
			ret = ctx->on_recv_stream(stream);
			if (ret < 0) {
				goto error;
			}
		}

		DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
				stream->name, stream->key, stream->relayd_stream_id);

		/* Set next CPU stream. */
		channel->streams.count = ++cpu;

		/* Keep stream reference when creating metadata. */
		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
			channel->metadata_stream = stream;
			if (channel->monitor) {
				/* Set metadata poll pipe if we created one. */
				memcpy(stream->ust_metadata_poll_pipe,
						ust_metadata_pipe,
						sizeof(ust_metadata_pipe));
			}
		}
		pthread_mutex_unlock(&stream->lock);
		current_stream_lock = NULL;
	}

	return 0;

error:
error_alloc:
	if (current_stream_lock) {
		pthread_mutex_unlock(current_stream_lock);
	}
	return ret;
}
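/*
 * Note: for per-CPU data channels, lttng_ust_ctl_create_stream() typically
 * hands out one stream per CPU. The streams created above stay on the
 * channel's local streams.head list until send_stream_to_thread() transfers
 * their ownership to the data or metadata poll thread.
 */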
static int open_ust_stream_fd(struct lttng_consumer_channel *channel, int cpu,
		const struct lttng_credentials *session_credentials)
{
	char shm_path[PATH_MAX];
	int ret;

	if (!channel->shm_path[0]) {
		return shm_create_anonymous("ust-consumer");
	}
	ret = get_stream_shm_path(shm_path, channel->shm_path, cpu);
	if (ret) {
		goto error_shm_path;
	}
	return run_as_open(shm_path,
			O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR,
			lttng_credentials_get_uid(session_credentials),
			lttng_credentials_get_gid(session_credentials));

error_shm_path:
	return -1;
}
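/*
 * Opening with O_CREAT | O_EXCL guarantees a brand new shm file per stream,
 * and run_as_open() performs the open with the session credentials so the
 * resulting file is owned by the traced user's uid/gid rather than by the
 * consumer daemon.
 */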
/*
 * Create an UST channel with the given attributes and send it to the session
 * daemon using the ust ctl API.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_channel(struct lttng_consumer_channel *channel,
		struct lttng_ust_ctl_consumer_channel_attr *attr,
		struct lttng_ust_ctl_consumer_channel **ust_chanp)
{
	int ret, nr_stream_fds, i, j;
	int *stream_fds;
	struct lttng_ust_ctl_consumer_channel *ust_channel;

	assert(channel);
	assert(attr);
	assert(ust_chanp);
	assert(channel->buffer_credentials.is_set);

	DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
			"subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
			"switch_timer_interval: %u, read_timer_interval: %u, "
			"output: %d, type: %d", attr->overwrite, attr->subbuf_size,
			attr->num_subbuf, attr->switch_timer_interval,
			attr->read_timer_interval, attr->output, attr->type);

	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA)
		nr_stream_fds = 1;
	else
		nr_stream_fds = lttng_ust_ctl_get_nr_stream_per_channel();
	stream_fds = zmalloc(nr_stream_fds * sizeof(*stream_fds));
	if (!stream_fds) {
		ret = -1;
		goto error_alloc;
	}
	for (i = 0; i < nr_stream_fds; i++) {
		stream_fds[i] = open_ust_stream_fd(channel, i,
				&channel->buffer_credentials.value);
		if (stream_fds[i] < 0) {
			ret = -1;
			goto error_open;
		}
	}
	ust_channel = lttng_ust_ctl_create_channel(attr, stream_fds, nr_stream_fds);
	if (!ust_channel) {
		ret = -1;
		goto error_create;
	}
	channel->nr_stream_fds = nr_stream_fds;
	channel->stream_fds = stream_fds;
	*ust_chanp = ust_channel;

	return 0;

error_create:
error_open:
	for (j = i - 1; j >= 0; j--) {
		int closeret;

		closeret = close(stream_fds[j]);
		if (closeret) {
			PERROR("close");
		}
		if (channel->shm_path[0]) {
			char shm_path[PATH_MAX];

			closeret = get_stream_shm_path(shm_path,
					channel->shm_path, j);
			if (closeret) {
				ERR("Cannot get stream shm path");
			}
			closeret = run_as_unlink(shm_path,
					lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
							channel->buffer_credentials)),
					lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
							channel->buffer_credentials)));
			if (closeret) {
				PERROR("unlink %s", shm_path);
			}
		}
	}
	/* Try to rmdir all directories under shm_path root. */
	if (channel->root_shm_path[0]) {
		(void) run_as_rmdir_recursive(channel->root_shm_path,
				lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
						channel->buffer_credentials)),
				lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
						channel->buffer_credentials)),
				LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
	}
	free(stream_fds);
error_alloc:
	return ret;
}
/*
 * Send a single given stream to the session daemon using the sock.
 *
 * Return 0 on success else a negative value.
 */
static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);
	assert(sock >= 0);

	DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);

	/* Send stream to session daemon. */
	ret = lttng_ust_ctl_send_stream_to_sessiond(sock, stream->ustream);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
/*
 * Send channel to sessiond and relayd if applicable.
 *
 * Return 0 on success or else a negative value.
 */
static int send_channel_to_sessiond_and_relayd(int sock,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *relayd_error)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_consumer_stream *stream;
	uint64_t net_seq_idx = -1ULL;

	assert(channel);
	assert(ctx);
	assert(sock >= 0);

	DBG("UST consumer sending channel %s to sessiond", channel->name);

	if (channel->relayd_id != (uint64_t) -1ULL) {
		cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

			health_code_update();

			/* Try to send the stream to the relayd if one is available. */
			DBG("Sending stream %" PRIu64 " of channel \"%s\" to relayd",
					stream->key, channel->name);
			ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
			if (ret < 0) {
				/*
				 * Flag that the relayd was the problem here probably due to a
				 * communication error on the socket.
				 */
				if (relayd_error) {
					*relayd_error = 1;
				}
				ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			}
			if (net_seq_idx == -1ULL) {
				net_seq_idx = stream->net_seq_idx;
			}
		}
	}

	/* Inform sessiond that we are about to send channel and streams. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		/*
		 * Either the session daemon is not responding or the relayd died so we
		 * stop now.
		 */
		goto error;
	}

	/* Send channel to sessiond. */
	ret = lttng_ust_ctl_send_channel_to_sessiond(sock, channel->uchan);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_ust_ctl_channel_close_wakeup_fd(channel->uchan);
	if (ret < 0) {
		goto error;
	}

	/* The channel was sent successfully to the sessiond at this point. */
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

		health_code_update();

		/* Send stream to session daemon. */
		ret = send_sessiond_stream(sock, stream);
		if (ret < 0) {
			goto error;
		}
	}

	/* Tell sessiond there is no more stream. */
	ret = lttng_ust_ctl_send_stream_to_sessiond(sock, NULL);
	if (ret < 0) {
		goto error;
	}

	DBG("UST consumer NULL stream sent to sessiond");

	return 0;

error:
	if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		ret = -1;
	}
	return ret;
}
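/*
 * Wire protocol recap for the function above: a status message first
 * announces the outcome, then the channel object is sent, followed by each
 * stream, and finally a NULL stream which acts as the end-of-stream marker
 * for the session daemon.
 */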
/*
 * Creates a channel and streams and adds the channel to the internal channel
 * state. The created stream must ONLY be sent once the GET_CHANNEL command is
 * received.
 *
 * Return 0 on success or else, a negative value is returned and the channel
 * MUST be destroyed by consumer_del_channel().
 */
static int ask_channel(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel,
		struct lttng_ust_ctl_consumer_channel_attr *attr)
{
	int ret;

	assert(ctx);
	assert(channel);
	assert(attr);

	/*
	 * This value is still used by the kernel consumer since for the kernel,
	 * the stream ownership is not IN the consumer so we need to have the
	 * number of left stream that needs to be initialized so we can know when
	 * to delete the channel (see consumer.c).
	 *
	 * As for the user space tracer now, the consumer creates and sends the
	 * stream to the session daemon which only sends them to the application
	 * once every stream of a channel is received making this value useless
	 * because they will be added to the poll thread before the application
	 * receives them. This ensures that a stream can not hang up during
	 * initialization of a channel.
	 */
	channel->nb_init_stream_left = 0;

	/* The reply msg status is handled in the following call. */
	ret = create_ust_channel(channel, attr, &channel->uchan);
	if (ret < 0) {
		goto end;
	}

	channel->wait_fd = lttng_ust_ctl_channel_get_wait_fd(channel->uchan);

	/*
	 * For the snapshots (no monitor), we create the metadata streams
	 * on demand, not during the channel creation.
	 */
	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
		ret = 0;
		goto end;
	}

	/* Open all streams for this channel. */
	pthread_mutex_lock(&channel->lock);
	ret = create_ust_streams(channel, ctx);
	pthread_mutex_unlock(&channel->lock);

end:
	return ret;
}
/*
 * Send all streams of a channel to the right thread handling it.
 *
 * On error, return a negative value else 0 on success.
 */
static int send_streams_to_thread(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);
	assert(ctx);

	/* Send streams to the corresponding thread. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {

		health_code_update();

		/* Sending the stream to the thread. */
		ret = send_stream_to_thread(stream, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Flush channel's streams using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int flush_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer flush channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = the_consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, flush it. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		pthread_mutex_lock(&stream->lock);

		/*
		 * Protect against concurrent teardown of a stream.
		 */
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}

		if (!stream->quiescent) {
			lttng_ust_ctl_flush_buffer(stream->ustream, 0);
			stream->quiescent = true;
		}
next:
		pthread_mutex_unlock(&stream->lock);
	}
error:
	rcu_read_unlock();
	return ret;
}
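/*
 * The quiescent flag set above makes repeated flushes of an already-stopped
 * stream no-ops; clear_quiescent_channel() below resets the flag when
 * tracing starts again.
 */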
/*
 * Clear quiescent state from channel's streams using the given key to
 * retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int clear_quiescent_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = the_consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, clear quiescent state. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		pthread_mutex_lock(&stream->lock);
		stream->quiescent = false;
		pthread_mutex_unlock(&stream->lock);
	}
error:
	rcu_read_unlock();
	return ret;
}
/*
 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int close_metadata(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	unsigned int channel_monitor;

	DBG("UST consumer close metadata key %" PRIu64, chan_key);

	channel = consumer_find_channel(chan_key);
	if (!channel) {
		/*
		 * This is possible if the metadata thread has issued a delete because
		 * the endpoint of the stream hung up. There is no way the
		 * session daemon can know about it thus use a DBG instead of an actual
		 * error.
		 */
		DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	pthread_mutex_lock(&the_consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	channel_monitor = channel->monitor;
	if (cds_lfht_is_node_deleted(&channel->node.node)) {
		goto error_unlock;
	}

	lttng_ustconsumer_close_metadata(channel);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&the_consumer_data.lock);

	/*
	 * The ownership of a metadata channel depends on the type of
	 * session to which it belongs. In effect, the monitor flag is checked
	 * to determine if this metadata channel is in "snapshot" mode or not.
	 *
	 * In the non-snapshot case, the metadata channel is created along with
	 * a single stream which will remain present until the metadata channel
	 * is destroyed (on the destruction of its session). In this case, the
	 * metadata stream is "monitored" by the metadata poll thread and holds
	 * the ownership of its channel.
	 *
	 * Closing the metadata will cause the metadata stream's "metadata poll
	 * pipe" to be closed. Closing this pipe will wake-up the metadata poll
	 * thread which will teardown the metadata stream which, in return,
	 * deletes the metadata channel.
	 *
	 * In the snapshot case, the metadata stream is created and destroyed
	 * on every snapshot record. Since the channel doesn't have an owner
	 * other than the session daemon, it is safe to destroy it immediately
	 * on reception of the CLOSE_METADATA command.
	 */
	if (!channel_monitor) {
		/*
		 * The channel and consumer_data locks must be
		 * released before this call since consumer_del_channel
		 * re-acquires the channel and consumer_data locks to teardown
		 * the channel and queue its reclamation by the "call_rcu"
		 * worker thread.
		 */
		consumer_del_channel(channel);
	}

	return ret;
error_unlock:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&the_consumer_data.lock);
error:
	return ret;
}
/*
 * RCU read side lock MUST be acquired before calling this function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
{
	int ret;
	struct lttng_consumer_channel *metadata;

	DBG("UST consumer setup metadata key %" PRIu64, key);

	metadata = consumer_find_channel(key);
	if (!metadata) {
		ERR("UST consumer push metadata %" PRIu64 " not found", key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto end;
	}

	/*
	 * In no monitor mode, the metadata channel has no stream(s) so skip the
	 * ownership transfer to the metadata thread.
	 */
	if (!metadata->monitor) {
		DBG("Metadata channel in no monitor");
		ret = 0;
		goto end;
	}

	/*
	 * Send metadata stream to relayd if one available. Availability is
	 * known if the stream is still in the list of the channel.
	 */
	if (cds_list_empty(&metadata->streams.head)) {
		ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
		ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		goto error_no_stream;
	}

	/* Send metadata stream to relayd if needed. */
	if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
		ret = consumer_send_relayd_stream(metadata->metadata_stream,
				metadata->pathname);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto error;
		}
		ret = consumer_send_relayd_streams_sent(
				metadata->metadata_stream->net_seq_idx);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			goto error;
		}
	}

	/*
	 * Ownership of metadata stream is passed along. Freeing is handled by
	 * the callee.
	 */
	ret = send_streams_to_thread(metadata, ctx);
	if (ret < 0) {
		/*
		 * If we are unable to send the stream to the thread, there is
		 * a big problem so just stop everything.
		 */
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto send_streams_error;
	}
	/* List MUST be empty after or else it could be reused. */
	assert(cds_list_empty(&metadata->streams.head));

	ret = 0;
	goto end;

error:
	/*
	 * Delete metadata channel on error. At this point, the metadata stream can
	 * NOT be monitored by the metadata thread thus having the guarantee that
	 * the stream is still in the local stream list of the channel. This call
	 * will make sure to clean that list.
	 */
	consumer_stream_destroy(metadata->metadata_stream, NULL);
	cds_list_del(&metadata->metadata_stream->send_node);
	metadata->metadata_stream = NULL;
send_streams_error:
error_no_stream:
end:
	return ret;
}
/*
 * Snapshot the whole metadata.
 * RCU read-side lock must be held by the caller.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_metadata(struct lttng_consumer_channel *metadata_channel,
		uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_stream *metadata_stream;

	assert(path);
	assert(ctx);

	DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	rcu_read_lock();

	assert(!metadata_channel->monitor);

	health_code_update();

	/*
	 * Ask the sessiond if we have new metadata waiting and update the
	 * consumer metadata cache.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * The metadata stream is NOT created in no monitor mode when the channel
	 * is created on a sessiond ask channel command.
	 */
	ret = create_ust_streams(metadata_channel, ctx);
	if (ret < 0) {
		goto error;
	}

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	pthread_mutex_lock(&metadata_stream->lock);
	if (relayd_id != (uint64_t) -1ULL) {
		metadata_stream->net_seq_idx = relayd_id;
		ret = consumer_send_relayd_stream(metadata_stream, path);
	} else {
		ret = consumer_stream_create_output_files(metadata_stream,
				false);
	}
	pthread_mutex_unlock(&metadata_stream->lock);
	if (ret < 0) {
		goto error_stream;
	}

	do {
		health_code_update();

		ret = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
		if (ret < 0) {
			goto error_stream;
		}
	} while (ret > 0);

error_stream:
	/*
	 * Clean up the stream completely because the next snapshot will use a
	 * new metadata stream.
	 */
	consumer_stream_destroy(metadata_stream, NULL);
	cds_list_del(&metadata_stream->send_node);
	metadata_channel->metadata_stream = NULL;

error:
	rcu_read_unlock();
	return ret;
}
static int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
		const char **addr)
{
	int ret;
	unsigned long mmap_offset;
	const char *mmap_base;

	mmap_base = lttng_ust_ctl_get_mmap_base(stream->ustream);
	if (!mmap_base) {
		ERR("Failed to get mmap base for stream `%s`",
				stream->name);
		ret = -EPERM;
		goto error;
	}

	ret = lttng_ust_ctl_get_mmap_read_offset(stream->ustream, &mmap_offset);
	if (ret != 0) {
		ERR("Failed to get mmap offset for stream `%s`", stream->name);
		ret = -EINVAL;
		goto error;
	}

	*addr = mmap_base + mmap_offset;
error:
	return ret;
}
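/*
 * The address computed above is only valid while the current sub-buffer is
 * held, i.e. between lttng_ust_ctl_get_subbuf() and
 * lttng_ust_ctl_put_subbuf(), as done in the snapshot loop below.
 */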
/*
 * Take a snapshot of all the streams of a channel.
 * RCU read-side lock and the channel lock must be held by the caller.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_channel(struct lttng_consumer_channel *channel,
		uint64_t key, char *path, uint64_t relayd_id,
		uint64_t nb_packets_per_stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	unsigned use_relayd = 0;
	unsigned long consumed_pos, produced_pos;
	struct lttng_consumer_stream *stream;

	assert(path);
	assert(ctx);

	rcu_read_lock();

	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	assert(!channel->monitor);
	DBG("UST consumer snapshot channel %" PRIu64, key);

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		health_code_update();

		/* Lock stream because we are about to change its state. */
		pthread_mutex_lock(&stream->lock);
		assert(channel->trace_chunk);
		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
			/*
			 * Can't happen barring an internal error as the channel
			 * holds a reference to the trace chunk.
			 */
			ERR("Failed to acquire reference to channel's trace chunk");
			ret = -1;
			goto error_unlock;
		}
		assert(!stream->trace_chunk);
		stream->trace_chunk = channel->trace_chunk;

		stream->net_seq_idx = relayd_id;

		if (use_relayd) {
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				goto error_close_stream;
			}
		} else {
			ret = consumer_stream_create_output_files(stream,
					false);
			if (ret < 0) {
				goto error_close_stream;
			}
			DBG("UST consumer snapshot stream (%" PRIu64 ")",
					stream->key);
		}

		/*
		 * If tracing is active, we want to perform a "full" buffer flush.
		 * Else, if quiescent, it has already been done by the prior stop.
		 */
		if (!stream->quiescent) {
			lttng_ust_ctl_flush_buffer(stream->ustream, 0);
		}

		ret = lttng_ustconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking UST snapshot");
			goto error_close_stream;
		}

		ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced UST snapshot position");
			goto error_close_stream;
		}

		ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumed UST snapshot position");
			goto error_close_stream;
		}

		/*
		 * The original value is sent back if max stream size is larger than
		 * the possible size of the snapshot. Also, we assume that the session
		 * daemon should never send a maximum stream size that is lower than
		 * subbuffer size.
		 */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);
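		/*
		 * The (long) cast below compares positions through a signed
		 * difference, which stays correct even if the free-running
		 * position counters wrap around.
		 */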
		while ((long) (consumed_pos - produced_pos) < 0) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();

			DBG("UST consumer taking snapshot at pos %lu", consumed_pos);

			ret = lttng_ust_ctl_get_subbuf(stream->ustream, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("lttng_ust_ctl_get_subbuf snapshot");
					goto error_close_stream;
				}
				DBG("UST consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			ret = lttng_ust_ctl_get_subbuf_size(stream->ustream, &len);
			if (ret < 0) {
				ERR("Snapshot lttng_ust_ctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = lttng_ust_ctl_get_padded_subbuf_size(stream->ustream, &padded_len);
			if (ret < 0) {
				ERR("Snapshot lttng_ust_ctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

			subbuf_view = lttng_buffer_view_init(
					subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
					stream, &subbuf_view, padded_len - len);
			if (use_relayd) {
				if (read_len != len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			} else {
				if (read_len != padded_len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			}

			ret = lttng_ust_ctl_put_subbuf(stream->ustream);
			if (ret < 0) {
				ERR("Snapshot lttng_ust_ctl_put_subbuf");
				goto error_close_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Simply close the stream so we can use it on the next snapshot. */
		consumer_stream_close(stream);
		pthread_mutex_unlock(&stream->lock);
	}

	rcu_read_unlock();
	return 0;

error_put_subbuf:
	if (lttng_ust_ctl_put_subbuf(stream->ustream) < 0) {
		ERR("Snapshot lttng_ust_ctl_put_subbuf");
	}
error_close_stream:
	consumer_stream_close(stream);
error_unlock:
	pthread_mutex_unlock(&stream->lock);
	rcu_read_unlock();
	return ret;
}
static void metadata_stream_reset_cache_consumed_position(
		struct lttng_consumer_stream *stream)
{
	ASSERT_LOCKED(stream->lock);

	DBG("Reset metadata cache of session %" PRIu64,
			stream->chan->session_id);
	stream->ust_metadata_pushed = 0;
}
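/*
 * ust_metadata_pushed counts how many bytes of the metadata cache were
 * already pushed to the metadata stream; zeroing it forces the poll thread
 * to re-send the whole cache after an invalidation.
 */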
/*
 * Receive the metadata updates from the sessiond. Supports receiving
 * overlapping metadata, but it needs to always belong to a contiguous
 * range starting from 0.
 * Be careful about the locks held when calling this function: it needs
 * the metadata cache flush to concurrently progress in order to
 * complete.
 */
int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
		uint64_t len, uint64_t version,
		struct lttng_consumer_channel *channel, int timer, int wait)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	char *metadata_str;
	enum consumer_metadata_cache_write_status cache_write_status;

	DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);

	metadata_str = zmalloc(len * sizeof(char));
	if (!metadata_str) {
		PERROR("zmalloc metadata string");
		ret_code = LTTCOMM_CONSUMERD_ENOMEM;
		goto end;
	}

	health_code_update();

	/* Receive metadata string. */
	ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
	if (ret < 0) {
		/* Session daemon is dead so return gracefully. */
		ret_code = ret;
		goto end_free;
	}

	health_code_update();

	pthread_mutex_lock(&channel->metadata_cache->lock);
	cache_write_status = consumer_metadata_cache_write(
			channel->metadata_cache, offset, len, version,
			metadata_str);
	pthread_mutex_unlock(&channel->metadata_cache->lock);
	switch (cache_write_status) {
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_NO_CHANGE:
		/*
		 * The write entirely overlapped with existing contents of the
		 * same metadata version (same content); there is nothing to do.
		 */
		break;
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_INVALIDATED:
		/*
		 * The metadata cache was invalidated (previously pushed
		 * content has been overwritten). Reset the stream's consumed
		 * metadata position to ensure the metadata poll thread consumes
		 * the whole cache.
		 */
		pthread_mutex_lock(&channel->metadata_stream->lock);
		metadata_stream_reset_cache_consumed_position(
				channel->metadata_stream);
		pthread_mutex_unlock(&channel->metadata_stream->lock);
		/* Fall-through. */
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_APPENDED_CONTENT:
		/*
		 * In both cases, the metadata poll thread has new data to
		 * consume.
		 */
		ret = consumer_metadata_wakeup_pipe(channel);
		if (ret) {
			ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto end_free;
		}
		break;
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR:
		/* Unable to handle metadata. Notify session daemon. */
		ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
		/*
		 * Skip metadata flush on write error since the offset and len might
		 * not have been updated which could create an infinite loop below when
		 * waiting for the metadata cache to be flushed.
		 */
		goto end_free;
	default:
		abort();
	}

	if (!wait) {
		goto end_free;
	}
	while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
		DBG("Waiting for metadata to be flushed");

		health_code_update();

		usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
	}

end_free:
	free(metadata_str);
end:
	return ret_code;
}
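/*
 * Summary of the push protocol handled above: receive the payload from the
 * sessiond socket, write it into the channel's metadata cache, wake the
 * metadata poll thread when new data is available and, if 'wait' is set,
 * only return once the cache has been flushed up to offset + len.
 */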
/*
 * Receive command from session daemon and process it.
 *
 * Return 1 on success else a negative value or 0.
 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	int ret_func;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;
	struct lttng_consumer_channel *channel = NULL;

	health_code_update();

	{
		ssize_t ret_recv;

		ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
		if (ret_recv != sizeof(msg)) {
			DBG("Consumer received unexpected message size %zd (expects %zu)",
					ret_recv, sizeof(msg));
			/*
			 * The ret value might be 0 meaning an orderly shutdown but this is ok
			 * since the caller handles this.
			 */
			if (ret_recv > 0) {
				lttng_consumer_send_error(ctx,
						LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
				ret_recv = -1;
			}
			return ret_recv;
		}
	}

	health_code_update();

	assert(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* relayd needs RCU read-side lock */
	rcu_read_lock();

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		/* Session daemon status messages are handled in the following call. */
		consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
				&msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
				msg.u.relayd_sock.relayd_session_id);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		} else {
			/*
			 * Each relayd socket pair has a refcount of stream attached to it
			 * which tells if the relayd is still active or not depending on the
			 * refcount value.
			 *
			 * This will set the destroy flag of the relayd object and destroy it
			 * if the refcount reaches zero when called.
			 *
			 * The destroy can happen either here or when a stream fd hangs up.
			 */
			consumer_flag_relayd_for_destroy(relayd);
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		rcu_read_unlock();
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int is_data_pending;
		ssize_t ret_send;
		uint64_t id = msg.u.data_pending.session_id;

		DBG("UST consumer data pending command for id %" PRIu64, id);

		is_data_pending = consumer_data_pending(id);

		/* Send back returned value to session daemon */
		ret_send = lttcomm_send_unix_sock(sock, &is_data_pending,
				sizeof(is_data_pending));
		if (ret_send < 0) {
			DBG("Error when sending the data pending ret code: %zd",
					ret_send);
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
	{
		int ret_ask_channel, ret_add_channel, ret_send;
		struct lttng_ust_ctl_consumer_channel_attr attr;
		const uint64_t chunk_id = msg.u.ask_channel.chunk_id.value;
		const struct lttng_credentials buffer_credentials = {
			.uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.uid),
			.gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.gid),
		};

		/* Create a plain object and reserve a channel key. */
		channel = consumer_allocate_channel(
				msg.u.ask_channel.key,
				msg.u.ask_channel.session_id,
				msg.u.ask_channel.chunk_id.is_set ?
						&chunk_id : NULL,
				msg.u.ask_channel.pathname,
				msg.u.ask_channel.name,
				msg.u.ask_channel.relayd_id,
				(enum lttng_event_output) msg.u.ask_channel.output,
				msg.u.ask_channel.tracefile_size,
				msg.u.ask_channel.tracefile_count,
				msg.u.ask_channel.session_id_per_pid,
				msg.u.ask_channel.monitor,
				msg.u.ask_channel.live_timer_interval,
				msg.u.ask_channel.is_live,
				msg.u.ask_channel.root_shm_path,
				msg.u.ask_channel.shm_path);
		if (!channel) {
			goto end_channel_error;
		}

		LTTNG_OPTIONAL_SET(&channel->buffer_credentials,
				buffer_credentials);

		/*
		 * Assign UST application UID to the channel. This value is ignored for
		 * per PID buffers. This is specific to UST thus setting this after the
		 * allocation.
		 */
		channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;

		/* Build channel attributes from received message. */
		attr.subbuf_size = msg.u.ask_channel.subbuf_size;
		attr.num_subbuf = msg.u.ask_channel.num_subbuf;
		attr.overwrite = msg.u.ask_channel.overwrite;
		attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
		attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
		attr.chan_id = msg.u.ask_channel.chan_id;
		memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
		attr.blocking_timeout = msg.u.ask_channel.blocking_timeout;

		/* Match channel buffer type to the UST abi. */
		switch (msg.u.ask_channel.output) {
		case LTTNG_EVENT_MMAP:
		default:
			attr.output = LTTNG_UST_ABI_MMAP;
			break;
		}

		/* Translate and save channel type. */
		switch (msg.u.ask_channel.type) {
		case LTTNG_UST_ABI_CHAN_PER_CPU:
			channel->type = CONSUMER_CHANNEL_TYPE_DATA;
			attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;
			/*
			 * Set refcount to 1 for owner. Below, we will
			 * pass ownership to the
			 * consumer_thread_channel_poll() thread.
			 */
			channel->refcount = 1;
			break;
		case LTTNG_UST_ABI_CHAN_METADATA:
			channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
			attr.type = LTTNG_UST_ABI_CHAN_METADATA;
			break;
		default:
			assert(0);
			goto error_fatal;
		}

		health_code_update();

		ret_ask_channel = ask_channel(ctx, channel, &attr);
		if (ret_ask_channel < 0) {
			goto end_channel_error;
		}

		if (msg.u.ask_channel.type == LTTNG_UST_ABI_CHAN_METADATA) {
			int ret_allocate;

			ret_allocate = consumer_metadata_cache_allocate(
					channel);
			if (ret_allocate < 0) {
				ERR("Allocating metadata cache");
				goto end_channel_error;
			}
			consumer_timer_switch_start(channel, attr.switch_timer_interval);
			attr.switch_timer_interval = 0;
		} else {
			int monitor_start_ret;

			consumer_timer_live_start(channel,
					msg.u.ask_channel.live_timer_interval);
			monitor_start_ret = consumer_timer_monitor_start(
					channel,
					msg.u.ask_channel.monitor_timer_interval);
			if (monitor_start_ret < 0) {
				ERR("Starting channel monitoring timer failed");
				goto end_channel_error;
			}
		}

		health_code_update();

		/*
		 * Add the channel to the internal state AFTER all streams were created
		 * and successfully sent to session daemon. This way, all streams must
		 * be ready before this channel is visible to the threads.
		 * If add_channel succeeds, ownership of the channel is
		 * passed to consumer_thread_channel_poll().
		 */
		ret_add_channel = add_channel(channel, ctx);
		if (ret_add_channel < 0) {
			if (msg.u.ask_channel.type == LTTNG_UST_ABI_CHAN_METADATA) {
				if (channel->switch_timer_enabled == 1) {
					consumer_timer_switch_stop(channel);
				}
				consumer_metadata_cache_destroy(channel);
			}
			if (channel->live_timer_enabled == 1) {
				consumer_timer_live_stop(channel);
			}
			if (channel->monitor_timer_enabled == 1) {
				consumer_timer_monitor_stop(channel);
			}
			goto end_channel_error;
		}

		health_code_update();

		/*
		 * Channel and streams are now created. Inform the session daemon that
		 * everything went well and should wait to receive the channel and
		 * streams with ustctl API.
		 */
		ret_send = consumer_send_status_channel(sock, channel);
		if (ret_send < 0) {
			/*
			 * There is probably a problem on the socket.
			 */
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_GET_CHANNEL:
	{
		int ret, relayd_err = 0;
		uint64_t key = msg.u.get_channel.key;
		struct lttng_consumer_channel *found_channel;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			ERR("UST consumer get channel key %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			goto end_get_channel;
		}

		health_code_update();

		/* Send the channel to sessiond (and relayd, if applicable). */
		ret = send_channel_to_sessiond_and_relayd(
				sock, found_channel, ctx, &relayd_err);
		if (ret < 0) {
			if (relayd_err) {
				/*
				 * We were unable to send to the relayd the stream so avoid
				 * sending back a fatal error to the thread since this is OK
				 * and the consumer can continue its work. The above call
				 * has sent the error status message to the sessiond.
				 */
				goto end_get_channel_nosignal;
			}
			/*
			 * The communication was broken hence there is a bad state between
			 * the consumer and sessiond so stop everything.
			 */
			goto error_get_channel_fatal;
		}

		health_code_update();

		/*
		 * In no monitor mode, the streams ownership is kept inside the channel
		 * so don't send them to the data thread.
		 */
		if (!found_channel->monitor) {
			goto end_get_channel;
		}

		ret = send_streams_to_thread(found_channel, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error_get_channel_fatal;
		}
		/* List MUST be empty after or else it could be reused. */
		assert(cds_list_empty(&found_channel->streams.head));
end_get_channel:
		goto end_msg_sessiond;
error_get_channel_fatal:
		goto error_fatal;
end_get_channel_nosignal:
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;

		/*
		 * Only called if streams have not been sent to stream
		 * manager thread. However, channel has been sent to
		 * channel manager thread.
		 */
		notify_thread_del_channel(ctx, key);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_METADATA:
	{
		int ret;

		ret = close_metadata(msg.u.close_metadata.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_FLUSH_CHANNEL:
	{
		int ret;

		ret = flush_channel(msg.u.flush_channel.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL:
	{
		int ret;

		ret = clear_quiescent_channel(
				msg.u.clear_quiescent_channel.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_PUSH_METADATA:
	{
		int ret;
		uint64_t len = msg.u.push_metadata.len;
		uint64_t key = msg.u.push_metadata.key;
		uint64_t offset = msg.u.push_metadata.target_offset;
		uint64_t version = msg.u.push_metadata.version;
		struct lttng_consumer_channel *found_channel;

		DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
				len);

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			/*
			 * This is possible if the metadata creation on the consumer side
			 * is in flight vis-a-vis a concurrent push metadata from the
			 * session daemon. Simply return that the channel failed and the
			 * session daemon will handle that message correctly considering
			 * that this race is acceptable thus the DBG() statement here.
			 */
			DBG("UST consumer push metadata %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			goto end_push_metadata_msg_sessiond;
		}

		health_code_update();

		if (!len) {
			/*
			 * There is nothing to receive. We have simply
			 * checked whether the channel can be found.
			 */
			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			goto end_push_metadata_msg_sessiond;
		}

		/* Tell session daemon we are ready to receive the metadata. */
		ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_push_metadata_fatal;
		}

		health_code_update();

		/* Wait for more data. */
		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			goto error_push_metadata_fatal;
		}

		health_code_update();

		ret = lttng_ustconsumer_recv_metadata(sock, key, offset, len,
				version, found_channel, 0, 1);
		if (ret < 0) {
			/* error receiving from sessiond */
			goto error_push_metadata_fatal;
		} else {
			ret_code = ret;
			goto end_push_metadata_msg_sessiond;
		}
end_push_metadata_msg_sessiond:
		goto end_msg_sessiond;
error_push_metadata_fatal:
		goto error_fatal;
	}
	case LTTNG_CONSUMER_SETUP_METADATA:
	{
		int ret;

		ret = setup_metadata(ctx, msg.u.setup_metadata.key);
		if (ret) {
			ret_code = ret;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		struct lttng_consumer_channel *found_channel;
		uint64_t key = msg.u.snapshot_channel.key;
		int ret_send;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			DBG("UST snapshot channel not found for key %" PRIu64, key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			if (msg.u.snapshot_channel.metadata) {
				int ret_snapshot;

				ret_snapshot = snapshot_metadata(found_channel,
						key,
						msg.u.snapshot_channel.pathname,
						msg.u.snapshot_channel.relayd_id,
						ctx);
				if (ret_snapshot < 0) {
					ERR("Snapshot metadata failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			} else {
				int ret_snapshot;

				ret_snapshot = snapshot_channel(found_channel,
						key,
						msg.u.snapshot_channel.pathname,
						msg.u.snapshot_channel.relayd_id,
						msg.u.snapshot_channel
								.nb_packets_per_stream,
						ctx);
				if (ret_snapshot < 0) {
					ERR("Snapshot channel failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			}
		}
		health_code_update();
		ret_send = consumer_send_status_msg(sock, ret_code);
		if (ret_send < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		health_code_update();
		break;
	}
	case LTTNG_CONSUMER_DISCARDED_EVENTS:
	{
		int ret = 0;
		uint64_t discarded_events;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.discarded_events.session_id;
		uint64_t key = msg.u.discarded_events.channel_key;

		DBG("UST consumer discarded events command for session id %"
				PRIu64, id);
		rcu_read_lock();
		pthread_mutex_lock(&the_consumer_data.lock);

		ht = the_consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but they are not
		 * directly indexed, so we just use the first matching stream
		 * to extract the information we need, we default to 0 if not
		 * found (no events are dropped if the channel is not yet in
		 * use).
		 */
		discarded_events = 0;
		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->chan->key == key) {
				discarded_events = stream->chan->discarded_events;
				break;
			}
		}
		pthread_mutex_unlock(&the_consumer_data.lock);
		rcu_read_unlock();

		DBG("UST consumer discarded events command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events));
		if (ret < 0) {
			PERROR("send discarded events");
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_LOST_PACKETS:
	{
		int ret;
		uint64_t lost_packets;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.lost_packets.session_id;
		uint64_t key = msg.u.lost_packets.channel_key;

		DBG("UST consumer lost packets command for session id %"
				PRIu64, id);
		rcu_read_lock();
		pthread_mutex_lock(&the_consumer_data.lock);

		ht = the_consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but they are not
		 * directly indexed, so we just use the first matching stream
		 * to extract the information we need, we default to 0 if not
		 * found (no packets lost if the channel is not yet in use).
		 */
		lost_packets = 0;
		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->chan->key == key) {
				lost_packets = stream->chan->lost_packets;
				break;
			}
		}
		pthread_mutex_unlock(&the_consumer_data.lock);
		rcu_read_unlock();

		DBG("UST consumer lost packets command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &lost_packets,
				sizeof(lost_packets));
		if (ret < 0) {
			PERROR("send lost packets");
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
	{
		int channel_monitor_pipe, ret_send,
				ret_set_channel_monitor_pipe;
		ssize_t ret_recv;

		ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		/* Successfully received the command's type. */
		ret_send = consumer_send_status_msg(sock, ret_code);
		if (ret_send < 0) {
			goto error_fatal;
		}

		ret_recv = lttcomm_recv_fds_unix_sock(
				sock, &channel_monitor_pipe, 1);
		if (ret_recv != sizeof(channel_monitor_pipe)) {
			ERR("Failed to receive channel monitor pipe");
			goto error_fatal;
		}

		DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
		ret_set_channel_monitor_pipe =
				consumer_timer_thread_set_channel_monitor_pipe(
						channel_monitor_pipe);
		if (!ret_set_channel_monitor_pipe) {
			int flags;
			int ret_fcntl;

			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			/* Set the pipe as non-blocking. */
			ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0);
			if (ret_fcntl == -1) {
				PERROR("fcntl get flags of the channel monitoring pipe");
				goto error_fatal;
			}
			flags = ret_fcntl;

			ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL,
					flags | O_NONBLOCK);
			if (ret_fcntl == -1) {
				PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
				goto error_fatal;
			}
			DBG("Channel monitor pipe set as non-blocking");
		} else {
			ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_ROTATE_CHANNEL:
	{
		struct lttng_consumer_channel *found_channel;
		uint64_t key = msg.u.rotate_channel.key;
		int ret_send_status;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			DBG("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			int rotate_channel;

			/*
			 * Sample the rotate position of all the streams in
			 * this channel.
			 */
			rotate_channel = lttng_consumer_rotate_channel(
					found_channel, key,
					msg.u.rotate_channel.relayd_id,
					msg.u.rotate_channel.metadata, ctx);
			if (rotate_channel < 0) {
				ERR("Rotate channel failed");
				ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
			}

			health_code_update();
		}

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_rotate_channel_nosignal;
		}

		/*
		 * Rotate the streams that are ready right now.
		 * FIXME: this is a second consecutive iteration over the
		 * streams in a channel, there is probably a better way to
		 * handle this, but it needs to be after the
		 * consumer_send_status_msg() call.
		 */
		if (found_channel) {
			int ret_rotate_read_streams;

			ret_rotate_read_streams =
					lttng_consumer_rotate_ready_streams(
							found_channel, key,
							ctx);
			if (ret_rotate_read_streams < 0) {
				ERR("Rotate channel failed");
			}
		}
		break;
end_rotate_channel_nosignal:
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_CLEAR_CHANNEL:
	{
		struct lttng_consumer_channel *found_channel;
		uint64_t key = msg.u.clear_channel.key;
		int ret_send_status;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			DBG("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			int ret_clear_channel;

			ret_clear_channel = lttng_consumer_clear_channel(
					found_channel);
			if (ret_clear_channel) {
				ERR("Clear channel failed key %" PRIu64, key);
				ret_code = ret_clear_channel;
			}

			health_code_update();
		}
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		break;
	}
	case LTTNG_CONSUMER_INIT:
	{
		int ret_send_status;

		ret_code = lttng_consumer_init_command(ctx,
				msg.u.init.sessiond_uuid);
		health_code_update();
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		break;
	}
	case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
	{
		const struct lttng_credentials credentials = {
			.uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid),
			.gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid),
		};
		const bool is_local_trace =
				!msg.u.create_trace_chunk.relayd_id.is_set;
		const uint64_t relayd_id =
				msg.u.create_trace_chunk.relayd_id.value;
		const char *chunk_override_name =
				*msg.u.create_trace_chunk.override_name ?
						msg.u.create_trace_chunk.override_name :
						NULL;
		struct lttng_directory_handle *chunk_directory_handle = NULL;

		/*
		 * The session daemon will only provide a chunk directory file
		 * descriptor for local traces.
		 */
		if (is_local_trace) {
			int chunk_dirfd;
			int ret_send_status;
			ssize_t ret_recv;

			/* Acknowledge the reception of the command. */
			ret_send_status = consumer_send_status_msg(
					sock, LTTCOMM_CONSUMERD_SUCCESS);
			if (ret_send_status < 0) {
				/* Somehow, the session daemon is not responding anymore. */
				goto end_nosignal;
			}

			/*
			 * Receive trace chunk domain dirfd.
			 */
			ret_recv = lttcomm_recv_fds_unix_sock(
					sock, &chunk_dirfd, 1);
			if (ret_recv != sizeof(chunk_dirfd)) {
				ERR("Failed to receive trace chunk domain directory file descriptor");
				goto error_fatal;
			}

			DBG("Received trace chunk domain directory fd (%d)",
					chunk_dirfd);
			chunk_directory_handle = lttng_directory_handle_create_from_dirfd(
					chunk_dirfd);
			if (!chunk_directory_handle) {
				ERR("Failed to initialize chunk domain directory handle from directory file descriptor");
				if (close(chunk_dirfd)) {
					PERROR("Failed to close chunk directory file descriptor");
				}
				goto error_fatal;
			}
		}

		ret_code = lttng_consumer_create_trace_chunk(
				!is_local_trace ? &relayd_id : NULL,
				msg.u.create_trace_chunk.session_id,
				msg.u.create_trace_chunk.chunk_id,
				(time_t) msg.u.create_trace_chunk
						.creation_timestamp,
				chunk_override_name,
				msg.u.create_trace_chunk.credentials.is_set ?
						&credentials :
						NULL,
				chunk_directory_handle);
		lttng_directory_handle_put(chunk_directory_handle);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
	{
		enum lttng_trace_chunk_command_type close_command =
				msg.u.close_trace_chunk.close_command.value;
		const uint64_t relayd_id =
				msg.u.close_trace_chunk.relayd_id.value;
		struct lttcomm_consumer_close_trace_chunk_reply reply;
		char closed_trace_chunk_path[LTTNG_PATH_MAX] = {};
		int ret;

		ret_code = lttng_consumer_close_trace_chunk(
				msg.u.close_trace_chunk.relayd_id.is_set ?
						&relayd_id :
						NULL,
				msg.u.close_trace_chunk.session_id,
				msg.u.close_trace_chunk.chunk_id,
				(time_t) msg.u.close_trace_chunk.close_timestamp,
				msg.u.close_trace_chunk.close_command.is_set ?
						&close_command :
						NULL, closed_trace_chunk_path);
		reply.ret_code = ret_code;
		reply.path_length = strlen(closed_trace_chunk_path) + 1;
		ret = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
		if (ret != sizeof(reply)) {
			goto error_fatal;
		}
		ret = lttcomm_send_unix_sock(sock, closed_trace_chunk_path,
				reply.path_length);
		if (ret != reply.path_length) {
			goto error_fatal;
		}
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
	{
		const uint64_t relayd_id =
				msg.u.trace_chunk_exists.relayd_id.value;

		ret_code = lttng_consumer_trace_chunk_exists(
				msg.u.trace_chunk_exists.relayd_id.is_set ?
						&relayd_id : NULL,
				msg.u.trace_chunk_exists.session_id,
				msg.u.trace_chunk_exists.chunk_id);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS:
	{
		const uint64_t key = msg.u.open_channel_packets.key;
		struct lttng_consumer_channel *found_channel =
				consumer_find_channel(key);

		if (found_channel) {
			pthread_mutex_lock(&found_channel->lock);
			ret_code = lttng_consumer_open_channel_packets(
					found_channel);
			pthread_mutex_unlock(&found_channel->lock);
		} else {
			/*
			 * The channel could have disappeared in per-pid
			 * buffering mode.
			 */
			DBG("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		}

		health_code_update();
		goto end_msg_sessiond;
	}
	default:
		break;
	}
end_nosignal:
	/*
	 * Return 1 to indicate success since the 0 value can be a socket
	 * shutdown during the recv() or send() call.
	 */
	ret_func = 1;
	goto end;

end_msg_sessiond:
	/*
	 * The returned value here is not useful since either way we'll return 1 to
	 * the caller because the session daemon socket management is done
	 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
	 */
	{
		int ret_send_status;

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			goto error_fatal;
		}
	}

	ret_func = 1;
	goto end;

end_channel_error:
	if (channel) {
		/*
		 * Free channel here since no one has a reference to it. We don't
		 * free after that because a stream can store this pointer.
		 */
		destroy_channel(channel);
	}
	/* We have to send a status channel message indicating an error. */
	{
		int ret_send_status;

		ret_send_status = consumer_send_status_channel(sock, NULL);
		if (ret_send_status < 0) {
			/* Stop everything if session daemon can not be notified. */
			goto error_fatal;
		}
	}

	ret_func = 1;
	goto end;

error_fatal:
	/* This will issue a consumer stop. */
	ret_func = -1;
	goto end;

end:
	health_code_update();
	rcu_read_unlock();
	return ret_func;
}
static void lttng_ust_flush_buffer(
		struct lttng_consumer_stream *stream, int producer_active)
{
	assert(stream);
	assert(stream->ustream);

	lttng_ust_ctl_flush_buffer(stream->ustream, producer_active);
}
/*
 * Take a snapshot for a specific stream.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);

	return lttng_ust_ctl_snapshot(stream->ustream);
}
/*
 * Sample consumed and produced positions for a specific stream.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_ustconsumer_sample_snapshot_positions(
		struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);

	return lttng_ust_ctl_snapshot_sample_positions(stream->ustream);
}
/*
 * Get the produced position of a specific stream.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_ustconsumer_get_produced_snapshot(
		struct lttng_consumer_stream *stream, unsigned long *pos)
{
	assert(stream->ustream);

	return lttng_ust_ctl_snapshot_get_produced(stream->ustream, pos);
}
/*
 * Get the consumed position of a specific stream.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_ustconsumer_get_consumed_snapshot(
		struct lttng_consumer_stream *stream, unsigned long *pos)
{
	assert(stream->ustream);

	return lttng_ust_ctl_snapshot_get_consumed(stream->ustream, pos);
}
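
/*
 * Illustrative sketch (not part of the consumer, and not called by it):
 * combining the snapshot and position getters above to compute how many
 * bytes are ready to be consumed from a stream's ring buffer. Assumes the
 * caller holds the stream lock, as the wrapped ustctl calls require.
 */
static __attribute__((unused))
int example_pending_bytes(struct lttng_consumer_stream *stream,
		unsigned long *pending)
{
	int ret;
	unsigned long consumed, produced;

	/* Sample both positions coherently before reading them. */
	ret = lttng_ustconsumer_sample_snapshot_positions(stream);
	if (ret < 0) {
		return ret;
	}
	ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed);
	if (ret < 0) {
		return ret;
	}
	ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced);
	if (ret < 0) {
		return ret;
	}

	/* The produced position is always at or ahead of the consumed one. */
	*pending = produced - consumed;
	return 0;
}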
void lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream,
		int producer)
{
	assert(stream->ustream);

	lttng_ust_ctl_flush_buffer(stream->ustream, producer);
}
void lttng_ustconsumer_clear_buffer(struct lttng_consumer_stream *stream)
{
	assert(stream->ustream);

	lttng_ust_ctl_clear_buffer(stream->ustream);
}
int lttng_ustconsumer_get_current_timestamp(
		struct lttng_consumer_stream *stream, uint64_t *ts)
{
	assert(stream->ustream);

	return lttng_ust_ctl_get_current_timestamp(stream->ustream, ts);
}
int lttng_ustconsumer_get_sequence_number(
		struct lttng_consumer_stream *stream, uint64_t *seq)
{
	assert(stream->ustream);

	return lttng_ust_ctl_get_sequence_number(stream->ustream, seq);
}
/*
 * Called when the stream signals the consumer that it has hung up.
 */
void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
{
	assert(stream->ustream);

	pthread_mutex_lock(&stream->lock);
	if (!stream->quiescent) {
		lttng_ust_ctl_flush_buffer(stream->ustream, 0);
		stream->quiescent = true;
	}
	pthread_mutex_unlock(&stream->lock);
	stream->hangup_flush_done = 1;
}
void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
{
	int i;

	assert(chan->uchan);
	assert(chan->buffer_credentials.is_set);

	if (chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(chan);
	}
	for (i = 0; i < chan->nr_stream_fds; i++) {
		int ret;

		ret = close(chan->stream_fds[i]);
		if (ret) {
			PERROR("close");
		}
		if (chan->shm_path[0]) {
			char shm_path[PATH_MAX];

			ret = get_stream_shm_path(shm_path, chan->shm_path, i);
			if (ret) {
				ERR("Cannot get stream shm path");
			}
			ret = run_as_unlink(shm_path,
					lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
							chan->buffer_credentials)),
					lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
							chan->buffer_credentials)));
			if (ret) {
				PERROR("unlink %s", shm_path);
			}
		}
	}
}
void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan)
{
	assert(chan->uchan);
	assert(chan->buffer_credentials.is_set);

	consumer_metadata_cache_destroy(chan);
	lttng_ust_ctl_destroy_channel(chan->uchan);
	/* Try to rmdir all directories under shm_path root. */
	if (chan->root_shm_path[0]) {
		(void) run_as_rmdir_recursive(chan->root_shm_path,
				lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
						chan->buffer_credentials)),
				lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
						chan->buffer_credentials)),
				LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
	}
	free(chan->stream_fds);
}
void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
{
	assert(stream->ustream);

	if (stream->chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(stream->chan);
	}
	lttng_ust_ctl_destroy_stream(stream->ustream);
}
int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
{
	assert(stream->ustream);

	return lttng_ust_ctl_stream_get_wakeup_fd(stream->ustream);
}
int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
{
	assert(stream->ustream);

	return lttng_ust_ctl_stream_close_wakeup_fd(stream->ustream);
}
/*
 * Write up to one packet from the metadata cache to the channel.
 *
 * Returns the number of bytes pushed from the cache into the ring buffer, or a
 * negative value on error.
 */
static int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
{
	ssize_t write_len;
	int ret;

	pthread_mutex_lock(&stream->chan->metadata_cache->lock);
	if (stream->chan->metadata_cache->contents.size ==
			stream->ust_metadata_pushed) {
		/*
		 * In the context of a user space metadata channel, a
		 * change in version can be detected in two ways:
		 *   1) During the pre-consume of the `read_subbuffer` loop,
		 *   2) When populating the metadata ring buffer (i.e. here).
		 *
		 * This function is invoked when there is no metadata
		 * available in the ring-buffer. If all data was consumed
		 * up to the size of the metadata cache, there is no metadata
		 * to insert in the ring-buffer.
		 *
		 * However, the metadata version could still have changed (a
		 * regeneration without any new data will yield the same cache
		 * size).
		 *
		 * The cache's version is checked for a version change and the
		 * consumed position is reset if one occurred.
		 *
		 * This check is only necessary for the user space domain as
		 * it has to manage the cache explicitly. If this reset was not
		 * performed, no metadata would be consumed (and no reset would
		 * occur as part of the pre-consume) until the metadata size
		 * exceeded the cache size.
		 */
		if (stream->metadata_version !=
				stream->chan->metadata_cache->version) {
			metadata_stream_reset_cache_consumed_position(stream);
			consumer_stream_metadata_set_version(stream,
					stream->chan->metadata_cache->version);
		} else {
			ret = 0;
			goto end;
		}
	}

	write_len = lttng_ust_ctl_write_one_packet_to_channel(stream->chan->uchan,
			&stream->chan->metadata_cache->contents.data[stream->ust_metadata_pushed],
			stream->chan->metadata_cache->contents.size -
					stream->ust_metadata_pushed);
	assert(write_len != 0);
	if (write_len < 0) {
		ERR("Writing one metadata packet");
		ret = write_len;
		goto end;
	}
	stream->ust_metadata_pushed += write_len;

	assert(stream->chan->metadata_cache->contents.size >=
			stream->ust_metadata_pushed);
	ret = write_len;

	/*
	 * Switch packet (but don't open the next one) on every commit of
	 * a metadata packet. Since the subbuffer is fully filled (with padding,
	 * if needed), the stream is "quiescent" after this commit.
	 */
	lttng_ust_ctl_flush_buffer(stream->ustream, 1);
	stream->quiescent = true;
end:
	pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
	return ret;
}
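
/*
 * Illustrative sketch (not part of the consumer, and not called by it):
 * draining the metadata cache into the ring buffer one packet at a time
 * with commit_one_metadata_packet(). A zero return means the cache is
 * empty, a positive value is a byte count, and a negative value is an
 * error (-ENOBUFS when the ring buffer is full). Assumes the stream lock
 * is held, as for the real callers in this file.
 */
static __attribute__((unused))
int example_drain_metadata_cache(struct lttng_consumer_stream *stream)
{
	for (;;) {
		const int ret = commit_one_metadata_packet(stream);

		if (ret < 0) {
			/* -ENOBUFS (ring buffer full) or a fatal error. */
			return ret;
		}
		if (ret == 0) {
			/* Nothing left in the cache. */
			return 0;
		}
		/* ret > 0: bytes were pushed; try to push more. */
	}
}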
/*
 * Sync metadata, meaning request it from the session daemon and snapshot the
 * ring-buffer positions so the metadata thread can consume it.
 *
 * The metadata stream lock is held on entry, but it must be released while
 * interacting with the sessiond, else we cause a deadlock with live
 * awaiting on metadata to be pushed out.
 *
 * The RCU read side lock must be held by the caller.
 */
enum sync_metadata_status lttng_ustconsumer_sync_metadata(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *metadata_stream)
{
	int ret;
	enum sync_metadata_status status;
	struct lttng_consumer_channel *metadata_channel;

	assert(ctx);
	assert(metadata_stream);

	metadata_channel = metadata_stream->chan;
	pthread_mutex_unlock(&metadata_stream->lock);
	/*
	 * Request metadata from the sessiond, but don't wait for the flush
	 * because we locked the metadata thread.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 0);
	pthread_mutex_lock(&metadata_stream->lock);
	if (ret < 0) {
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	}

	/*
	 * The metadata stream and channel can be deleted while the
	 * metadata stream lock was released. The stream is checked
	 * for deletion before we use it further.
	 *
	 * Note that it is safe to access a logically-deleted stream since its
	 * existence is still guaranteed by the RCU read side lock. However,
	 * it should no longer be used. The close/deletion of the metadata
	 * channel and stream already guarantees that all metadata has been
	 * consumed. Therefore, there is nothing left to do in this function.
	 */
	if (consumer_stream_is_deleted(metadata_stream)) {
		DBG("Metadata stream %" PRIu64 " was deleted during the metadata synchronization",
				metadata_stream->key);
		status = SYNC_METADATA_STATUS_NO_DATA;
		goto end;
	}

	ret = commit_one_metadata_packet(metadata_stream);
	if (ret < 0) {
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	} else if (ret > 0) {
		status = SYNC_METADATA_STATUS_NEW_DATA;
	} else /* ret == 0 */ {
		status = SYNC_METADATA_STATUS_NO_DATA;
		goto end;
	}

	ret = lttng_ust_ctl_snapshot(metadata_stream->ustream);
	if (ret < 0) {
		ERR("Failed to take a snapshot of the metadata ring-buffer positions, ret = %d", ret);
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	}

end:
	return status;
}
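
/*
 * Illustrative sketch (not part of the consumer, and not called by it):
 * how a metadata thread might react to the status returned by
 * lttng_ustconsumer_sync_metadata(). Assumes the metadata stream lock and
 * the RCU read side lock are held, as documented above.
 */
static __attribute__((unused))
int example_handle_sync_status(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *metadata_stream)
{
	switch (lttng_ustconsumer_sync_metadata(ctx, metadata_stream)) {
	case SYNC_METADATA_STATUS_NEW_DATA:
		return 1;	/* New metadata snapshotted; go read it. */
	case SYNC_METADATA_STATUS_NO_DATA:
		return 0;	/* Nothing to consume for now. */
	case SYNC_METADATA_STATUS_ERROR:
	default:
		return -1;	/* Propagate the failure to the caller. */
	}
}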
/*
 * Return 0 on success else a negative value.
 */
static int notify_if_more_data(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_ust_ctl_consumer_stream *ustream;

	assert(stream);
	assert(ctx);

	ustream = stream->ustream;

	/*
	 * First, we are going to check if there is a new subbuffer available
	 * before reading the stream wait_fd.
	 */
	/* Get the next subbuffer. */
	ret = lttng_ust_ctl_get_next_subbuf(ustream);
	if (ret) {
		/* No more data found, flag the stream. */
		stream->has_data = 0;
		ret = 0;
		goto end;
	}

	ret = lttng_ust_ctl_put_subbuf(ustream);
	assert(!ret);

	/* This stream still has data. Flag it and wake up the data thread. */
	stream->has_data = 1;

	if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
		ssize_t writelen;

		writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
		if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
			ret = writelen;
			goto end;
		}

		/* The wake up pipe has been notified. */
		ctx->has_wakeup = 1;
	}
	ret = 0;

end:
	return ret;
}
static int consumer_stream_ust_on_wake_up(struct lttng_consumer_stream *stream)
{
	int ret = 0;

	/*
	 * We can consume the 1 byte written into the wait_fd by
	 * UST. Don't trigger error if we cannot read this one byte
	 * (read returns 0), or if the error is EAGAIN or EWOULDBLOCK.
	 *
	 * This is only done when the stream is monitored by a thread,
	 * before the flush is done after a hangup and if the stream
	 * is not flagged with data since there might be nothing to
	 * consume in the wait fd but still have data available
	 * flagged by the consumer wake up pipe.
	 */
	if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
		char dummy;
		ssize_t readlen;

		readlen = lttng_read(stream->wait_fd, &dummy, 1);
		if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
			ret = readlen;
		}
	}

	return ret;
}
static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = lttng_ust_ctl_get_subbuf_size(
			stream->ustream, &subbuf->info.data.subbuf_size);
	if (ret) {
		goto end;
	}

	ret = lttng_ust_ctl_get_padded_subbuf_size(
			stream->ustream, &subbuf->info.data.padded_subbuf_size);
	if (ret) {
		goto end;
	}

end:
	return ret;
}
static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	subbuf->info.metadata.version = stream->metadata_version;

end:
	return ret;
}
static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	ret = lttng_ust_ctl_get_packet_size(
			stream->ustream, &subbuf->info.data.packet_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer packet size");
		goto end;
	}

	ret = lttng_ust_ctl_get_content_size(
			stream->ustream, &subbuf->info.data.content_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer content size");
		goto end;
	}

	ret = lttng_ust_ctl_get_timestamp_begin(
			stream->ustream, &subbuf->info.data.timestamp_begin);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer begin timestamp");
		goto end;
	}

	ret = lttng_ust_ctl_get_timestamp_end(
			stream->ustream, &subbuf->info.data.timestamp_end);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer end timestamp");
		goto end;
	}

	ret = lttng_ust_ctl_get_events_discarded(
			stream->ustream, &subbuf->info.data.events_discarded);
	if (ret) {
		PERROR("Failed to get sub-buffer events discarded count");
		goto end;
	}

	ret = lttng_ust_ctl_get_sequence_number(stream->ustream,
			&subbuf->info.data.sequence_number.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get sub-buffer sequence number");
			goto end;
		}
	} else {
		subbuf->info.data.sequence_number.is_set = true;
	}

	ret = lttng_ust_ctl_get_stream_id(
			stream->ustream, &subbuf->info.data.stream_id);
	if (ret < 0) {
		PERROR("Failed to get stream id");
		goto end;
	}

	ret = lttng_ust_ctl_get_instance_id(stream->ustream,
			&subbuf->info.data.stream_instance_id.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get stream instance id");
			goto end;
		}
	} else {
		subbuf->info.data.stream_instance_id.is_set = true;
	}
end:
	return ret;
}
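
/*
 * Illustrative sketch (not part of the consumer, and not called by it):
 * the sequence number and stream instance id extracted above are optional
 * fields since older tracers do not provide them, so a consumer of the
 * sub-buffer info must check is_set before reading the value.
 */
static __attribute__((unused))
void example_log_sequence_number(const struct stream_subbuffer *subbuf)
{
	if (subbuf->info.data.sequence_number.is_set) {
		DBG("Sub-buffer sequence number: %" PRIu64,
				subbuf->info.data.sequence_number.value);
	} else {
		DBG("Sub-buffer sequence number not provided by the tracer");
	}
}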
static int get_next_subbuffer_common(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	const char *addr;

	ret = stream->read_subbuffer_ops.extract_subbuffer_info(
			stream, subbuffer);
	if (ret) {
		goto end;
	}

	ret = get_current_subbuf_addr(stream, &addr);
	if (ret) {
		goto end;
	}

	subbuffer->buffer.buffer = lttng_buffer_view_init(
			addr, 0, subbuffer->info.data.padded_subbuf_size);
	assert(subbuffer->buffer.buffer.data != NULL);
end:
	return ret;
}
static enum get_next_subbuffer_status get_next_subbuffer(
		struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	enum get_next_subbuffer_status status;

	ret = lttng_ust_ctl_get_next_subbuf(stream->ustream);
	switch (ret) {
	case 0:
		status = GET_NEXT_SUBBUFFER_STATUS_OK;
		break;
	case -ENODATA:
	case -EAGAIN:
		/*
		 * The caller only expects -ENODATA when there is no data to
		 * read, but the kernel tracer returns -EAGAIN when there is
		 * currently no data for a non-finalized stream, and -ENODATA
		 * when there is no data for a finalized stream. Those can be
		 * combined into a -ENODATA return value.
		 */
		status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
		goto end;
	default:
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = get_next_subbuffer_common(stream, subbuffer);
	if (ret) {
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}
end:
	return status;
}
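
/*
 * Illustrative sketch (not part of the consumer, and not called by it):
 * consuming a single sub-buffer through get_next_subbuffer() and releasing
 * it with the raw ustctl call (the real read loop releases it through the
 * ops table's put_next_subbuffer, defined further below). `process` is a
 * hypothetical callback supplied by the caller; the stream lock is assumed
 * to be held.
 */
static __attribute__((unused))
int example_read_one_subbuffer(struct lttng_consumer_stream *stream,
		int (*process)(const struct stream_subbuffer *subbuffer))
{
	int ret;
	struct stream_subbuffer subbuffer;

	memset(&subbuffer, 0, sizeof(subbuffer));
	switch (get_next_subbuffer(stream, &subbuffer)) {
	case GET_NEXT_SUBBUFFER_STATUS_OK:
		break;
	case GET_NEXT_SUBBUFFER_STATUS_NO_DATA:
		return 0;
	case GET_NEXT_SUBBUFFER_STATUS_ERROR:
	default:
		return -1;
	}

	ret = process(&subbuffer);
	/* Always release the sub-buffer, even if processing failed. */
	(void) lttng_ust_ctl_put_next_subbuf(stream->ustream);
	return ret;
}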
static enum get_next_subbuffer_status get_next_subbuffer_metadata(
		struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	bool cache_empty;
	bool got_subbuffer;
	bool coherent;
	bool buffer_empty;
	unsigned long consumed_pos, produced_pos;
	enum get_next_subbuffer_status status;

	do {
		ret = lttng_ust_ctl_get_next_subbuf(stream->ustream);
		if (ret == 0) {
			got_subbuffer = true;
		} else {
			got_subbuffer = false;
			if (ret != -EAGAIN) {
				/* Fatal error. */
				status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
				goto end;
			}
		}

		/*
		 * Determine if the cache is empty and ensure that a sub-buffer
		 * is made available if the cache is not empty.
		 */
		if (!got_subbuffer) {
			ret = commit_one_metadata_packet(stream);
			if (ret < 0 && ret != -ENOBUFS) {
				status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
				goto end;
			} else if (ret == 0) {
				/* Not an error, the cache is empty. */
				cache_empty = true;
				status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
				goto end;
			} else {
				cache_empty = false;
			}
		} else {
			pthread_mutex_lock(&stream->chan->metadata_cache->lock);
			cache_empty = stream->chan->metadata_cache->contents.size ==
					stream->ust_metadata_pushed;
			pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
		}
	} while (!got_subbuffer);

	/* Populate sub-buffer infos and view. */
	ret = get_next_subbuffer_common(stream, subbuffer);
	if (ret) {
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = lttng_ustconsumer_sample_snapshot_positions(stream);
	if (ret < 0) {
		/*
		 * -EAGAIN is not expected since we got a sub-buffer and haven't
		 * pushed the consumption position yet (on put_next).
		 */
		PERROR("Failed to take a snapshot of metadata buffer positions");
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
	if (ret) {
		PERROR("Failed to get metadata consumed position");
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
	if (ret) {
		PERROR("Failed to get metadata produced position");
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	/* Last sub-buffer of the ring buffer? */
	buffer_empty = (consumed_pos + stream->max_sb_size) == produced_pos;

	/*
	 * The sessiond registry lock ensures that coherent units of metadata
	 * are pushed to the consumer daemon at once. Hence, if a sub-buffer is
	 * acquired, the cache is empty, and it is the only sub-buffer
	 * available, it is safe to assume that it is "coherent".
	 */
	coherent = got_subbuffer && cache_empty && buffer_empty;

	LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);
	status = GET_NEXT_SUBBUFFER_STATUS_OK;
end:
	return status;
}
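
/*
 * Illustrative sketch (not part of the consumer, and not called by it):
 * reading the coherency flag set by get_next_subbuffer_metadata() above.
 * A live viewer should only be sent metadata once a coherent unit has been
 * fully acquired.
 */
static __attribute__((unused))
bool example_metadata_is_coherent(const struct stream_subbuffer *subbuffer)
{
	/* get_next_subbuffer_metadata() always sets this optional field. */
	return LTTNG_OPTIONAL_GET(subbuffer->info.metadata.coherent);
}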
static int put_next_subbuffer(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	const int ret = lttng_ust_ctl_put_next_subbuf(stream->ustream);

	assert(ret == 0);
	return ret;
}
static int signal_metadata(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ASSERT_LOCKED(stream->metadata_rdv_lock);
	return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
}
static int lttng_ustconsumer_set_stream_ops(
		struct lttng_consumer_stream *stream)
{
	int ret = 0;

	stream->read_subbuffer_ops.on_wake_up = consumer_stream_ust_on_wake_up;
	if (stream->metadata_flag) {
		stream->read_subbuffer_ops.get_next_subbuffer =
				get_next_subbuffer_metadata;
		stream->read_subbuffer_ops.extract_subbuffer_info =
				extract_metadata_subbuffer_info;
		stream->read_subbuffer_ops.reset_metadata =
				metadata_stream_reset_cache_consumed_position;
		if (stream->chan->is_live) {
			stream->read_subbuffer_ops.on_sleep = signal_metadata;
			ret = consumer_stream_enable_metadata_bucketization(
					stream);
			if (ret) {
				goto end;
			}
		}
	} else {
		stream->read_subbuffer_ops.get_next_subbuffer =
				get_next_subbuffer;
		stream->read_subbuffer_ops.extract_subbuffer_info =
				extract_data_subbuffer_info;
		stream->read_subbuffer_ops.on_sleep = notify_if_more_data;
		if (stream->chan->is_live) {
			stream->read_subbuffer_ops.send_live_beacon =
					consumer_flush_ust_index;
		}
	}

	stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
end:
	return ret;
}
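
/*
 * Illustrative sketch (not part of the consumer, and not called by it):
 * the generic read loop drives a stream through the ops table populated
 * above without knowing whether it is a data or metadata stream. The ops
 * signatures used here mirror the static functions assigned above; the
 * stream lock is assumed to be held.
 */
static __attribute__((unused))
int example_dispatch_one_subbuffer(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	const enum get_next_subbuffer_status status =
			stream->read_subbuffer_ops.get_next_subbuffer(
					stream, subbuffer);

	if (status != GET_NEXT_SUBBUFFER_STATUS_OK) {
		return status == GET_NEXT_SUBBUFFER_STATUS_NO_DATA ? 0 : -1;
	}

	/* Release the sub-buffer through the same ops table. */
	return stream->read_subbuffer_ops.put_next_subbuffer(stream, subbuffer);
}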
/*
 * Called when a stream is created.
 *
 * Return 0 on success or else a negative value.
 */
int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Don't create anything if this is set for streaming or if there is
	 * no current trace chunk on the parent channel.
	 */
	if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
			stream->chan->trace_chunk) {
		ret = consumer_stream_create_output_files(stream, true);
		if (ret) {
			goto error;
		}
	}

	lttng_ustconsumer_set_stream_ops(stream);
	ret = 0;

error:
	return ret;
}
/*
 * Check if data is still being extracted from the buffers for a specific
 * stream. Both the consumer data lock and the stream lock MUST be acquired
 * before calling this function.
 *
 * Return 1 if the traced data are still getting read else 0 meaning that the
 * data is available for trace viewer reading.
 */
int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream->ustream);
	ASSERT_LOCKED(stream->lock);

	DBG("UST consumer checking data pending");

	if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
		ret = 0;
		goto end;
	}

	if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
		uint64_t contiguous, pushed;

		/* Ease our life a bit. */
		pthread_mutex_lock(&stream->chan->metadata_cache->lock);
		contiguous = stream->chan->metadata_cache->contents.size;
		pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
		pushed = stream->ust_metadata_pushed;

		/*
		 * We can simply check whether all contiguously available data
		 * has been pushed to the ring buffer, since the push operation
		 * is performed within get_next_subbuf(), and because both
		 * get_next_subbuf() and put_next_subbuf() are issued atomically
		 * thanks to the stream lock within
		 * lttng_ustconsumer_read_subbuffer(). This basically means that
		 * whenever ust_metadata_pushed is incremented, the associated
		 * metadata has been consumed from the metadata stream.
		 */
		DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
				contiguous, pushed);
		assert(((int64_t) (contiguous - pushed)) >= 0);
		if ((contiguous != pushed) ||
				(((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
			ret = 1;	/* Data is pending */
			goto end;
		}
	} else {
		ret = lttng_ust_ctl_get_next_subbuf(stream->ustream);
		if (ret == 0) {
			/*
			 * There is still data so let's put back this
			 * subbuffer.
			 */
			ret = lttng_ust_ctl_put_subbuf(stream->ustream);
			assert(ret == 0);
			ret = 1;	/* Data is pending */
			goto end;
		}
	}

	/* Data is NOT pending so ready to be read. */
	ret = 0;

end:
	return ret;
}
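
/*
 * Illustrative sketch (not part of the consumer, and not called by it):
 * checking whether a stream's buffered data has been fully extracted. Both
 * the consumer data lock and the stream lock must be held around the check,
 * per the contract documented above; the locking is elided here for brevity.
 */
static __attribute__((unused))
bool example_stream_drained(struct lttng_consumer_stream *stream)
{
	/* 1 means data is still pending; 0 means ready for reading. */
	return lttng_ustconsumer_data_pending(stream) == 0;
}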
/*
 * Stop a given metadata channel timer if enabled and close the wait fd which
 * is the poll pipe of the metadata stream.
 *
 * This MUST be called with the metadata channel lock acquired.
 */
void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
{
	int ret;

	assert(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);

	DBG("Closing metadata channel key %" PRIu64, metadata->key);

	if (metadata->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(metadata);
	}

	if (!metadata->metadata_stream) {
		goto end;
	}

	/*
	 * Close the write side so the thread monitoring the stream wakes up,
	 * if any, and cleans the metadata stream.
	 */
	if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
		ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
		if (ret < 0) {
			PERROR("closing metadata pipe write side");
		}
		metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
	}

end:
	return;
}
/*
 * Close every metadata stream wait fd of the metadata hash table. This
 * function MUST be used very carefully so as not to run into a race between
 * the metadata thread handling streams and this function closing their wait
 * fd.
 *
 * For UST, this is used when the session daemon hangs up. It's the metadata
 * producer, so calling this is safe because we are assured that no state
 * change can occur in the metadata thread for the streams in the hash table.
 */
void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(metadata_ht);
	assert(metadata_ht->ht);

	DBG("UST consumer closing all metadata streams");

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
			node.node) {
		health_code_update();

		pthread_mutex_lock(&stream->chan->lock);
		lttng_ustconsumer_close_metadata(stream->chan);
		pthread_mutex_unlock(&stream->chan->lock);
	}
	rcu_read_unlock();
}
void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
{
	int ret;

	ret = lttng_ust_ctl_stream_close_wakeup_fd(stream->ustream);
	if (ret < 0) {
		ERR("Unable to close wakeup fd");
	}
}
/*
 * Please refer to consumer-timer.c before adding any lock within this
 * function or any of its callees. Timers have a very strict locking
 * semantic with respect to teardown. Failure to respect this semantic
 * introduces deadlocks.
 *
 * DON'T hold the metadata lock when calling this function, else this
 * can cause a deadlock involving the consumer awaiting for metadata to be
 * pushed out due to concurrent interaction with the session daemon.
 */
int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel, int timer, int wait)
{
	struct lttcomm_metadata_request_msg request;
	struct lttcomm_consumer_msg msg;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	uint64_t len, key, offset, version;
	int ret;

	assert(channel);
	assert(channel->metadata_cache);

	memset(&request, 0, sizeof(request));

	/* Send the metadata request to sessiond. */
	switch (the_consumer_data.type) {
	case LTTNG_CONSUMER64_UST:
		request.bits_per_long = 64;
		break;
	case LTTNG_CONSUMER32_UST:
		request.bits_per_long = 32;
		break;
	default:
		request.bits_per_long = 0;
		break;
	}

	request.session_id = channel->session_id;
	request.session_id_per_pid = channel->session_id_per_pid;
	/*
	 * Request the application UID here so the metadata of that application can
	 * be sent back. The channel UID corresponds to the user UID of the session
	 * used for the rights on the stream file(s).
	 */
	request.uid = channel->ust_app_uid;
	request.key = channel->key;

	DBG("Sending metadata request to sessiond, session id %" PRIu64
			", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
			request.session_id, request.session_id_per_pid,
			request.uid, request.key);

	pthread_mutex_lock(&ctx->metadata_socket_lock);

	health_code_update();

	ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
			sizeof(request));
	if (ret < 0) {
		ERR("Asking metadata to sessiond");
		goto end;
	}

	health_code_update();

	/* Receive the metadata from sessiond. */
	ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
			sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %d (expects %zu)",
				ret, sizeof(msg));
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
		/*
		 * The ret value might be 0 meaning an orderly shutdown but this is ok
		 * since the caller handles this.
		 */
		goto end;
	}

	health_code_update();

	if (msg.cmd_type == LTTNG_ERR_UND) {
		/* No registry found. */
		(void) consumer_send_status_msg(ctx->consumer_metadata_socket,
				ret_code);
		ret = 0;
		goto end;
	} else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
		ERR("Unexpected cmd_type received %d", msg.cmd_type);
		ret = -1;
		goto end;
	}

	len = msg.u.push_metadata.len;
	key = msg.u.push_metadata.key;
	offset = msg.u.push_metadata.target_offset;
	version = msg.u.push_metadata.version;

	assert(key == channel->key);
	if (len == 0) {
		DBG("No new metadata to receive for key %" PRIu64, key);
	}

	health_code_update();

	/* Tell session daemon we are ready to receive the metadata. */
	ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
			LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0 || len == 0) {
		/*
		 * Somehow, the session daemon is not responding anymore or there is
		 * nothing to receive.
		 */
		goto end;
	}

	health_code_update();

	ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
			key, offset, len, version, channel, timer, wait);
	if (ret >= 0) {
		/*
		 * Only send the status msg if the sessiond is alive meaning a positive
		 * ret code.
		 */
		(void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
	}
	ret = 0;

end:
	health_code_update();
	pthread_mutex_unlock(&ctx->metadata_socket_lock);
	return ret;
}
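
/*
 * Illustrative sketch (not part of the consumer, and not called by it):
 * requesting metadata from a non-timer context while waiting for the flush
 * to complete. Per the warning above, the metadata stream lock must NOT be
 * held by the caller.
 */
static __attribute__((unused))
int example_request_metadata_blocking(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel)
{
	/* timer = 0 (not called from a timer), wait = 1 (wait for flush). */
	return lttng_ustconsumer_request_metadata(ctx, channel, 0, 1);
}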
/*
 * Wrapper around the ustctl call to get the stream id.
 */
int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream,
		uint64_t *stream_id)
{
	assert(stream);
	assert(stream_id);

	return lttng_ust_ctl_get_stream_id(stream->ustream, stream_id);
}