/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
#include "ust-consumer.hpp"

#include <common/common.hpp>
#include <common/compat/endian.hpp>
#include <common/compat/fcntl.hpp>
#include <common/consumer/consumer-metadata-cache.hpp>
#include <common/consumer/consumer-stream.hpp>
#include <common/consumer/consumer-timer.hpp>
#include <common/consumer/consumer.hpp>
#include <common/index/index.hpp>
#include <common/optional.hpp>
#include <common/relayd/relayd.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/shm.hpp>
#include <common/urcu.hpp>
#include <common/utils.hpp>

#include <lttng/ust-ctl.h>
#include <lttng/ust-sigbus.h>

#include <bin/lttng-consumerd/health-consumerd.hpp>

#include <inttypes.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <urcu/list.h>

#define INT_MAX_STR_LEN 12 /* includes \0; fits "-2147483648" */
extern struct lttng_consumer_global_data the_consumer_data;
extern int consumer_poll_timeout;

LTTNG_EXPORT DEFINE_LTTNG_UST_SIGBUS_STATE();
/*
 * Add channel to internal consumer state.
 *
 * Returns 0 on success or else a negative value.
 */
static int add_channel(struct lttng_consumer_channel *channel,
		       struct lttng_consumer_local_data *ctx)
{
	int ret = 0;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(ctx);

	if (ctx->on_recv_channel != nullptr) {
		ret = ctx->on_recv_channel(channel);
		if (ret == 0) {
			ret = consumer_add_channel(channel, ctx);
		} else if (ret < 0) {
			/* Most likely an ENOMEM. */
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			goto error;
		}
	} else {
		ret = consumer_add_channel(channel, ctx);
	}

	DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);

error:
	return ret;
}
/*
 * Allocate and return a consumer stream object. If _alloc_ret is not NULL, the
 * error value, if applicable, is set in it, else it is kept untouched.
 *
 * Return NULL on error else the newly allocated stream object.
 */
static struct lttng_consumer_stream *allocate_stream(int cpu,
						     int key,
						     struct lttng_consumer_channel *channel,
						     struct lttng_consumer_local_data *ctx,
						     int *_alloc_ret)
{
	int alloc_ret;
	struct lttng_consumer_stream *stream = nullptr;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(ctx);

	stream = consumer_stream_create(channel,
					channel->key,
					key,
					channel->name,
					channel->relayd_id,
					channel->session_id,
					channel->trace_chunk,
					cpu,
					&alloc_ret,
					channel->type,
					channel->monitor);
	if (stream == nullptr) {
		switch (alloc_ret) {
		case -ENOENT:
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			DBG3("Could not find channel");
			break;
		case -ENOMEM:
		case -EINVAL:
		default:
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			break;
		}
		goto error;
	}

	consumer_stream_update_channel_attributes(stream, channel);

error:
	if (_alloc_ret != nullptr) {
		*_alloc_ret = alloc_ret;
	}
	return stream;
}
/*
 * Send the given stream pointer to the corresponding thread.
 *
 * Returns 0 on success else a negative value.
 */
static int send_stream_to_thread(struct lttng_consumer_stream *stream,
				 struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_pipe *stream_pipe;

	/* Get the right pipe where the stream will be sent. */
	if (stream->metadata_flag) {
		consumer_add_metadata_stream(stream);
		stream_pipe = ctx->consumer_metadata_pipe;
	} else {
		consumer_add_data_stream(stream);
		stream_pipe = ctx->consumer_data_pipe;
	}

	/*
	 * From this point on, the stream's ownership has been moved away from
	 * the channel and it becomes globally visible. Hence, remove it from
	 * the local stream list to prevent the stream from being both local and
	 * global.
	 */
	stream->globally_visible = 1;
	cds_list_del_init(&stream->send_node);

	ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream)); /* NOLINT sizeof used on a
									 pointer. */
	if (ret < 0) {
		ERR("Consumer write %s stream to pipe %d",
		    stream->metadata_flag ? "metadata" : "data",
		    lttng_pipe_get_writefd(stream_pipe));
		if (stream->metadata_flag) {
			consumer_del_stream_for_metadata(stream);
		} else {
			consumer_del_stream_for_data(stream);
		}
		goto error;
	}

error:
	return ret;
}
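/*
 * Illustrative sketch (not part of the original source): the ownership
 * transfer above boils down to writing the stream pointer itself into the
 * thread's lttng_pipe; the receiving thread reads the same pointer-sized
 * message back. The hypothetical reader below only shows that pattern and
 * assumes a pipe created with lttng_pipe_open().
 */
#if 0
static struct lttng_consumer_stream *receive_stream_sketch(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *stream = nullptr;

	/* Blocks until one pointer-sized message is available. */
	if (lttng_pipe_read(pipe, &stream, sizeof(stream)) != sizeof(stream)) {
		return nullptr;
	}
	return stream;
}
#endif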
static int get_stream_shm_path(char *stream_shm_path, const char *shm_path, int cpu)
{
	int ret;
	char cpu_nr[INT_MAX_STR_LEN]; /* int max len */

	strncpy(stream_shm_path, shm_path, PATH_MAX);
	stream_shm_path[PATH_MAX - 1] = '\0';
	ret = snprintf(cpu_nr, INT_MAX_STR_LEN, "%i", cpu);
	if (ret < 0) {
		PERROR("snprintf");
		goto end;
	}
	strncat(stream_shm_path, cpu_nr, PATH_MAX - strlen(stream_shm_path) - 1);
	ret = 0;

end:
	return ret;
}
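/*
 * Illustrative sketch (not part of the original source): get_stream_shm_path()
 * simply appends the CPU number to the channel's shm path. The path below is
 * made up for the example.
 */
#if 0
char stream_shm_path[PATH_MAX];

/* Yields "/dev/shm/ust-chan3" for CPU 3. */
if (get_stream_shm_path(stream_shm_path, "/dev/shm/ust-chan", 3) == 0) {
	/* stream_shm_path is now ready to be opened with run_as_open(). */
}
#endif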
/*
 * Create streams for the given channel using liblttng-ust-ctl.
 * The channel lock must be acquired by the caller.
 *
 * Return 0 on success else a negative value.
 */
static int create_ust_streams(struct lttng_consumer_channel *channel,
			      struct lttng_consumer_local_data *ctx)
{
	int ret, cpu = 0;
	struct lttng_ust_ctl_consumer_stream *ustream;
	struct lttng_consumer_stream *stream;
	pthread_mutex_t *current_stream_lock = nullptr;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(ctx);

	/*
	 * While a stream is available from ustctl. When NULL is returned, we've
	 * reached the end of the possible streams for the channel.
	 */
	while ((ustream = lttng_ust_ctl_create_stream(channel->uchan, cpu))) {
		int wait_fd;
		int ust_metadata_pipe[2];

		health_code_update();

		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
			ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
			if (ret < 0) {
				ERR("Create ust metadata poll pipe");
				goto error;
			}
			wait_fd = ust_metadata_pipe[0];
		} else {
			wait_fd = lttng_ust_ctl_stream_get_wait_fd(ustream);
		}

		/* Allocate consumer stream object. */
		stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
		if (!stream) {
			goto error_alloc;
		}
		stream->ustream = ustream;
		/*
		 * Store it so we can save multiple function calls afterwards since
		 * this value is used heavily in the stream threads. This is UST
		 * specific so this is why it's done after allocation.
		 */
		stream->wait_fd = wait_fd;

		/*
		 * Increment channel refcount since the channel reference has now been
		 * assigned in the allocation process above.
		 */
		if (stream->chan->monitor) {
			uatomic_inc(&stream->chan->refcount);
		}

		pthread_mutex_lock(&stream->lock);
		current_stream_lock = &stream->lock;
		/*
		 * Order is important this is why a list is used. On error, the caller
		 * should clean this list.
		 */
		cds_list_add_tail(&stream->send_node, &channel->streams.head);

		ret = lttng_ust_ctl_get_max_subbuf_size(stream->ustream, &stream->max_sb_size);
		if (ret < 0) {
			ERR("lttng_ust_ctl_get_max_subbuf_size failed for stream %s", stream->name);
			goto error;
		}

		/* Do actions once stream has been received. */
		if (ctx->on_recv_stream) {
			ret = ctx->on_recv_stream(stream);
			if (ret < 0) {
				goto error;
			}
		}

		DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
		    stream->name,
		    stream->key,
		    stream->relayd_stream_id);

		/* Set next CPU stream. */
		channel->streams.count = ++cpu;

		/* Keep stream reference when creating metadata. */
		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
			channel->metadata_stream = stream;
			if (channel->monitor) {
				/* Set metadata poll pipe if we created one */
				memcpy(stream->ust_metadata_poll_pipe,
				       ust_metadata_pipe,
				       sizeof(ust_metadata_pipe));
			}
		}
		pthread_mutex_unlock(&stream->lock);
		current_stream_lock = nullptr;
	}

	return 0;

error:
error_alloc:
	if (current_stream_lock) {
		pthread_mutex_unlock(current_stream_lock);
	}
	return ret;
}
static int open_ust_stream_fd(struct lttng_consumer_channel *channel,
			      int cpu,
			      const struct lttng_credentials *session_credentials)
{
	char shm_path[PATH_MAX];
	int ret;

	if (!channel->shm_path[0]) {
		return shm_create_anonymous("ust-consumer");
	}
	ret = get_stream_shm_path(shm_path, channel->shm_path, cpu);
	if (ret) {
		goto error_shm_path;
	}
	return run_as_open(shm_path,
			   O_RDWR | O_CREAT | O_EXCL,
			   S_IRUSR | S_IWUSR,
			   lttng_credentials_get_uid(session_credentials),
			   lttng_credentials_get_gid(session_credentials));

error_shm_path:
	return -1;
}
/*
 * Create an UST channel with the given attributes and send it to the session
 * daemon using the ust ctl API.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_channel(struct lttng_consumer_channel *channel,
			      struct lttng_ust_ctl_consumer_channel_attr *attr,
			      struct lttng_ust_ctl_consumer_channel **ust_chanp)
{
	int ret, nr_stream_fds, i, j;
	int *stream_fds;
	struct lttng_ust_ctl_consumer_channel *ust_channel;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(attr);
	LTTNG_ASSERT(ust_chanp);
	LTTNG_ASSERT(channel->buffer_credentials.is_set);

	DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
	     "subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
	     "switch_timer_interval: %u, read_timer_interval: %u, "
	     "output: %d, type: %d",
	     attr->overwrite,
	     attr->subbuf_size,
	     attr->num_subbuf,
	     attr->switch_timer_interval,
	     attr->read_timer_interval,
	     attr->output,
	     attr->type);

	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
		nr_stream_fds = 1;
	} else {
		nr_stream_fds = lttng_ust_ctl_get_nr_stream_per_channel();
	}
	stream_fds = calloc<int>(nr_stream_fds);
	if (!stream_fds) {
		ret = -1;
		goto error_alloc;
	}
	for (i = 0; i < nr_stream_fds; i++) {
		stream_fds[i] = open_ust_stream_fd(channel, i, &channel->buffer_credentials.value);
		if (stream_fds[i] < 0) {
			ret = -1;
			goto error_open;
		}
	}
	ust_channel = lttng_ust_ctl_create_channel(attr, stream_fds, nr_stream_fds);
	if (!ust_channel) {
		ret = -1;
		goto error_create;
	}
	channel->nr_stream_fds = nr_stream_fds;
	channel->stream_fds = stream_fds;
	*ust_chanp = ust_channel;

	return 0;

error_create:
error_open:
	for (j = i - 1; j >= 0; j--) {
		int closeret;

		closeret = close(stream_fds[j]);
		if (closeret) {
			PERROR("close");
		}
		if (channel->shm_path[0]) {
			char shm_path[PATH_MAX];

			closeret = get_stream_shm_path(shm_path, channel->shm_path, j);
			if (closeret) {
				ERR("Cannot get stream shm path");
			}
			closeret = run_as_unlink(shm_path,
						 lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
							 channel->buffer_credentials)),
						 lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
							 channel->buffer_credentials)));
			if (closeret) {
				PERROR("unlink %s", shm_path);
			}
		}
	}
	/* Try to rmdir all directories under shm_path root. */
	if (channel->root_shm_path[0]) {
		(void) run_as_rmdir_recursive(channel->root_shm_path,
					      lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
						      channel->buffer_credentials)),
					      lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
						      channel->buffer_credentials)),
					      LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
	}
	free(stream_fds);
error_alloc:
	return ret;
}
/*
 * Send a single given stream to the session daemon using the sock.
 *
 * Return 0 on success else a negative value.
 */
static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
{
	int ret;

	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(sock >= 0);

	DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);

	/* Send stream to session daemon. */
	ret = lttng_ust_ctl_send_stream_to_sessiond(sock, stream->ustream);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
/*
 * Send channel to sessiond and relayd if applicable.
 *
 * Return 0 on success or else a negative value.
 */
static int send_channel_to_sessiond_and_relayd(int sock,
					       struct lttng_consumer_channel *channel,
					       struct lttng_consumer_local_data *ctx,
					       int *relayd_error)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_consumer_stream *stream;
	uint64_t net_seq_idx = -1ULL;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(ctx);
	LTTNG_ASSERT(sock >= 0);

	DBG("UST consumer sending channel %s to sessiond", channel->name);

	if (channel->relayd_id != (uint64_t) -1ULL) {
		cds_list_for_each_entry (stream, &channel->streams.head, send_node) {
			health_code_update();

			/* Try to send the stream to the relayd if one is available. */
			DBG("Sending stream %" PRIu64 " of channel \"%s\" to relayd",
			    stream->key,
			    channel->name);
			ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
			if (ret < 0) {
				/*
				 * Flag that the relayd was the problem here probably due to a
				 * communication error on the socket.
				 */
				if (relayd_error) {
					*relayd_error = 1;
				}
				ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			}
			if (net_seq_idx == -1ULL) {
				net_seq_idx = stream->net_seq_idx;
			}
		}
	}

	/* Inform sessiond that we are about to send channel and streams. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		/*
		 * Either the session daemon is not responding or the relayd died so we
		 * stop now.
		 */
		goto error;
	}

	/* Send channel to sessiond. */
	ret = lttng_ust_ctl_send_channel_to_sessiond(sock, channel->uchan);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_ust_ctl_channel_close_wakeup_fd(channel->uchan);
	if (ret < 0) {
		goto error;
	}

	/* The channel was sent successfully to the sessiond at this point. */
	cds_list_for_each_entry (stream, &channel->streams.head, send_node) {
		health_code_update();

		/* Send stream to session daemon. */
		ret = send_sessiond_stream(sock, stream);
		if (ret < 0) {
			goto error;
		}
	}

	/* Tell sessiond there is no more stream. */
	ret = lttng_ust_ctl_send_stream_to_sessiond(sock, nullptr);
	if (ret < 0) {
		goto error;
	}

	DBG("UST consumer NULL stream sent to sessiond");

	return 0;

error:
	if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		ret = -1;
	}
	return ret;
}
/*
 * Creates a channel and its streams and adds the channel to the internal
 * consumer state. The created streams must ONLY be sent once the GET_CHANNEL
 * command is received.
 *
 * Return 0 on success or else, a negative value is returned and the channel
 * MUST be destroyed by consumer_del_channel().
 */
static int ask_channel(struct lttng_consumer_local_data *ctx,
		       struct lttng_consumer_channel *channel,
		       struct lttng_ust_ctl_consumer_channel_attr *attr)
{
	int ret;

	LTTNG_ASSERT(ctx);
	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(attr);

	/*
	 * This value is still used by the kernel consumer since for the kernel,
	 * the stream ownership is not IN the consumer so we need to have the
	 * number of left stream that needs to be initialized so we can know when
	 * to delete the channel (see consumer.c).
	 *
	 * As for the user space tracer now, the consumer creates and sends the
	 * stream to the session daemon which only sends them to the application
	 * once every stream of a channel is received making this value useless
	 * because they will be added to the poll thread before the application
	 * receives them. This ensures that a stream can not hang up during
	 * initialization of a channel.
	 */
	channel->nb_init_stream_left = 0;

	/* The reply msg status is handled in the following call. */
	ret = create_ust_channel(channel, attr, &channel->uchan);
	if (ret < 0) {
		goto end;
	}

	channel->wait_fd = lttng_ust_ctl_channel_get_wait_fd(channel->uchan);

	/*
	 * For the snapshots (no monitor), we create the metadata streams
	 * on demand, not during the channel creation.
	 */
	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
		ret = 0;
		goto end;
	}

	/* Open all streams for this channel. */
	pthread_mutex_lock(&channel->lock);
	ret = create_ust_streams(channel, ctx);
	pthread_mutex_unlock(&channel->lock);

end:
	return ret;
}
/*
 * Send all streams of a channel to the right thread handling it.
 *
 * On error, return a negative value else 0 on success.
 */
static int send_streams_to_thread(struct lttng_consumer_channel *channel,
				  struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_stream *stream, *stmp;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(ctx);

	/* Send streams to the corresponding thread. */
	cds_list_for_each_entry_safe (stream, stmp, &channel->streams.head, send_node) {
		health_code_update();

		/* Sending the stream to the thread. */
		ret = send_stream_to_thread(stream, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Flush channel's streams using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int flush_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer flush channel key %" PRIu64, chan_key);

	lttng::urcu::read_lock_guard read_lock;
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = the_consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, flush it. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
					  ht->hash_fct(&channel->key, lttng_ht_seed),
					  ht->match_fct,
					  &channel->key,
					  &iter.iter,
					  stream,
					  node_channel_id.node)
	{
		health_code_update();

		pthread_mutex_lock(&stream->lock);

		/*
		 * Protect against concurrent teardown of a stream.
		 */
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}

		if (!stream->quiescent) {
			ret = lttng_ust_ctl_flush_buffer(stream->ustream, 0);
			if (ret) {
				ERR("Failed to flush buffer while flushing channel: channel key = %" PRIu64
				    ", channel name = '%s'",
				    chan_key,
				    channel->name);
				ret = LTTNG_ERR_BUFFER_FLUSH_FAILED;
				pthread_mutex_unlock(&stream->lock);
				goto error;
			}
			stream->quiescent = true;
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}

	/*
	 * Send one last buffer statistics update to the session daemon. This
	 * ensures that the session daemon gets at least one statistics update
	 * per channel even in the case of short-lived channels, such as when a
	 * short-lived app is traced in per-pid mode.
	 */
	sample_and_send_channel_buffer_stats(channel);

error:
	return ret;
}
/*
 * Clear quiescent state from channel's streams using the given key to
 * retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int clear_quiescent_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);

	lttng::urcu::read_lock_guard read_lock;
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = the_consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, clear quiescent state. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
					  ht->hash_fct(&channel->key, lttng_ht_seed),
					  ht->match_fct,
					  &channel->key,
					  &iter.iter,
					  stream,
					  node_channel_id.node)
	{
		health_code_update();

		pthread_mutex_lock(&stream->lock);
		stream->quiescent = false;
		pthread_mutex_unlock(&stream->lock);
	}

error:
	return ret;
}
/*
 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int close_metadata(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	unsigned int channel_monitor;

	DBG("UST consumer close metadata key %" PRIu64, chan_key);

	channel = consumer_find_channel(chan_key);
	if (!channel) {
		/*
		 * This is possible if the metadata thread has issued a delete because
		 * the endpoint of the stream hung up. There is no way the
		 * session daemon can know about it thus use a DBG instead of an actual
		 * error.
		 */
		DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	pthread_mutex_lock(&the_consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	channel_monitor = channel->monitor;
	if (cds_lfht_is_node_deleted(&channel->node.node)) {
		goto error_unlock;
	}

	lttng_ustconsumer_close_metadata(channel);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&the_consumer_data.lock);

	/*
	 * The ownership of a metadata channel depends on the type of
	 * session to which it belongs. In effect, the monitor flag is checked
	 * to determine if this metadata channel is in "snapshot" mode or not.
	 *
	 * In the non-snapshot case, the metadata channel is created along with
	 * a single stream which will remain present until the metadata channel
	 * is destroyed (on the destruction of its session). In this case, the
	 * metadata stream is "monitored" by the metadata poll thread and holds
	 * the ownership of its channel.
	 *
	 * Closing the metadata will cause the metadata stream's "metadata poll
	 * pipe" to be closed. Closing this pipe will wake up the metadata poll
	 * thread which will tear down the metadata stream which, in return,
	 * deletes the metadata channel.
	 *
	 * In the snapshot case, the metadata stream is created and destroyed
	 * on every snapshot record. Since the channel doesn't have an owner
	 * other than the session daemon, it is safe to destroy it immediately
	 * on reception of the CLOSE_METADATA command.
	 */
	if (!channel_monitor) {
		/*
		 * The channel and consumer_data locks must be
		 * released before this call since consumer_del_channel
		 * re-acquires the channel and consumer_data locks to teardown
		 * the channel and queue its reclamation by the "call_rcu"
		 * worker thread.
		 */
		consumer_del_channel(channel);
	}

	return LTTCOMM_CONSUMERD_SUCCESS;

error_unlock:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&the_consumer_data.lock);
error:
	return ret;
}
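/*
 * Illustrative sketch (not part of the original source): a condensed view of
 * the two ownership paths described above, keyed on the monitor flag.
 */
#if 0
if (channel_monitor) {
	/*
	 * Non-snapshot session: closing the metadata poll pipe wakes the
	 * metadata poll thread, which tears down the stream and, through its
	 * ownership, the channel.
	 */
} else {
	/* Snapshot session: the channel has no other owner; destroy it now. */
	consumer_del_channel(channel);
}
#endif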
/*
 * Setup the metadata channel's streams and pass their ownership to the
 * metadata thread. RCU read side lock MUST be acquired before calling this
 * function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
{
	int ret = 0;
	struct lttng_consumer_channel *metadata;

	ASSERT_RCU_READ_LOCKED();

	DBG("UST consumer setup metadata key %" PRIu64, key);

	metadata = consumer_find_channel(key);
	if (!metadata) {
		ERR("UST consumer push metadata %" PRIu64 " not found", key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto end;
	}

	/*
	 * In no monitor mode, the metadata channel has no stream(s) so skip the
	 * ownership transfer to the metadata thread.
	 */
	if (!metadata->monitor) {
		DBG("Metadata channel in no monitor");
		ret = 0;
		goto end;
	}

	/*
	 * Send metadata stream to relayd if one available. Availability is
	 * known if the stream is still in the list of the channel.
	 */
	if (cds_list_empty(&metadata->streams.head)) {
		ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
		ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		goto error_no_stream;
	}

	/* Send metadata stream to relayd if needed. */
	if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
		ret = consumer_send_relayd_stream(metadata->metadata_stream, metadata->pathname);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto error;
		}
		ret = consumer_send_relayd_streams_sent(metadata->metadata_stream->net_seq_idx);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			goto error;
		}
	}

	/*
	 * Ownership of metadata stream is passed along. Freeing is handled by
	 * the callee.
	 */
	ret = send_streams_to_thread(metadata, ctx);
	if (ret < 0) {
		/*
		 * If we are unable to send the stream to the thread, there is
		 * a big problem so just stop everything.
		 */
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto send_streams_error;
	}
	/* List MUST be empty after or else it could be reused. */
	LTTNG_ASSERT(cds_list_empty(&metadata->streams.head));

	ret = 0;
	goto end;

error:
	/*
	 * Delete metadata channel on error. At this point, the metadata stream can
	 * NOT be monitored by the metadata thread thus having the guarantee that
	 * the stream is still in the local stream list of the channel. This call
	 * will make sure to clean that list.
	 */
	consumer_stream_destroy(metadata->metadata_stream, nullptr);
	metadata->metadata_stream = nullptr;
send_streams_error:
error_no_stream:
end:
	return ret;
}
/*
 * Snapshot the whole metadata.
 * RCU read-side lock must be held by the caller.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_metadata(struct lttng_consumer_channel *metadata_channel,
			     uint64_t key,
			     char *path,
			     uint64_t relayd_id,
			     struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_stream *metadata_stream;

	LTTNG_ASSERT(path);
	LTTNG_ASSERT(ctx);
	ASSERT_RCU_READ_LOCKED();

	DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s", key, path);

	lttng::urcu::read_lock_guard read_lock;

	LTTNG_ASSERT(!metadata_channel->monitor);

	health_code_update();

	/*
	 * Ask the sessiond if we have new metadata waiting and update the
	 * consumer metadata cache.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * The metadata stream is NOT created in no monitor mode when the channel
	 * is created on a sessiond ask channel command.
	 */
	ret = create_ust_streams(metadata_channel, ctx);
	if (ret < 0) {
		goto error;
	}

	metadata_stream = metadata_channel->metadata_stream;
	LTTNG_ASSERT(metadata_stream);

	metadata_stream->read_subbuffer_ops.lock(metadata_stream);
	if (relayd_id != (uint64_t) -1ULL) {
		metadata_stream->net_seq_idx = relayd_id;
		ret = consumer_send_relayd_stream(metadata_stream, path);
	} else {
		ret = consumer_stream_create_output_files(metadata_stream, false);
	}
	if (ret < 0) {
		goto error_stream;
	}

	do {
		health_code_update();
		ret = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
		if (ret < 0) {
			goto error_stream;
		}
	} while (ret > 0);

error_stream:
	metadata_stream->read_subbuffer_ops.unlock(metadata_stream);
	/*
	 * Clean up the stream completely because the next snapshot will use a
	 * new metadata stream.
	 */
	consumer_stream_destroy(metadata_stream, nullptr);
	metadata_channel->metadata_stream = nullptr;

error:
	return ret;
}
static int get_current_subbuf_addr(struct lttng_consumer_stream *stream, const char **addr)
{
	int ret;
	unsigned long mmap_offset;
	const char *mmap_base;

	mmap_base = (const char *) lttng_ust_ctl_get_mmap_base(stream->ustream);
	if (!mmap_base) {
		ERR("Failed to get mmap base for stream `%s`", stream->name);
		ret = -EPERM;
		goto error;
	}

	ret = lttng_ust_ctl_get_mmap_read_offset(stream->ustream, &mmap_offset);
	if (ret != 0) {
		ERR("Failed to get mmap offset for stream `%s`", stream->name);
		ret = -EINVAL;
		goto error;
	}

	*addr = mmap_base + mmap_offset;

error:
	return ret;
}
/*
 * Take a snapshot of all the streams of a channel.
 * RCU read-side lock and the channel lock must be held by the caller.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_channel(struct lttng_consumer_channel *channel,
			    uint64_t key,
			    char *path,
			    uint64_t relayd_id,
			    uint64_t nb_packets_per_stream,
			    struct lttng_consumer_local_data *ctx)
{
	int ret;
	unsigned use_relayd = 0;
	unsigned long consumed_pos, produced_pos;
	struct lttng_consumer_stream *stream;

	LTTNG_ASSERT(path);
	LTTNG_ASSERT(ctx);
	ASSERT_RCU_READ_LOCKED();

	lttng::urcu::read_lock_guard read_lock;

	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	LTTNG_ASSERT(!channel->monitor);
	DBG("UST consumer snapshot channel %" PRIu64, key);

	cds_list_for_each_entry (stream, &channel->streams.head, send_node) {
		health_code_update();

		/* Lock stream because we are about to change its state. */
		pthread_mutex_lock(&stream->lock);
		LTTNG_ASSERT(channel->trace_chunk);
		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
			/*
			 * Can't happen barring an internal error as the channel
			 * holds a reference to the trace chunk.
			 */
			ERR("Failed to acquire reference to channel's trace chunk");
			ret = -1;
			goto error_unlock;
		}
		LTTNG_ASSERT(!stream->trace_chunk);
		stream->trace_chunk = channel->trace_chunk;

		if (use_relayd) {
			stream->net_seq_idx = relayd_id;
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				goto error_close_stream;
			}
		} else {
			ret = consumer_stream_create_output_files(stream, false);
			if (ret < 0) {
				goto error_close_stream;
			}
			DBG("UST consumer snapshot stream (%" PRIu64 ")", stream->key);
		}

		/*
		 * If tracing is active, we want to perform a "full" buffer flush.
		 * Else, if quiescent, it has already been done by the prior stop.
		 */
		if (!stream->quiescent) {
			ret = lttng_ust_ctl_flush_buffer(stream->ustream, 0);
			if (ret < 0) {
				ERR("Failed to flush buffer during snapshot of channel: channel key = %" PRIu64
				    ", channel name = '%s'",
				    channel->key,
				    channel->name);
				goto error_close_stream;
			}
		}

		ret = lttng_ustconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking UST snapshot");
			goto error_close_stream;
		}

		ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced UST snapshot position");
			goto error_close_stream;
		}

		ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumed UST snapshot position");
			goto error_close_stream;
		}

		/*
		 * The original value is sent back if max stream size is larger than
		 * the possible size of the snapshot. Also, we assume that the session
		 * daemon should never send a maximum stream size that is lower than
		 * subbuffer size.
		 */
		consumed_pos = consumer_get_consume_start_pos(
			consumed_pos, produced_pos, nb_packets_per_stream, stream->max_sb_size);

		while ((long) (consumed_pos - produced_pos) < 0) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();

			DBG("UST consumer taking snapshot at pos %lu", consumed_pos);

			ret = lttng_ust_ctl_get_subbuf(stream->ustream, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("lttng_ust_ctl_get_subbuf snapshot");
					goto error_close_stream;
				}
				DBG("UST consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			ret = lttng_ust_ctl_get_subbuf_size(stream->ustream, &len);
			if (ret < 0) {
				ERR("Snapshot lttng_ust_ctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = lttng_ust_ctl_get_padded_subbuf_size(stream->ustream, &padded_len);
			if (ret < 0) {
				ERR("Snapshot lttng_ust_ctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

			subbuf_view = lttng_buffer_view_init(subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
				stream, &subbuf_view, padded_len - len);
			if (use_relayd) {
				if (read_len != len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			} else {
				if (read_len != padded_len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			}

			ret = lttng_ust_ctl_put_subbuf(stream->ustream);
			if (ret < 0) {
				ERR("Snapshot lttng_ust_ctl_put_subbuf");
				goto error_close_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Simply close the stream so we can use it on the next snapshot. */
		consumer_stream_close_output(stream);
		pthread_mutex_unlock(&stream->lock);
	}

	return 0;

error_put_subbuf:
	if (lttng_ust_ctl_put_subbuf(stream->ustream) < 0) {
		ERR("Snapshot lttng_ust_ctl_put_subbuf");
	}
error_close_stream:
	consumer_stream_close_output(stream);
error_unlock:
	pthread_mutex_unlock(&stream->lock);
	return ret;
}
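/*
 * Illustrative sketch (not part of the original source): the packet cap
 * applied through consumer_get_consume_start_pos() amounts to clamping the
 * consumed position so that at most nb_packets_per_stream packets of
 * max_sb_size bytes are read back. A minimal model of that computation,
 * assuming byte positions and 0 meaning "no cap":
 */
#if 0
static unsigned long clamp_snapshot_start_pos_sketch(unsigned long consumed_pos,
						     unsigned long produced_pos,
						     uint64_t nb_packets_per_stream,
						     unsigned long max_sb_size)
{
	if (nb_packets_per_stream == 0) {
		/* No cap: snapshot everything produced but not yet consumed. */
		return consumed_pos;
	}

	const unsigned long max_bytes = nb_packets_per_stream * max_sb_size;

	return (produced_pos - consumed_pos > max_bytes) ? produced_pos - max_bytes :
							   consumed_pos;
}
#endif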
static void metadata_stream_reset_cache_consumed_position(struct lttng_consumer_stream *stream)
{
	ASSERT_LOCKED(stream->lock);

	DBG("Reset metadata cache of session %" PRIu64, stream->chan->session_id);
	stream->ust_metadata_pushed = 0;
}
/*
 * Receive the metadata updates from the sessiond. Supports receiving
 * overlapping metadata, but it needs to always belong to a contiguous
 * range starting from 0.
 * Be careful about the locks held when calling this function: it needs
 * the metadata cache flush to concurrently progress in order to
 * complete.
 */
int lttng_ustconsumer_recv_metadata(int sock,
				    uint64_t key,
				    uint64_t offset,
				    uint64_t len,
				    uint64_t version,
				    struct lttng_consumer_channel *channel,
				    int timer,
				    int wait)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	char *metadata_str;
	enum consumer_metadata_cache_write_status cache_write_status;

	DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);

	metadata_str = calloc<char>(len);
	if (!metadata_str) {
		PERROR("zmalloc metadata string");
		ret_code = LTTCOMM_CONSUMERD_ENOMEM;
		goto end;
	}

	health_code_update();

	/* Receive metadata string. */
	ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
	if (ret < 0) {
		/* Session daemon is dead so return gracefully. */
		ret_code = ret;
		goto end_free;
	}

	health_code_update();

	pthread_mutex_lock(&channel->metadata_cache->lock);
	cache_write_status = consumer_metadata_cache_write(
		channel->metadata_cache, offset, len, version, metadata_str);
	pthread_mutex_unlock(&channel->metadata_cache->lock);
	switch (cache_write_status) {
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_NO_CHANGE:
		/*
		 * The write entirely overlapped with existing contents of the
		 * same metadata version (same content); there is nothing to do.
		 */
		break;
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_INVALIDATED:
		/*
		 * The metadata cache was invalidated (previously pushed
		 * content has been overwritten). Reset the stream's consumed
		 * metadata position to ensure the metadata poll thread consumes
		 * the whole cache.
		 */

		/*
		 * channel::metadata_stream can be null when the metadata
		 * channel is under a snapshot session type. No need to update
		 * the stream position in that scenario.
		 */
		if (channel->metadata_stream != nullptr) {
			pthread_mutex_lock(&channel->metadata_stream->lock);
			metadata_stream_reset_cache_consumed_position(channel->metadata_stream);
			pthread_mutex_unlock(&channel->metadata_stream->lock);
		} else {
			/* Validate we are in snapshot mode. */
			LTTNG_ASSERT(!channel->monitor);
		}
		/* Fall-through. */
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_APPENDED_CONTENT:
		/*
		 * In both cases, the metadata poll thread has new data to
		 * consume.
		 */
		ret = consumer_metadata_wakeup_pipe(channel);
		if (ret) {
			ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto end_free;
		}
		break;
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR:
		/* Unable to handle metadata. Notify session daemon. */
		ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
		/*
		 * Skip metadata flush on write error since the offset and len might
		 * not have been updated which could create an infinite loop below when
		 * waiting for the metadata cache to be flushed.
		 */
		goto end_free;
	default:
		abort();
	}

	if (!wait) {
		goto end_free;
	}

	while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
		DBG("Waiting for metadata to be flushed");

		health_code_update();

		usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
	}

end_free:
	free(metadata_str);
end:
	return ret_code;
}
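/*
 * Illustrative sketch (not part of the original source): the "contiguous
 * range starting from 0" requirement stated above means overlapping pushes
 * are fine as long as no hole is left in the cache. The offsets and lengths
 * below are hypothetical.
 */
#if 0
/* Covers [0, 64): accepted. */
lttng_ustconsumer_recv_metadata(sock, key, 0, 64, version, channel, 0, 1);
/* Covers [32, 128): overlaps the previous push, still contiguous: accepted. */
lttng_ustconsumer_recv_metadata(sock, key, 32, 96, version, channel, 0, 1);
/* A push starting at offset 256 would leave a hole over [128, 256). */
#endif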
/*
 * Receive command from session daemon and process it.
 *
 * Return 1 on success else a negative value or 0.
 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
			       int sock,
			       struct pollfd *consumer_sockpoll)
{
	int ret_func;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;
	struct lttng_consumer_channel *channel = nullptr;

	health_code_update();

	{
		ssize_t ret_recv;

		ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
		if (ret_recv != sizeof(msg)) {
			DBG("Consumer received unexpected message size %zd (expects %zu)",
			    ret_recv,
			    sizeof(msg));
			/*
			 * The ret value might be 0 meaning an orderly shutdown but this is ok
			 * since the caller handles this.
			 */
			if (ret_recv > 0) {
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
				ret_recv = -1;
			}
			return ret_recv;
		}
	}

	health_code_update();

	/* Deprecated command. */
	LTTNG_ASSERT(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* relayd needs RCU read-side lock */
	lttng::urcu::read_lock_guard read_lock;

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		uint32_t major = msg.u.relayd_sock.major;
		uint32_t minor = msg.u.relayd_sock.minor;
		enum lttcomm_sock_proto protocol =
			(enum lttcomm_sock_proto) msg.u.relayd_sock.relayd_socket_protocol;

		/* Session daemon status messages are handled in the following call. */
		consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
					   msg.u.relayd_sock.type,
					   ctx,
					   sock,
					   consumer_sockpoll,
					   msg.u.relayd_sock.session_id,
					   msg.u.relayd_sock.relayd_session_id,
					   major,
					   minor,
					   protocol);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == nullptr) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		} else {
			/*
			 * Each relayd socket pair has a refcount of stream attached to it
			 * which tells if the relayd is still active or not depending on the
			 * refcount value.
			 *
			 * This will set the destroy flag of the relayd object and destroy it
			 * if the refcount reaches zero when called.
			 *
			 * The destroy can happen either here or when a stream fd hangs up.
			 */
			consumer_flag_relayd_for_destroy(relayd);
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int is_data_pending;
		ssize_t ret_send;
		uint64_t id = msg.u.data_pending.session_id;

		DBG("UST consumer data pending command for id %" PRIu64, id);

		is_data_pending = consumer_data_pending(id);

		/* Send back returned value to session daemon */
		ret_send = lttcomm_send_unix_sock(sock, &is_data_pending, sizeof(is_data_pending));
		if (ret_send < 0) {
			DBG("Error when sending the data pending ret code: %zd", ret_send);
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
	{
		int ret_ask_channel, ret_add_channel, ret_send;
		struct lttng_ust_ctl_consumer_channel_attr attr;
		const uint64_t chunk_id = msg.u.ask_channel.chunk_id.value;
		const struct lttng_credentials buffer_credentials = {
			.uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.uid),
			.gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.gid),
		};

		/* Create a plain object and reserve a channel key. */
		channel = consumer_allocate_channel(
			msg.u.ask_channel.key,
			msg.u.ask_channel.session_id,
			msg.u.ask_channel.chunk_id.is_set ? &chunk_id : nullptr,
			msg.u.ask_channel.pathname,
			msg.u.ask_channel.name,
			msg.u.ask_channel.relayd_id,
			(enum lttng_event_output) msg.u.ask_channel.output,
			msg.u.ask_channel.tracefile_size,
			msg.u.ask_channel.tracefile_count,
			msg.u.ask_channel.session_id_per_pid,
			msg.u.ask_channel.monitor,
			msg.u.ask_channel.live_timer_interval,
			msg.u.ask_channel.is_live,
			msg.u.ask_channel.root_shm_path,
			msg.u.ask_channel.shm_path);
		if (!channel) {
			goto end_channel_error;
		}

		LTTNG_OPTIONAL_SET(&channel->buffer_credentials, buffer_credentials);

		/*
		 * Assign UST application UID to the channel. This value is ignored for
		 * per PID buffers. This is specific to UST thus setting this after the
		 * allocation.
		 */
		channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;

		/* Build channel attributes from received message. */
		attr.subbuf_size = msg.u.ask_channel.subbuf_size;
		attr.num_subbuf = msg.u.ask_channel.num_subbuf;
		attr.overwrite = msg.u.ask_channel.overwrite;
		attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
		attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
		attr.chan_id = msg.u.ask_channel.chan_id;
		memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
		attr.blocking_timeout = msg.u.ask_channel.blocking_timeout;

		/* Match channel buffer type to the UST abi. */
		switch (msg.u.ask_channel.output) {
		case LTTNG_EVENT_MMAP:
		default:
			attr.output = LTTNG_UST_ABI_MMAP;
			break;
		}

		/* Translate and save channel type. */
		switch (msg.u.ask_channel.type) {
		case LTTNG_UST_ABI_CHAN_PER_CPU:
			channel->type = CONSUMER_CHANNEL_TYPE_DATA;
			attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;
			/*
			 * Set refcount to 1 for owner. Below, we will
			 * pass ownership to the
			 * consumer_thread_channel_poll() thread.
			 */
			channel->refcount = 1;
			break;
		case LTTNG_UST_ABI_CHAN_METADATA:
			channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
			attr.type = LTTNG_UST_ABI_CHAN_METADATA;
			break;
		default:
			abort();
			goto end_nosignal;
		}

		health_code_update();

		ret_ask_channel = ask_channel(ctx, channel, &attr);
		if (ret_ask_channel < 0) {
			goto end_channel_error;
		}

		if (msg.u.ask_channel.type == LTTNG_UST_ABI_CHAN_METADATA) {
			int ret_allocate;

			ret_allocate = consumer_metadata_cache_allocate(channel);
			if (ret_allocate < 0) {
				ERR("Allocating metadata cache");
				goto end_channel_error;
			}
			consumer_timer_switch_start(channel, attr.switch_timer_interval);
			attr.switch_timer_interval = 0;
		} else {
			int monitor_start_ret;

			consumer_timer_live_start(channel, msg.u.ask_channel.live_timer_interval);
			monitor_start_ret = consumer_timer_monitor_start(
				channel, msg.u.ask_channel.monitor_timer_interval);
			if (monitor_start_ret < 0) {
				ERR("Starting channel monitoring timer failed");
				goto end_channel_error;
			}
		}

		health_code_update();

		/*
		 * Add the channel to the internal state AFTER all streams were created
		 * and successfully sent to session daemon. This way, all streams must
		 * be ready before this channel is visible to the threads.
		 * If add_channel succeeds, ownership of the channel is
		 * passed to consumer_thread_channel_poll().
		 */
		ret_add_channel = add_channel(channel, ctx);
		if (ret_add_channel < 0) {
			if (msg.u.ask_channel.type == LTTNG_UST_ABI_CHAN_METADATA) {
				if (channel->switch_timer_enabled == 1) {
					consumer_timer_switch_stop(channel);
				}
				consumer_metadata_cache_destroy(channel);
			}
			if (channel->live_timer_enabled == 1) {
				consumer_timer_live_stop(channel);
			}
			if (channel->monitor_timer_enabled == 1) {
				consumer_timer_monitor_stop(channel);
			}
			goto end_channel_error;
		}

		health_code_update();

		/*
		 * Channel and streams are now created. Inform the session daemon that
		 * everything went well and should wait to receive the channel and
		 * streams with ustctl API.
		 */
		ret_send = consumer_send_status_channel(sock, channel);
		if (ret_send < 0) {
			/*
			 * There is probably a problem on the socket.
			 */
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_GET_CHANNEL:
	{
		int ret, relayd_err = 0;
		uint64_t key = msg.u.get_channel.key;
		struct lttng_consumer_channel *found_channel;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			ERR("UST consumer get channel key %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			goto end_get_channel;
		}

		health_code_update();

		/* Send the channel to sessiond (and relayd, if applicable). */
		ret = send_channel_to_sessiond_and_relayd(sock, found_channel, ctx, &relayd_err);
		if (ret < 0) {
			if (relayd_err) {
				/*
				 * We were unable to send to the relayd the stream so avoid
				 * sending back a fatal error to the thread since this is OK
				 * and the consumer can continue its work. The above call
				 * has sent the error status message to the sessiond.
				 */
				goto end_get_channel_nosignal;
			}
			/*
			 * The communication was broken hence there is a bad state between
			 * the consumer and sessiond so stop everything.
			 */
			goto error_get_channel_fatal;
		}

		health_code_update();

		/*
		 * In no monitor mode, the streams ownership is kept inside the channel
		 * so don't send them to the data thread.
		 */
		if (!found_channel->monitor) {
			goto end_get_channel;
		}

		ret = send_streams_to_thread(found_channel, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error_get_channel_fatal;
		}
		/* List MUST be empty after or else it could be reused. */
		LTTNG_ASSERT(cds_list_empty(&found_channel->streams.head));
	end_get_channel:
		goto end_msg_sessiond;
	error_get_channel_fatal:
		goto error_fatal;
	end_get_channel_nosignal:
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;

		/*
		 * Only called if streams have not been sent to stream
		 * manager thread. However, channel has been sent to
		 * channel manager thread.
		 */
		notify_thread_del_channel(ctx, key);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_METADATA:
	{
		int ret;

		ret = close_metadata(msg.u.close_metadata.key);
		if (ret != 0) {
			ret_code = (lttcomm_return_code) ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_FLUSH_CHANNEL:
	{
		int ret;

		ret = flush_channel(msg.u.flush_channel.key);
		if (ret != 0) {
			ret_code = (lttcomm_return_code) ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL:
	{
		int ret;

		ret = clear_quiescent_channel(msg.u.clear_quiescent_channel.key);
		if (ret != 0) {
			ret_code = (lttcomm_return_code) ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_PUSH_METADATA:
	{
		int ret;
		uint64_t len = msg.u.push_metadata.len;
		uint64_t key = msg.u.push_metadata.key;
		uint64_t offset = msg.u.push_metadata.target_offset;
		uint64_t version = msg.u.push_metadata.version;
		struct lttng_consumer_channel *found_channel;

		DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			/*
			 * This is possible if the metadata creation on the consumer side
			 * is in flight vis-a-vis a concurrent push metadata from the
			 * session daemon. Simply return that the channel failed and the
			 * session daemon will handle that message correctly considering
			 * that this race is acceptable thus the DBG() statement here.
			 */
			DBG("UST consumer push metadata %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			goto end_push_metadata_msg_sessiond;
		}

		health_code_update();

		if (!len) {
			/*
			 * There is nothing to receive. We have simply
			 * checked whether the channel can be found.
			 */
			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			goto end_push_metadata_msg_sessiond;
		}

		/* Tell session daemon we are ready to receive the metadata. */
		ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_push_metadata_fatal;
		}

		health_code_update();

		/* Wait for more data. */
		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			goto error_push_metadata_fatal;
		}

		health_code_update();

		ret = lttng_ustconsumer_recv_metadata(
			sock, key, offset, len, version, found_channel, 0, 1);
		if (ret < 0) {
			/* error receiving from sessiond */
			goto error_push_metadata_fatal;
		} else {
			ret_code = (lttcomm_return_code) ret;
			goto end_push_metadata_msg_sessiond;
		}
	end_push_metadata_msg_sessiond:
		goto end_msg_sessiond;
	error_push_metadata_fatal:
		goto error_fatal;
	}
	case LTTNG_CONSUMER_SETUP_METADATA:
	{
		int ret;

		ret = setup_metadata(ctx, msg.u.setup_metadata.key);
		if (ret) {
			ret_code = (lttcomm_return_code) ret;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		struct lttng_consumer_channel *found_channel;
		uint64_t key = msg.u.snapshot_channel.key;
		int ret_send;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			DBG("UST snapshot channel not found for key %" PRIu64, key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			if (msg.u.snapshot_channel.metadata) {
				int ret_snapshot;

				ret_snapshot = snapshot_metadata(found_channel,
								 key,
								 msg.u.snapshot_channel.pathname,
								 msg.u.snapshot_channel.relayd_id,
								 ctx);
				if (ret_snapshot < 0) {
					ERR("Snapshot metadata failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			} else {
				int ret_snapshot;

				ret_snapshot = snapshot_channel(
					found_channel,
					key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					msg.u.snapshot_channel.nb_packets_per_stream,
					ctx);
				if (ret_snapshot < 0) {
					ERR("Snapshot channel failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			}
		}
		health_code_update();
		ret_send = consumer_send_status_msg(sock, ret_code);
		if (ret_send < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		health_code_update();
		break;
	}
	case LTTNG_CONSUMER_DISCARDED_EVENTS:
	{
		ssize_t ret;
		uint64_t discarded_events;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.discarded_events.session_id;
		uint64_t key = msg.u.discarded_events.channel_key;

		DBG("UST consumer discarded events command for session id %" PRIu64, id);
		pthread_mutex_lock(&the_consumer_data.lock);

		ht = the_consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but they are not
		 * directly indexed, so we just use the first matching stream
		 * to extract the information we need, we default to 0 if not
		 * found (no events are dropped if the channel is not yet in
		 * use).
		 */
		discarded_events = 0;
		cds_lfht_for_each_entry_duplicate(ht->ht,
						  ht->hash_fct(&id, lttng_ht_seed),
						  ht->match_fct,
						  &id,
						  &iter.iter,
						  stream,
						  node_session_id.node)
		{
			if (stream->chan->key == key) {
				discarded_events = stream->chan->discarded_events;
				break;
			}
		}
		pthread_mutex_unlock(&the_consumer_data.lock);

		DBG("UST consumer discarded events command for session id %" PRIu64
		    ", channel key %" PRIu64,
		    id,
		    key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events));
		if (ret < 0) {
			PERROR("send discarded events");
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_LOST_PACKETS:
	{
		ssize_t ret;
		uint64_t lost_packets;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.lost_packets.session_id;
		uint64_t key = msg.u.lost_packets.channel_key;

		DBG("UST consumer lost packets command for session id %" PRIu64, id);
		pthread_mutex_lock(&the_consumer_data.lock);

		ht = the_consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but they are not
		 * directly indexed, so we just use the first matching stream
		 * to extract the information we need, we default to 0 if not
		 * found (no packets lost if the channel is not yet in use).
		 */
		lost_packets = 0;
		cds_lfht_for_each_entry_duplicate(ht->ht,
						  ht->hash_fct(&id, lttng_ht_seed),
						  ht->match_fct,
						  &id,
						  &iter.iter,
						  stream,
						  node_session_id.node)
		{
			if (stream->chan->key == key) {
				lost_packets = stream->chan->lost_packets;
				break;
			}
		}
		pthread_mutex_unlock(&the_consumer_data.lock);

		DBG("UST consumer lost packets command for session id %" PRIu64
		    ", channel key %" PRIu64,
		    id,
		    key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &lost_packets, sizeof(lost_packets));
		if (ret < 0) {
			PERROR("send lost packets");
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
	{
		int channel_monitor_pipe, ret_send, ret_set_channel_monitor_pipe;
		ssize_t ret_recv;

		ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		/* Successfully received the command's type. */
		ret_send = consumer_send_status_msg(sock, ret_code);
		if (ret_send < 0) {
			goto error_fatal;
		}

		ret_recv = lttcomm_recv_fds_unix_sock(sock, &channel_monitor_pipe, 1);
		if (ret_recv != sizeof(channel_monitor_pipe)) {
			ERR("Failed to receive channel monitor pipe");
			goto error_fatal;
		}

		DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
		ret_set_channel_monitor_pipe =
			consumer_timer_thread_set_channel_monitor_pipe(channel_monitor_pipe);
		if (!ret_set_channel_monitor_pipe) {
			int flags;
			int ret_fcntl;

			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			/* Set the pipe as non-blocking. */
			ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0);
			if (ret_fcntl == -1) {
				PERROR("fcntl get flags of the channel monitoring pipe");
				goto error_fatal;
			}
			flags = ret_fcntl;

			ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL, flags | O_NONBLOCK);
			if (ret_fcntl == -1) {
				PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
				goto error_fatal;
			}
			DBG("Channel monitor pipe set as non-blocking");
		} else {
			ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_ROTATE_CHANNEL:
	{
		struct lttng_consumer_channel *found_channel;
		uint64_t key = msg.u.rotate_channel.key;
		int ret_send_status;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			DBG("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			int rotate_channel;

			/*
			 * Sample the rotate position of all the streams in
			 * this channel.
			 */
			rotate_channel = lttng_consumer_rotate_channel(
				found_channel, key, msg.u.rotate_channel.relayd_id);
			if (rotate_channel < 0) {
				ERR("Rotate channel failed");
				ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
			}

			health_code_update();
		}

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_rotate_channel_nosignal;
		}

		/*
		 * Rotate the streams that are ready right now.
		 * FIXME: this is a second consecutive iteration over the
		 * streams in a channel, there is probably a better way to
		 * handle this, but it needs to be after the
		 * consumer_send_status_msg() call.
		 */
		if (found_channel) {
			int ret_rotate_read_streams;

			ret_rotate_read_streams =
				lttng_consumer_rotate_ready_streams(found_channel, key);
			if (ret_rotate_read_streams < 0) {
				ERR("Rotate channel failed");
			}
		}
		break;
	end_rotate_channel_nosignal:
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_CLEAR_CHANNEL:
	{
		struct lttng_consumer_channel *found_channel;
		uint64_t key = msg.u.clear_channel.key;
		int ret_send_status;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			DBG("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			int ret_clear_channel;

			ret_clear_channel = lttng_consumer_clear_channel(found_channel);
			if (ret_clear_channel) {
				ERR("Clear channel failed key %" PRIu64, key);
				ret_code = (lttcomm_return_code) ret_clear_channel;
			}

			health_code_update();
		}

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		break;
	}
	case LTTNG_CONSUMER_INIT:
	{
		int ret_send_status;
		lttng_uuid sessiond_uuid;

		std::copy(std::begin(msg.u.init.sessiond_uuid),
			  std::end(msg.u.init.sessiond_uuid),
			  sessiond_uuid.begin());
		ret_code = lttng_consumer_init_command(ctx, sessiond_uuid);
		health_code_update();
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		break;
	}
	case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
	{
		const struct lttng_credentials credentials = {
			.uid = LTTNG_OPTIONAL_INIT_VALUE(
				msg.u.create_trace_chunk.credentials.value.uid),
			.gid = LTTNG_OPTIONAL_INIT_VALUE(
				msg.u.create_trace_chunk.credentials.value.gid),
		};
		const bool is_local_trace = !msg.u.create_trace_chunk.relayd_id.is_set;
		const uint64_t relayd_id = msg.u.create_trace_chunk.relayd_id.value;
		const char *chunk_override_name = *msg.u.create_trace_chunk.override_name ?
			msg.u.create_trace_chunk.override_name :
			nullptr;
		struct lttng_directory_handle *chunk_directory_handle = nullptr;

		/*
		 * The session daemon will only provide a chunk directory file
		 * descriptor for local traces.
		 */
		if (is_local_trace) {
			int chunk_dirfd;
			int ret_send_status;
			ssize_t ret_recv;

			/* Acknowledge the reception of the command. */
			ret_send_status = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
			if (ret_send_status < 0) {
				/* Somehow, the session daemon is not responding anymore. */
				goto end_nosignal;
			}

			/*
			 * Receive trace chunk domain dirfd.
			 */
			ret_recv = lttcomm_recv_fds_unix_sock(sock, &chunk_dirfd, 1);
			if (ret_recv != sizeof(chunk_dirfd)) {
				ERR("Failed to receive trace chunk domain directory file descriptor");
				goto error_fatal;
			}

			DBG("Received trace chunk domain directory fd (%d)", chunk_dirfd);
			chunk_directory_handle =
				lttng_directory_handle_create_from_dirfd(chunk_dirfd);
			if (!chunk_directory_handle) {
				ERR("Failed to initialize chunk domain directory handle from directory file descriptor");
				if (close(chunk_dirfd)) {
					PERROR("Failed to close chunk directory file descriptor");
				}
				goto error_fatal;
			}
		}

		ret_code = lttng_consumer_create_trace_chunk(
			!is_local_trace ? &relayd_id : nullptr,
			msg.u.create_trace_chunk.session_id,
			msg.u.create_trace_chunk.chunk_id,
			(time_t) msg.u.create_trace_chunk.creation_timestamp,
			chunk_override_name,
			msg.u.create_trace_chunk.credentials.is_set ? &credentials : nullptr,
			chunk_directory_handle);
		lttng_directory_handle_put(chunk_directory_handle);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
	{
		enum lttng_trace_chunk_command_type close_command =
			(lttng_trace_chunk_command_type) msg.u.close_trace_chunk.close_command.value;
		const uint64_t relayd_id = msg.u.close_trace_chunk.relayd_id.value;
		struct lttcomm_consumer_close_trace_chunk_reply reply;
		char closed_trace_chunk_path[LTTNG_PATH_MAX] = {};
		int ret;

		ret_code = lttng_consumer_close_trace_chunk(
			msg.u.close_trace_chunk.relayd_id.is_set ? &relayd_id : nullptr,
			msg.u.close_trace_chunk.session_id,
			msg.u.close_trace_chunk.chunk_id,
			(time_t) msg.u.close_trace_chunk.close_timestamp,
			msg.u.close_trace_chunk.close_command.is_set ? &close_command : nullptr,
			closed_trace_chunk_path);
		reply.ret_code = ret_code;
		reply.path_length = strlen(closed_trace_chunk_path) + 1;
		ret = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
		if (ret != sizeof(reply)) {
			goto error_fatal;
		}
		ret = lttcomm_send_unix_sock(sock, closed_trace_chunk_path, reply.path_length);
		if (ret != reply.path_length) {
			goto error_fatal;
		}
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
	{
		const uint64_t relayd_id = msg.u.trace_chunk_exists.relayd_id.value;

		ret_code = lttng_consumer_trace_chunk_exists(
			msg.u.trace_chunk_exists.relayd_id.is_set ? &relayd_id : nullptr,
			msg.u.trace_chunk_exists.session_id,
			msg.u.trace_chunk_exists.chunk_id);
		goto end_msg_sessiond;
	}
2215 case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS
:
2217 const uint64_t key
= msg
.u
.open_channel_packets
.key
;
2218 struct lttng_consumer_channel
*found_channel
= consumer_find_channel(key
);
2220 if (found_channel
) {
2221 pthread_mutex_lock(&found_channel
->lock
);
2222 ret_code
= lttng_consumer_open_channel_packets(found_channel
);
2223 pthread_mutex_unlock(&found_channel
->lock
);
2226 * The channel could have disappeared in per-pid
2229 DBG("Channel %" PRIu64
" not found", key
);
2230 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
2233 health_code_update();
2234 goto end_msg_sessiond
;
end_nosignal:
	/*
	 * Return 1 to indicate success since the 0 value can be a socket
	 * shutdown during the recv() or send() call.
	 */
	ret_func = 1;
	goto end;

end_msg_sessiond:
	/*
	 * The returned value here is not useful since either way we'll return 1 to
	 * the caller because the session daemon socket management is done
	 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
	 */
	{
		int ret_send_status;

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			goto error_fatal;
		}
	}

	ret_func = 1;
	goto end;

end_channel_error:
	if (channel) {
		consumer_del_channel(channel);
	}
	/* We have to send a status channel message indicating an error. */
	{
		int ret_send_status;

		ret_send_status = consumer_send_status_channel(sock, nullptr);
		if (ret_send_status < 0) {
			/* Stop everything if the session daemon cannot be notified. */
			goto error_fatal;
		}
	}

	ret_func = 1;
	goto end;

error_fatal:
	/* This will issue a consumer stop. */
	ret_func = -1;
	goto end;

end:
	health_code_update();
	return ret_func;
}
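
/*
 * Illustrative caller sketch (not part of the original source): given the
 * return convention documented above, the session daemon socket poll loop
 * that drives this command handler would treat its result roughly as:
 *
 *     ret = lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
 *     if (ret <= 0) {
 *             // 0 or negative: tear the consumer down
 *     }
 *     // ret == 1: command handled, keep servicing the socket
 */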
int lttng_ust_flush_buffer(struct lttng_consumer_stream *stream, int producer_active)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_flush_buffer(stream->ustream, producer_active);
}
/*
 * Take a snapshot for a specific stream.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_snapshot(stream->ustream);
}
/*
 * Sample consumed and produced positions for a specific stream.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_ustconsumer_sample_snapshot_positions(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_snapshot_sample_positions(stream->ustream);
}
/*
 * Get the produced position.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_ustconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
					    unsigned long *pos)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);
	LTTNG_ASSERT(pos);

	return lttng_ust_ctl_snapshot_get_produced(stream->ustream, pos);
}
/*
 * Get the consumed position.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_ustconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
					    unsigned long *pos)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);
	LTTNG_ASSERT(pos);

	return lttng_ust_ctl_snapshot_get_consumed(stream->ustream, pos);
}
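
/*
 * Usage sketch (illustrative, not part of the original source): the two
 * getters above only yield meaningful values once the positions have been
 * sampled. A caller holding the stream lock might combine them as follows:
 *
 *     unsigned long consumed, produced;
 *
 *     if (lttng_ustconsumer_sample_snapshot_positions(stream) == 0 &&
 *         lttng_ustconsumer_get_consumed_snapshot(stream, &consumed) == 0 &&
 *         lttng_ustconsumer_get_produced_snapshot(stream, &produced) == 0) {
 *             // produced - consumed bytes are currently available for reading
 *     }
 */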
int lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream, int producer)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_flush_buffer(stream->ustream, producer);
}
int lttng_ustconsumer_clear_buffer(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_clear_buffer(stream->ustream);
}
int lttng_ustconsumer_get_current_timestamp(struct lttng_consumer_stream *stream, uint64_t *ts)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);
	LTTNG_ASSERT(ts);

	return lttng_ust_ctl_get_current_timestamp(stream->ustream, ts);
}
int lttng_ustconsumer_get_sequence_number(struct lttng_consumer_stream *stream, uint64_t *seq)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);
	LTTNG_ASSERT(seq);

	return lttng_ust_ctl_get_sequence_number(stream->ustream, seq);
}
/*
 * Called when the stream signals the consumer that it has hung up.
 */
void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	pthread_mutex_lock(&stream->lock);
	if (!stream->quiescent) {
		if (lttng_ust_ctl_flush_buffer(stream->ustream, 0) < 0) {
			ERR("Failed to flush buffer on stream hang-up");
		} else {
			stream->quiescent = true;
		}
	}

	stream->hangup_flush_done = 1;
	pthread_mutex_unlock(&stream->lock);
}
void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
{
	int i;

	LTTNG_ASSERT(chan);
	LTTNG_ASSERT(chan->uchan);
	LTTNG_ASSERT(chan->buffer_credentials.is_set);

	if (chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(chan);
	}
	for (i = 0; i < chan->nr_stream_fds; i++) {
		int ret;

		ret = close(chan->stream_fds[i]);
		if (ret) {
			PERROR("close");
		}
		if (chan->shm_path[0]) {
			char shm_path[PATH_MAX];

			ret = get_stream_shm_path(shm_path, chan->shm_path, i);
			if (ret) {
				ERR("Cannot get stream shm path");
			}
			ret = run_as_unlink(shm_path,
					    lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
						    chan->buffer_credentials)),
					    lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
						    chan->buffer_credentials)));
			if (ret) {
				PERROR("unlink %s", shm_path);
			}
		}
	}
}
void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan)
{
	LTTNG_ASSERT(chan);
	LTTNG_ASSERT(chan->uchan);
	LTTNG_ASSERT(chan->buffer_credentials.is_set);

	consumer_metadata_cache_destroy(chan);
	lttng_ust_ctl_destroy_channel(chan->uchan);
	/* Try to rmdir all directories under shm_path root. */
	if (chan->root_shm_path[0]) {
		(void) run_as_rmdir_recursive(
			chan->root_shm_path,
			lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(chan->buffer_credentials)),
			lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(chan->buffer_credentials)),
			LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
	}
	free(chan->stream_fds);
}
void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	if (stream->chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(stream->chan);
	}
	lttng_ust_ctl_destroy_stream(stream->ustream);
}
int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_stream_get_wakeup_fd(stream->ustream);
}

int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_stream_close_wakeup_fd(stream->ustream);
}
/*
 * Write up to one packet from the metadata cache to the channel.
 *
 * Returns the number of bytes pushed from the cache into the ring buffer, or a
 * negative value on error.
 */
static int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
{
	ssize_t write_len;
	int ret;

	pthread_mutex_lock(&stream->chan->metadata_cache->lock);
	if (stream->chan->metadata_cache->contents.size == stream->ust_metadata_pushed) {
		/*
		 * In the context of a user space metadata channel, a
		 * change in version can be detected in two ways:
		 *   1) During the pre-consume of the `read_subbuffer` loop,
		 *   2) When populating the metadata ring buffer (i.e. here).
		 *
		 * This function is invoked when there is no metadata
		 * available in the ring-buffer. If all data was consumed
		 * up to the size of the metadata cache, there is no metadata
		 * to insert in the ring-buffer.
		 *
		 * However, the metadata version could still have changed (a
		 * regeneration without any new data will yield the same cache
		 * size).
		 *
		 * The cache's version is checked for a version change and the
		 * consumed position is reset if one occurred.
		 *
		 * This check is only necessary for the user space domain as
		 * it has to manage the cache explicitly. If this reset was not
		 * performed, no metadata would be consumed (and no reset would
		 * occur as part of the pre-consume) until the metadata size
		 * exceeded the cache size.
		 */
		if (stream->metadata_version != stream->chan->metadata_cache->version) {
			metadata_stream_reset_cache_consumed_position(stream);
			consumer_stream_metadata_set_version(stream,
							     stream->chan->metadata_cache->version);
		} else {
			ret = 0;
			goto end;
		}
	}

	write_len = lttng_ust_ctl_write_one_packet_to_channel(
		stream->chan->uchan,
		&stream->chan->metadata_cache->contents.data[stream->ust_metadata_pushed],
		stream->chan->metadata_cache->contents.size - stream->ust_metadata_pushed);
	LTTNG_ASSERT(write_len != 0);
	if (write_len < 0) {
		ERR("Writing one metadata packet");
		ret = write_len;
		goto end;
	}
	stream->ust_metadata_pushed += write_len;

	LTTNG_ASSERT(stream->chan->metadata_cache->contents.size >= stream->ust_metadata_pushed);
	ret = write_len;

	/*
	 * Switch packet (but don't open the next one) on every commit of
	 * a metadata packet. Since the subbuffer is fully filled (with padding,
	 * if needed), the stream is "quiescent" after this commit.
	 */
	if (lttng_ust_ctl_flush_buffer(stream->ustream, 1)) {
		ERR("Failed to flush buffer while committing one metadata packet");
		ret = -EIO;
	} else {
		stream->quiescent = true;
	}

end:
	pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
	return ret;
}
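
/*
 * Worked example of the cache accounting above (illustrative): with a cache
 * of contents.size S = 4096 bytes and ust_metadata_pushed P = 1024, the call
 * writes at most S - P = 3072 bytes starting at offset P, then advances P by
 * the returned write_len. Once P == S the cache is fully consumed; only a
 * metadata version change (regeneration) resets P back to 0.
 */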
/*
 * Sync metadata, meaning request it from the session daemon and snapshot so
 * that the metadata thread can consume it.
 *
 * The metadata stream lock is held here, but we need to release it when
 * interacting with sessiond, else we cause a deadlock with live
 * awaiting on metadata to be pushed out.
 *
 * The RCU read side lock must be held by the caller.
 */
enum sync_metadata_status
lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
				struct lttng_consumer_stream *metadata_stream)
{
	int ret;
	enum sync_metadata_status status;
	struct lttng_consumer_channel *metadata_channel;

	LTTNG_ASSERT(ctx);
	LTTNG_ASSERT(metadata_stream);
	ASSERT_RCU_READ_LOCKED();

	metadata_channel = metadata_stream->chan;
	pthread_mutex_unlock(&metadata_stream->lock);
	/*
	 * Request metadata from the sessiond, but don't wait for the flush
	 * because we locked the metadata thread.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 0);
	pthread_mutex_lock(&metadata_stream->lock);
	if (ret < 0) {
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	}

	/*
	 * The metadata stream and channel can be deleted while the
	 * metadata stream lock was released. The stream is checked
	 * for deletion before we use it further.
	 *
	 * Note that it is safe to access a logically-deleted stream since its
	 * existence is still guaranteed by the RCU read side lock. However,
	 * it should no longer be used. The close/deletion of the metadata
	 * channel and stream already guarantees that all metadata has been
	 * consumed. Therefore, there is nothing left to do in this function.
	 */
	if (consumer_stream_is_deleted(metadata_stream)) {
		DBG("Metadata stream %" PRIu64 " was deleted during the metadata synchronization",
		    metadata_stream->key);
		status = SYNC_METADATA_STATUS_NO_DATA;
		goto end;
	}

	ret = commit_one_metadata_packet(metadata_stream);
	if (ret < 0) {
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	} else if (ret > 0) {
		status = SYNC_METADATA_STATUS_NEW_DATA;
	} else /* ret == 0 */ {
		status = SYNC_METADATA_STATUS_NO_DATA;
		goto end;
	}

	ret = lttng_ust_ctl_snapshot(metadata_stream->ustream);
	if (ret < 0) {
		ERR("Failed to take a snapshot of the metadata ring-buffer positions, ret = %d",
		    ret);
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	}

end:
	return status;
}
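
/*
 * Hypothetical caller sketch (not part of the original source): the metadata
 * thread is expected to dispatch on the returned status, roughly:
 *
 *     switch (lttng_ustconsumer_sync_metadata(ctx, metadata_stream)) {
 *     case SYNC_METADATA_STATUS_NEW_DATA:
 *             break;  // go on to consume the newly committed packet
 *     case SYNC_METADATA_STATUS_NO_DATA:
 *             ...     // nothing to push, retry later
 *     case SYNC_METADATA_STATUS_ERROR:
 *             ...     // abort the synchronization
 *     }
 */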
/*
 * Return 0 on success else a negative value.
 */
static int notify_if_more_data(struct lttng_consumer_stream *stream,
			       struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_ust_ctl_consumer_stream *ustream;

	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(ctx);

	ustream = stream->ustream;

	/*
	 * First, we are going to check if there is a new subbuffer available
	 * before reading the stream wait_fd.
	 */
	/* Get the next subbuffer */
	ret = lttng_ust_ctl_get_next_subbuf(ustream);
	if (ret) {
		/* No more data found, flag the stream. */
		stream->has_data = 0;
		ret = 0;
		goto end;
	}

	ret = lttng_ust_ctl_put_subbuf(ustream);
	LTTNG_ASSERT(!ret);

	/* This stream still has data. Flag it and wake up the data thread. */
	stream->has_data = 1;

	if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
		ssize_t writelen;

		writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
		if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
			ret = writelen;
			goto end;
		}

		/* The wake up pipe has been notified. */
		ctx->has_wakeup = 1;
	}
	ret = 0;

end:
	return ret;
}
static int consumer_stream_ust_on_wake_up(struct lttng_consumer_stream *stream)
{
	int ret = 0;

	/*
	 * We can consume the 1 byte written into the wait_fd by
	 * UST. Don't trigger error if we cannot read this one byte
	 * (read returns 0), or if the error is EAGAIN or EWOULDBLOCK.
	 *
	 * This is only done when the stream is monitored by a thread,
	 * before the flush is done after a hangup and if the stream
	 * is not flagged with data since there might be nothing to
	 * consume in the wait fd but still have data available
	 * flagged by the consumer wake up pipe.
	 */
	if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
		char dummy;
		ssize_t readlen;

		readlen = lttng_read(stream->wait_fd, &dummy, 1);
		if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
			ret = readlen;
		}
	}

	return ret;
}
static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
					 struct stream_subbuffer *subbuf)
{
	int ret;

	ret = lttng_ust_ctl_get_subbuf_size(stream->ustream, &subbuf->info.data.subbuf_size);
	if (ret) {
		goto end;
	}

	ret = lttng_ust_ctl_get_padded_subbuf_size(stream->ustream,
						   &subbuf->info.data.padded_subbuf_size);
	if (ret) {
		goto end;
	}

end:
	return ret;
}
static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
					   struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	subbuf->info.metadata.version = stream->metadata_version;

end:
	return ret;
}
static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
				       struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	ret = lttng_ust_ctl_get_packet_size(stream->ustream, &subbuf->info.data.packet_size);
	if (ret) {
		PERROR("Failed to get sub-buffer packet size");
		goto end;
	}

	ret = lttng_ust_ctl_get_content_size(stream->ustream, &subbuf->info.data.content_size);
	if (ret) {
		PERROR("Failed to get sub-buffer content size");
		goto end;
	}

	ret = lttng_ust_ctl_get_timestamp_begin(stream->ustream,
						&subbuf->info.data.timestamp_begin);
	if (ret) {
		PERROR("Failed to get sub-buffer begin timestamp");
		goto end;
	}

	ret = lttng_ust_ctl_get_timestamp_end(stream->ustream, &subbuf->info.data.timestamp_end);
	if (ret) {
		PERROR("Failed to get sub-buffer end timestamp");
		goto end;
	}

	ret = lttng_ust_ctl_get_events_discarded(stream->ustream,
						 &subbuf->info.data.events_discarded);
	if (ret) {
		PERROR("Failed to get sub-buffer events discarded count");
		goto end;
	}

	ret = lttng_ust_ctl_get_sequence_number(stream->ustream,
						&subbuf->info.data.sequence_number.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get sub-buffer sequence number");
			goto end;
		}
	} else {
		subbuf->info.data.sequence_number.is_set = true;
	}

	ret = lttng_ust_ctl_get_stream_id(stream->ustream, &subbuf->info.data.stream_id);
	if (ret < 0) {
		PERROR("Failed to get stream id");
		goto end;
	}

	ret = lttng_ust_ctl_get_instance_id(stream->ustream,
					    &subbuf->info.data.stream_instance_id.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get stream instance id");
			goto end;
		}
	} else {
		subbuf->info.data.stream_instance_id.is_set = true;
	}

end:
	return ret;
}
static int get_next_subbuffer_common(struct lttng_consumer_stream *stream,
				     struct stream_subbuffer *subbuffer)
{
	int ret;
	const char *addr;

	ret = stream->read_subbuffer_ops.extract_subbuffer_info(stream, subbuffer);
	if (ret) {
		goto end;
	}

	ret = get_current_subbuf_addr(stream, &addr);
	if (ret) {
		goto end;
	}

	subbuffer->buffer.buffer =
		lttng_buffer_view_init(addr, 0, subbuffer->info.data.padded_subbuf_size);
	LTTNG_ASSERT(subbuffer->buffer.buffer.data != nullptr);

end:
	return ret;
}
static enum get_next_subbuffer_status get_next_subbuffer(struct lttng_consumer_stream *stream,
							  struct stream_subbuffer *subbuffer)
{
	int ret;
	enum get_next_subbuffer_status status;

	ret = lttng_ust_ctl_get_next_subbuf(stream->ustream);
	switch (ret) {
	case 0:
		status = GET_NEXT_SUBBUFFER_STATUS_OK;
		break;
	case -ENODATA:
	case -EAGAIN:
		/*
		 * The caller only expects -ENODATA when there is no data to
		 * read, but the kernel tracer returns -EAGAIN when there is
		 * currently no data for a non-finalized stream, and -ENODATA
		 * when there is no data for a finalized stream. Those can be
		 * combined into a -ENODATA return value.
		 */
		status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
		goto end;
	default:
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = get_next_subbuffer_common(stream, subbuffer);
	if (ret) {
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

end:
	return status;
}
static enum get_next_subbuffer_status
get_next_subbuffer_metadata(struct lttng_consumer_stream *stream,
			    struct stream_subbuffer *subbuffer)
{
	int ret;
	bool cache_empty;
	bool got_subbuffer;
	bool coherent;
	bool buffer_empty;
	unsigned long consumed_pos, produced_pos;
	enum get_next_subbuffer_status status;

	do {
		ret = lttng_ust_ctl_get_next_subbuf(stream->ustream);
		if (ret == 0) {
			got_subbuffer = true;
		} else {
			got_subbuffer = false;
			if (ret != -EAGAIN) {
				/* Fatal error. */
				status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
				goto end;
			}
		}

		/*
		 * Determine if the cache is empty and ensure that a sub-buffer
		 * is made available if the cache is not empty.
		 */
		if (!got_subbuffer) {
			ret = commit_one_metadata_packet(stream);
			if (ret < 0 && ret != -ENOBUFS) {
				status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
				goto end;
			} else if (ret == 0) {
				/* Not an error, the cache is empty. */
				cache_empty = true;
				status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
				goto end;
			} else {
				cache_empty = false;
			}
		} else {
			pthread_mutex_lock(&stream->chan->metadata_cache->lock);
			cache_empty = stream->chan->metadata_cache->contents.size ==
				stream->ust_metadata_pushed;
			pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
		}
	} while (!got_subbuffer);

	/* Populate sub-buffer infos and view. */
	ret = get_next_subbuffer_common(stream, subbuffer);
	if (ret) {
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = lttng_ustconsumer_sample_snapshot_positions(stream);
	if (ret) {
		/*
		 * -EAGAIN is not expected since we got a sub-buffer and haven't
		 * pushed the consumption position yet (on put_next).
		 */
		PERROR("Failed to take a snapshot of metadata buffer positions");
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
	if (ret) {
		PERROR("Failed to get metadata consumed position");
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
	if (ret) {
		PERROR("Failed to get metadata produced position");
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	/* Last sub-buffer of the ring buffer ? */
	buffer_empty = (consumed_pos + stream->max_sb_size) == produced_pos;

	/*
	 * The sessiond registry lock ensures that coherent units of metadata
	 * are pushed to the consumer daemon at once. Hence, if a sub-buffer is
	 * acquired, the cache is empty, and it is the only sub-buffer
	 * available, it is safe to assume that it is "coherent".
	 */
	coherent = got_subbuffer && cache_empty && buffer_empty;

	LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);
	status = GET_NEXT_SUBBUFFER_STATUS_OK;

end:
	return status;
}
static int put_next_subbuffer(struct lttng_consumer_stream *stream,
			      struct stream_subbuffer *subbuffer __attribute__((unused)))
{
	const int ret = lttng_ust_ctl_put_next_subbuf(stream->ustream);

	LTTNG_ASSERT(ret == 0);
	return ret;
}
static int signal_metadata(struct lttng_consumer_stream *stream,
			   struct lttng_consumer_local_data *ctx __attribute__((unused)))
{
	ASSERT_LOCKED(stream->metadata_rdv_lock);
	return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
}
static int lttng_ustconsumer_set_stream_ops(struct lttng_consumer_stream *stream)
{
	int ret = 0;

	stream->read_subbuffer_ops.on_wake_up = consumer_stream_ust_on_wake_up;
	if (stream->metadata_flag) {
		stream->read_subbuffer_ops.get_next_subbuffer = get_next_subbuffer_metadata;
		stream->read_subbuffer_ops.extract_subbuffer_info = extract_metadata_subbuffer_info;
		stream->read_subbuffer_ops.reset_metadata =
			metadata_stream_reset_cache_consumed_position;
		if (stream->chan->is_live) {
			stream->read_subbuffer_ops.on_sleep = signal_metadata;
			ret = consumer_stream_enable_metadata_bucketization(stream);
			if (ret) {
				goto end;
			}
		}
	} else {
		stream->read_subbuffer_ops.get_next_subbuffer = get_next_subbuffer;
		stream->read_subbuffer_ops.extract_subbuffer_info = extract_data_subbuffer_info;
		stream->read_subbuffer_ops.on_sleep = notify_if_more_data;
		if (stream->chan->is_live) {
			stream->read_subbuffer_ops.send_live_beacon = consumer_flush_ust_index;
		}
	}

	stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;

end:
	return ret;
}
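
/*
 * Sketch of how the ops installed above are driven (illustrative only; the
 * generic read loop lives in the common consumer code):
 *
 *     status = stream->read_subbuffer_ops.get_next_subbuffer(stream, &subbuf);
 *     if (status == GET_NEXT_SUBBUFFER_STATUS_OK) {
 *             // write subbuf.buffer.buffer to the output or the network
 *             stream->read_subbuffer_ops.put_next_subbuffer(stream, &subbuf);
 *     }
 *     if (stream->read_subbuffer_ops.on_sleep) {
 *             stream->read_subbuffer_ops.on_sleep(stream, ctx);
 *     }
 */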
/*
 * Called when a stream is created.
 *
 * Return 0 on success or else a negative value.
 */
int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	int ret;

	LTTNG_ASSERT(stream);

	/*
	 * Don't create anything if this is set for streaming or if there is
	 * no current trace chunk on the parent channel.
	 */
	if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
	    stream->chan->trace_chunk) {
		ret = consumer_stream_create_output_files(stream, true);
		if (ret) {
			goto error;
		}
	}

	lttng_ustconsumer_set_stream_ops(stream);
	ret = 0;

error:
	return ret;
}
/*
 * Check if data is still being extracted from the buffers for a specific
 * stream. The consumer data lock and the stream lock MUST be acquired before
 * calling this function.
 *
 * Return 1 if the traced data are still getting read else 0 meaning that the
 * data is available for trace viewer reading.
 */
int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
{
	int ret;

	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);
	ASSERT_LOCKED(stream->lock);

	DBG("UST consumer checking data pending");

	if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
		ret = 0;
		goto end;
	}

	if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
		uint64_t contiguous, pushed;

		/* Ease our life a bit. */
		pthread_mutex_lock(&stream->chan->metadata_cache->lock);
		contiguous = stream->chan->metadata_cache->contents.size;
		pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
		pushed = stream->ust_metadata_pushed;

		/*
		 * We can simply check whether all contiguously available data
		 * has been pushed to the ring buffer, since the push operation
		 * is performed within get_next_subbuf(), and because both
		 * get_next_subbuf() and put_next_subbuf() are issued atomically
		 * thanks to the stream lock within
		 * lttng_ustconsumer_read_subbuffer(). This basically means that
		 * whenever ust_metadata_pushed is incremented, the associated
		 * metadata has been consumed from the metadata stream.
		 */
		DBG("UST consumer metadata pending check: contiguous %" PRIu64
		    " vs pushed %" PRIu64,
		    contiguous,
		    pushed);
		LTTNG_ASSERT(((int64_t) (contiguous - pushed)) >= 0);
		if ((contiguous != pushed) ||
		    (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
			ret = 1; /* Data is pending */
			goto end;
		}
	} else {
		ret = lttng_ust_ctl_get_next_subbuf(stream->ustream);
		if (ret == 0) {
			/*
			 * There is still data so let's put back this
			 * sub-buffer.
			 */
			ret = lttng_ust_ctl_put_subbuf(stream->ustream);
			LTTNG_ASSERT(ret == 0);
			ret = 1; /* Data is pending */
			goto end;
		}
	}

	/* Data is NOT pending so ready to be read. */
	ret = 0;

end:
	return ret;
}
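
/*
 * Example of the metadata pending check above (illustrative): with a cache
 * size of contiguous = 8192 and ust_metadata_pushed = 4096, contiguous !=
 * pushed, so 1 is returned and data is reported as pending. Once the push
 * catches up (pushed == 8192), the stream is ready for trace viewer reading
 * and 0 is returned.
 */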
/*
 * Stop a given metadata channel timer if enabled and close the wait fd which
 * is the poll pipe of the metadata stream.
 *
 * This MUST be called with the metadata channel lock acquired.
 */
void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
{
	LTTNG_ASSERT(metadata);
	LTTNG_ASSERT(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);

	DBG("Closing metadata channel key %" PRIu64, metadata->key);

	if (metadata->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(metadata);
	}

	if (!metadata->metadata_stream) {
		goto end;
	}

	/*
	 * Close the write side so that any thread monitoring the stream wakes
	 * up and cleans up the metadata stream.
	 */
	if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
		int ret;

		ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
		if (ret < 0) {
			PERROR("closing metadata pipe write side");
		}
		metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
	}

end:
	return;
}
/*
 * Close every metadata stream wait fd of the metadata hash table. This
 * function MUST be used very carefully so as not to run into a race between
 * the metadata thread handling streams and this function closing their wait
 * fd.
 *
 * For UST, this is used when the session daemon hangs up. It's the metadata
 * producer, so calling this is safe because we are assured that no state
 * change can occur in the metadata thread for the streams in the hash table.
 */
void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	LTTNG_ASSERT(metadata_ht);
	LTTNG_ASSERT(metadata_ht->ht);

	DBG("UST consumer closing all metadata streams");

	{
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (metadata_ht->ht, &iter.iter, stream, node.node) {
			health_code_update();

			pthread_mutex_lock(&stream->chan->lock);
			lttng_ustconsumer_close_metadata(stream->chan);
			pthread_mutex_unlock(&stream->chan->lock);
		}
	}
}
void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
{
	int ret;

	ret = lttng_ust_ctl_stream_close_wakeup_fd(stream->ustream);
	if (ret < 0) {
		ERR("Unable to close wakeup fd");
	}
}
/*
 * Please refer to consumer-timer.c before adding any lock within this
 * function or any of its callees. Timers have a very strict locking
 * semantic with respect to teardown. Failure to respect this semantic
 * introduces deadlocks.
 *
 * DON'T hold the metadata lock when calling this function, else this
 * can cause a deadlock involving the consumer awaiting metadata to be
 * pushed out due to concurrent interaction with the session daemon.
 */
int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
				       struct lttng_consumer_channel *channel,
				       int timer,
				       int wait)
{
	struct lttcomm_metadata_request_msg request;
	struct lttcomm_consumer_msg msg;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	uint64_t len, key, offset, version;
	int ret;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(channel->metadata_cache);

	memset(&request, 0, sizeof(request));

	/* send the metadata request to sessiond */
	switch (the_consumer_data.type) {
	case LTTNG_CONSUMER64_UST:
		request.bits_per_long = 64;
		break;
	case LTTNG_CONSUMER32_UST:
		request.bits_per_long = 32;
		break;
	default:
		request.bits_per_long = 0;
		break;
	}

	request.session_id = channel->session_id;
	request.session_id_per_pid = channel->session_id_per_pid;
	/*
	 * Request the application UID here so the metadata of that application can
	 * be sent back. The channel UID corresponds to the user UID of the session
	 * used for the rights on the stream file(s).
	 */
	request.uid = channel->ust_app_uid;
	request.key = channel->key;

	DBG("Sending metadata request to sessiond, session id %" PRIu64 ", per-pid %" PRIu64
	    ", app UID %u and channel key %" PRIu64,
	    request.session_id,
	    request.session_id_per_pid,
	    request.uid,
	    request.key);

	pthread_mutex_lock(&ctx->metadata_socket_lock);

	health_code_update();

	ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request, sizeof(request));
	if (ret < 0) {
		ERR("Asking metadata to sessiond");
		goto end;
	}

	health_code_update();

	/* Receive the metadata from sessiond */
	ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %d (expects %zu)", ret, sizeof(msg));
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
		/*
		 * The ret value might be 0, meaning an orderly shutdown, but this is
		 * ok since the caller handles this.
		 */
		goto end;
	}

	health_code_update();

	if (msg.cmd_type == LTTNG_ERR_UND) {
		/* No registry found */
		(void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret_code);
		ret = 0;
		goto end;
	} else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
		ERR("Unexpected cmd_type received %d", msg.cmd_type);
		ret = -1;
		goto end;
	}

	len = msg.u.push_metadata.len;
	key = msg.u.push_metadata.key;
	offset = msg.u.push_metadata.target_offset;
	version = msg.u.push_metadata.version;

	LTTNG_ASSERT(key == channel->key);
	if (len == 0) {
		DBG("No new metadata to receive for key %" PRIu64, key);
	}

	health_code_update();

	/* Tell session daemon we are ready to receive the metadata. */
	ret = consumer_send_status_msg(ctx->consumer_metadata_socket, LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0 || len == 0) {
		/*
		 * Somehow, the session daemon is not responding anymore or there is
		 * nothing to receive.
		 */
		goto end;
	}

	health_code_update();

	ret = lttng_ustconsumer_recv_metadata(
		ctx->consumer_metadata_socket, key, offset, len, version, channel, timer, wait);
	if (ret >= 0) {
		/*
		 * Only send the status msg if the sessiond is alive meaning a positive
		 * ret code.
		 */
		(void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
	}
	ret = 0;

end:
	health_code_update();

	pthread_mutex_unlock(&ctx->metadata_socket_lock);
	return ret;
}
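
/*
 * Summary of the request/reply sequence implemented above (for reference):
 *
 *     consumerd                           sessiond
 *     ---------                           --------
 *     lttcomm_metadata_request_msg  -->
 *                                   <--   lttcomm_consumer_msg
 *                                         (PUSH_METADATA or LTTNG_ERR_UND)
 *     status: ready to receive      -->
 *                                   <--   len bytes of metadata
 *     status: reception result      -->
 */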
/*
 * Return the ustctl call for the get stream id.
 */
int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream, uint64_t *stream_id)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream_id);

	return lttng_ust_ctl_get_stream_id(stream->ustream, stream_id);
}

void lttng_ustconsumer_sigbus_handle(void *addr)
{
	lttng_ust_ctl_sigbus_handle(addr);
}