/*
 * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <lttng/ust-ctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <urcu/list.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/relayd/relayd.h>
#include <common/compat/fcntl.h>
#include <common/compat/endian.h>
#include <common/consumer/consumer-metadata-cache.h>
#include <common/consumer/consumer-stream.h>
#include <common/consumer/consumer-timer.h>
#include <common/utils.h>
#include <common/index/index.h>
#include <common/consumer/consumer.h>
#include <common/optional.h>

#include "ust-consumer.h"

#define INT_MAX_STR_LEN 12	/* includes \0 */

extern struct lttng_consumer_global_data consumer_data;
extern int consumer_poll_timeout;

/*
 * Free the channel object and all streams associated with it. This MUST be
 * used if and only if the channel has _NEVER_ been added to the global
 * channel hash table.
 */
static void destroy_channel(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);

	DBG("UST consumer cleaning stream list");

	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		health_code_update();

		cds_list_del(&stream->send_node);
		ustctl_destroy_stream(stream->ustream);
		lttng_trace_chunk_put(stream->trace_chunk);
		free(stream);
	}

	/*
	 * If a channel is available, meaning it was created before the
	 * streams, release it as well.
	 */
	if (channel->uchan) {
		lttng_ustconsumer_del_channel(channel);
		lttng_ustconsumer_free_channel(channel);
	}

	if (channel->trace_chunk) {
		lttng_trace_chunk_put(channel->trace_chunk);
	}

	free(channel);
}

/*
 * Add channel to internal consumer state.
 *
 * Returns 0 on success or else a negative value.
 */
static int add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;

	assert(channel);
	assert(ctx);

	if (ctx->on_recv_channel != NULL) {
		ret = ctx->on_recv_channel(channel);
		if (ret == 0) {
			ret = consumer_add_channel(channel, ctx);
		} else if (ret < 0) {
			/* Most likely an ENOMEM. */
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			goto error;
		}
	} else {
		ret = consumer_add_channel(channel, ctx);
	}

	DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);

error:
	return ret;
}

/*
 * Allocate and return a consumer stream object. If _alloc_ret is not NULL,
 * the error value, if applicable, is set in it; otherwise it is left
 * untouched.
 *
 * Return NULL on error else the newly allocated stream object.
 */
static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *_alloc_ret)
{
	int alloc_ret;
	struct lttng_consumer_stream *stream = NULL;

	assert(channel);
	assert(ctx);

	stream = consumer_stream_create(
			channel,
			channel->key,
			key,
			channel->name,
			channel->relayd_id,
			channel->session_id,
			channel->trace_chunk,
			cpu,
			&alloc_ret,
			channel->type,
			channel->monitor);
	if (stream == NULL) {
		switch (alloc_ret) {
		case -ENOENT:
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			DBG3("Could not find channel");
			break;
		case -ENOMEM:
		case -EINVAL:
		default:
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			break;
		}
		goto error;
	}

	consumer_stream_update_channel_attributes(stream, channel);

error:
	if (_alloc_ret) {
		*_alloc_ret = alloc_ret;
	}
	return stream;
}
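
/*
 * Note: on failure, allocate_stream() reports the consumer_stream_create()
 * error code through *_alloc_ret (when provided), which lets callers tell a
 * benign "channel not found" during cpu hotplug teardown apart from a fatal
 * allocation error.
 */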
/*
 * Send the given stream pointer to the corresponding thread.
 *
 * Returns 0 on success else a negative value.
 */
static int send_stream_to_thread(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_pipe *stream_pipe;

	/* Get the right pipe where the stream will be sent. */
	if (stream->metadata_flag) {
		consumer_add_metadata_stream(stream);
		stream_pipe = ctx->consumer_metadata_pipe;
	} else {
		consumer_add_data_stream(stream);
		stream_pipe = ctx->consumer_data_pipe;
	}

	/*
	 * From this point on, the stream's ownership has been moved away from
	 * the channel and it becomes globally visible. Hence, remove it from
	 * the local stream list to prevent the stream from being both local
	 * and global.
	 */
	stream->globally_visible = 1;
	cds_list_del(&stream->send_node);

	ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
	if (ret < 0) {
		ERR("Consumer write %s stream to pipe %d",
				stream->metadata_flag ? "metadata" : "data",
				lttng_pipe_get_writefd(stream_pipe));
		if (stream->metadata_flag) {
			consumer_del_stream_for_metadata(stream);
		} else {
			consumer_del_stream_for_data(stream);
		}
		goto error;
	}

error:
	return ret;
}

static
int get_stream_shm_path(char *stream_shm_path, const char *shm_path, int cpu)
{
	int ret;
	char cpu_nr[INT_MAX_STR_LEN];	/* int max len */

	strncpy(stream_shm_path, shm_path, PATH_MAX);
	stream_shm_path[PATH_MAX - 1] = '\0';
	ret = snprintf(cpu_nr, INT_MAX_STR_LEN, "%i", cpu);
	if (ret < 0) {
		PERROR("snprintf");
		goto end;
	}
	strncat(stream_shm_path, cpu_nr,
			PATH_MAX - strlen(stream_shm_path) - 1);
	ret = 0;
end:
	return ret;
}
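
/*
 * For example, with shm_path "/tmp/my-app-shm" and cpu 2, the resulting
 * stream_shm_path is "/tmp/my-app-shm2": the decimal cpu number is simply
 * appended to the channel's shm path, truncated to PATH_MAX.
 */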
/*
 * Create streams for the given channel using liblttng-ust-ctl.
 * The channel lock must be acquired by the caller.
 *
 * Return 0 on success else a negative value.
 */
static int create_ust_streams(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret, cpu = 0;
	struct ustctl_consumer_stream *ustream;
	struct lttng_consumer_stream *stream;
	pthread_mutex_t *current_stream_lock = NULL;

	/*
	 * While a stream is available from ustctl. When NULL is returned, we've
	 * reached the end of the possible streams for the channel.
	 */
	while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
		int wait_fd;
		int ust_metadata_pipe[2];

		health_code_update();

		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
			ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
			if (ret < 0) {
				ERR("Create ust metadata poll pipe");
				goto error;
			}
			wait_fd = ust_metadata_pipe[0];
		} else {
			wait_fd = ustctl_stream_get_wait_fd(ustream);
		}

		/* Allocate consumer stream object. */
		stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
		if (!stream) {
			goto error_alloc;
		}

		stream->ustream = ustream;
		/*
		 * Store it so we can save multiple function calls afterwards since
		 * this value is used heavily in the stream threads. This is UST
		 * specific so this is why it's done after allocation.
		 */
		stream->wait_fd = wait_fd;

		/*
		 * Increment channel refcount since the channel reference has now been
		 * assigned in the allocation process above.
		 */
		if (stream->chan->monitor) {
			uatomic_inc(&stream->chan->refcount);
		}

		pthread_mutex_lock(&stream->lock);
		current_stream_lock = &stream->lock;

		/*
		 * Order is important, which is why a list is used. On error, the
		 * caller should clean this list.
		 */
		cds_list_add_tail(&stream->send_node, &channel->streams.head);

		ret = ustctl_get_max_subbuf_size(stream->ustream,
				&stream->max_sb_size);
		if (ret < 0) {
			ERR("ustctl_get_max_subbuf_size failed for stream %s",
					stream->name);
			goto error;
		}

		/* Do actions once stream has been received. */
		if (ctx->on_recv_stream) {
			ret = ctx->on_recv_stream(stream);
			if (ret < 0) {
				goto error;
			}
		}

		DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
				stream->name, stream->key, stream->relayd_stream_id);

		/* Set next CPU stream. */
		channel->streams.count = ++cpu;

		/* Keep stream reference when creating metadata. */
		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
			channel->metadata_stream = stream;
			if (channel->monitor) {
				/* Set metadata poll pipe if we created one */
				memcpy(stream->ust_metadata_poll_pipe,
						ust_metadata_pipe,
						sizeof(ust_metadata_pipe));
			}
		}
		pthread_mutex_unlock(&stream->lock);
		current_stream_lock = NULL;
	}

	return 0;

error:
error_alloc:
	if (current_stream_lock) {
		pthread_mutex_unlock(current_stream_lock);
	}
	return ret;
}

/*
 * create_posix_shm is never called concurrently within a process.
 */
static
int create_posix_shm(void)
{
	char tmp_name[NAME_MAX];
	int shmfd, ret;

	ret = snprintf(tmp_name, NAME_MAX, "/ust-shm-consumer-%d", getpid());
	if (ret < 0) {
		PERROR("snprintf");
		return -1;
	}

	/*
	 * Allocate shm, and immediately unlink its shm object, keeping
	 * only the file descriptor as a reference to the object.
	 * We specifically do _not_ use the / at the beginning of the
	 * pathname so that some OS implementations can keep it local to
	 * the process (POSIX leaves this implementation-defined).
	 */
	shmfd = shm_open(tmp_name, O_CREAT | O_EXCL | O_RDWR, 0700);
	if (shmfd < 0) {
		PERROR("shm_open");
		goto error_shm_open;
	}
	ret = shm_unlink(tmp_name);
	if (ret < 0 && errno != ENOENT) {
		PERROR("shm_unlink");
		goto error_shm_release;
	}

	return shmfd;

error_shm_release:
	ret = close(shmfd);
	if (ret) {
		PERROR("close");
	}
error_shm_open:
	return -1;
}
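
/*
 * Note: after shm_unlink() above, the returned file descriptor is the only
 * remaining reference to the shm object; it is ultimately handed to
 * liblttng-ust-ctl through open_ust_stream_fd() and create_ust_channel()
 * below, and the object disappears once every such descriptor is closed.
 */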
static int open_ust_stream_fd(struct lttng_consumer_channel *channel, int cpu,
		const struct lttng_credentials *session_credentials)
{
	char shm_path[PATH_MAX];
	int ret;

	if (!channel->shm_path[0]) {
		return create_posix_shm();
	}
	ret = get_stream_shm_path(shm_path, channel->shm_path, cpu);
	if (ret) {
		goto error_shm;
	}
	return run_as_open(shm_path,
			O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR,
			lttng_credentials_get_uid(session_credentials),
			lttng_credentials_get_gid(session_credentials));

error_shm:
	return -1;
}
/*
 * Create a UST channel with the given attributes and send it to the session
 * daemon using the ust ctl API.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_channel(struct lttng_consumer_channel *channel,
		struct ustctl_consumer_channel_attr *attr,
		struct ustctl_consumer_channel **ust_chanp)
{
	int ret, nr_stream_fds, i, j;
	int *stream_fds;
	struct ustctl_consumer_channel *ust_channel;

	assert(channel->buffer_credentials.is_set);

	DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
			"subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
			"switch_timer_interval: %u, read_timer_interval: %u, "
			"output: %d, type: %d", attr->overwrite, attr->subbuf_size,
			attr->num_subbuf, attr->switch_timer_interval,
			attr->read_timer_interval, attr->output, attr->type);

	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA)
		nr_stream_fds = 1;
	else
		nr_stream_fds = ustctl_get_nr_stream_per_channel();
	stream_fds = zmalloc(nr_stream_fds * sizeof(*stream_fds));
	if (!stream_fds) {
		ret = -1;
		goto error_alloc;
	}
	for (i = 0; i < nr_stream_fds; i++) {
		stream_fds[i] = open_ust_stream_fd(channel, i,
				&channel->buffer_credentials.value);
		if (stream_fds[i] < 0) {
			ret = -1;
			goto error_open;
		}
	}
	ust_channel = ustctl_create_channel(attr, stream_fds, nr_stream_fds);
	if (!ust_channel) {
		ret = -1;
		goto error_create;
	}
	channel->nr_stream_fds = nr_stream_fds;
	channel->stream_fds = stream_fds;
	*ust_chanp = ust_channel;

	return 0;

error_create:
error_open:
	for (j = i - 1; j >= 0; j--) {
		int closeret;

		closeret = close(stream_fds[j]);
		if (closeret) {
			PERROR("close");
		}
		if (channel->shm_path[0]) {
			char shm_path[PATH_MAX];

			closeret = get_stream_shm_path(shm_path,
					channel->shm_path, j);
			if (closeret) {
				ERR("Cannot get stream shm path");
			}
			closeret = run_as_unlink(shm_path,
					lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
							channel->buffer_credentials)),
					lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
							channel->buffer_credentials)));
			if (closeret) {
				PERROR("unlink %s", shm_path);
			}
		}
	}
	/* Try to rmdir all directories under shm_path root. */
	if (channel->root_shm_path[0]) {
		(void) run_as_rmdir_recursive(channel->root_shm_path,
				lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
						channel->buffer_credentials)),
				lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
						channel->buffer_credentials)),
				LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
	}
	free(stream_fds);
error_alloc:
	return ret;
}

/*
 * Send a single given stream to the session daemon using the sock.
 *
 * Return 0 on success else a negative value.
 */
static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
{
	int ret;

	DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);

	/* Send stream to session daemon. */
	ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}

/*
 * Send channel to sessiond and relayd if applicable.
 *
 * Return 0 on success or else a negative value.
 */
static int send_channel_to_sessiond_and_relayd(int sock,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *relayd_error)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_consumer_stream *stream;
	uint64_t net_seq_idx = -1ULL;

	DBG("UST consumer sending channel %s to sessiond", channel->name);

	if (channel->relayd_id != (uint64_t) -1ULL) {
		cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

			health_code_update();

			/* Try to send the stream to the relayd if one is available. */
			DBG("Sending stream %" PRIu64 " of channel \"%s\" to relayd",
					stream->key, channel->name);
			ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
			if (ret < 0) {
				/*
				 * Flag that the relayd was the problem here probably due to a
				 * communication error on the socket.
				 */
				if (relayd_error) {
					*relayd_error = 1;
				}
				ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			}
			if (net_seq_idx == -1ULL) {
				net_seq_idx = stream->net_seq_idx;
			}
		}
	}

	/* Inform sessiond that we are about to send channel and streams. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		/*
		 * Either the session daemon is not responding or the relayd died,
		 * so we stop now.
		 */
		goto error;
	}

	/* Send channel to sessiond. */
	ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
	if (ret < 0) {
		goto error;
	}

	ret = ustctl_channel_close_wakeup_fd(channel->uchan);
	if (ret < 0) {
		goto error;
	}

	/* The channel was sent successfully to the sessiond at this point. */
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

		health_code_update();

		/* Send stream to session daemon. */
		ret = send_sessiond_stream(sock, stream);
		if (ret < 0) {
			goto error;
		}
	}

	/* Tell sessiond there is no more stream. */
	ret = ustctl_send_stream_to_sessiond(sock, NULL);
	if (ret < 0) {
		goto error;
	}

	DBG("UST consumer NULL stream sent to sessiond");

	return 0;

error:
	if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		ret = -1;
	}
	return ret;
}
/*
 * Creates a channel and streams and adds the channel to the internal channel
 * state. The created streams must ONLY be sent once the GET_CHANNEL command
 * is received.
 *
 * Return 0 on success or else, a negative value is returned and the channel
 * MUST be destroyed by consumer_del_channel().
 */
static int ask_channel(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel,
		struct ustctl_consumer_channel_attr *attr)
{
	int ret;

	/*
	 * This value is still used by the kernel consumer since for the kernel,
	 * the stream ownership is not IN the consumer so we need to have the
	 * number of streams left to be initialized so we can know when to
	 * delete the channel (see consumer.c).
	 *
	 * As for the user space tracer now, the consumer creates and sends the
	 * streams to the session daemon, which only sends them to the
	 * application once every stream of a channel is received, making this
	 * value useless because they will be added to the poll thread before
	 * the application receives them. This ensures that a stream can not
	 * hang up during initialization of a channel.
	 */
	channel->nb_init_stream_left = 0;

	/* The reply msg status is handled in the following call. */
	ret = create_ust_channel(channel, attr, &channel->uchan);
	if (ret < 0) {
		goto end;
	}

	channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);

	/*
	 * For the snapshots (no monitor), we create the metadata streams
	 * on demand, not during the channel creation.
	 */
	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
		ret = 0;
		goto end;
	}

	/* Open all streams for this channel. */
	pthread_mutex_lock(&channel->lock);
	ret = create_ust_streams(channel, ctx);
	pthread_mutex_unlock(&channel->lock);

end:
	return ret;
}

/*
 * Send all streams of a channel to the right thread handling them.
 *
 * On error, return a negative value else 0 on success.
 */
static int send_streams_to_thread(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_stream *stream, *stmp;

	/* Send streams to the corresponding thread. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {

		health_code_update();

		/* Sending the stream to the thread. */
		ret = send_stream_to_thread(stream, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error;
		}
	}

error:
	return ret;
}

/*
 * Flush channel's streams using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int flush_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer flush channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, flush it. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		pthread_mutex_lock(&stream->lock);

		/*
		 * Protect against concurrent teardown of a stream.
		 */
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}

		if (!stream->quiescent) {
			ustctl_flush_buffer(stream->ustream, 0);
			stream->quiescent = true;
		}
next:
		pthread_mutex_unlock(&stream->lock);
	}
error:
	rcu_read_unlock();
	return ret;
}
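
/*
 * Note: the streams flushed above are marked quiescent, so a subsequent
 * snapshot or hang-up flush (see snapshot_channel() and
 * lttng_ustconsumer_on_stream_hangup()) can skip buffers that were already
 * flushed and not written to since.
 */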
/*
 * Clear quiescent state from channel's streams using the given key to
 * retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int clear_quiescent_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, clear quiescent state. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		pthread_mutex_lock(&stream->lock);
		stream->quiescent = false;
		pthread_mutex_unlock(&stream->lock);
	}
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int close_metadata(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	unsigned int channel_monitor;

	DBG("UST consumer close metadata key %" PRIu64, chan_key);

	channel = consumer_find_channel(chan_key);
	if (!channel) {
		/*
		 * This is possible if the metadata thread has issued a delete
		 * because the endpoint of the stream hung up. There is no way the
		 * session daemon can know about it thus use a DBG instead of an
		 * actual error.
		 */
		DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	channel_monitor = channel->monitor;
	if (cds_lfht_is_node_deleted(&channel->node.node)) {
		goto error_unlock;
	}

	lttng_ustconsumer_close_metadata(channel);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	/*
	 * The ownership of a metadata channel depends on the type of
	 * session to which it belongs. In effect, the monitor flag is checked
	 * to determine if this metadata channel is in "snapshot" mode or not.
	 *
	 * In the non-snapshot case, the metadata channel is created along with
	 * a single stream which will remain present until the metadata channel
	 * is destroyed (on the destruction of its session). In this case, the
	 * metadata stream is "monitored" by the metadata poll thread and holds
	 * the ownership of its channel.
	 *
	 * Closing the metadata will cause the metadata stream's "metadata poll
	 * pipe" to be closed. Closing this pipe will wake-up the metadata poll
	 * thread which will teardown the metadata stream which, in return,
	 * deletes the metadata channel.
	 *
	 * In the snapshot case, the metadata stream is created and destroyed
	 * on every snapshot record. Since the channel doesn't have an owner
	 * other than the session daemon, it is safe to destroy it immediately
	 * on reception of the CLOSE_METADATA command.
	 */
	if (!channel_monitor) {
		/*
		 * The channel and consumer_data locks must be
		 * released before this call since consumer_del_channel
		 * re-acquires the channel and consumer_data locks to teardown
		 * the channel and queue its reclamation by the "call_rcu"
		 * worker thread.
		 */
		consumer_del_channel(channel);
	}

	return ret;
error_unlock:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
error:
	return ret;
}

/*
 * RCU read side lock MUST be acquired before calling this function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
{
	int ret = 0;
	struct lttng_consumer_channel *metadata;

	DBG("UST consumer setup metadata key %" PRIu64, key);

	metadata = consumer_find_channel(key);
	if (!metadata) {
		ERR("UST consumer push metadata %" PRIu64 " not found", key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto end;
	}

	/*
	 * In no monitor mode, the metadata channel has no stream(s) so skip the
	 * ownership transfer to the metadata thread.
	 */
	if (!metadata->monitor) {
		DBG("Metadata channel in no monitor");
		ret = 0;
		goto end;
	}

	/*
	 * Send metadata stream to relayd if one is available. Availability is
	 * known if the stream is still in the list of the channel.
	 */
	if (cds_list_empty(&metadata->streams.head)) {
		ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
		ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		goto error_no_stream;
	}

	/* Send metadata stream to relayd if needed. */
	if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
		ret = consumer_send_relayd_stream(metadata->metadata_stream,
				metadata->pathname);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto error;
		}
		ret = consumer_send_relayd_streams_sent(
				metadata->metadata_stream->net_seq_idx);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			goto error;
		}
	}

	/*
	 * Ownership of metadata stream is passed along. Freeing is handled by
	 * the thread managing it from this point on.
	 */
	ret = send_streams_to_thread(metadata, ctx);
	if (ret < 0) {
		/*
		 * If we are unable to send the stream to the thread, there is
		 * a big problem so just stop everything.
		 */
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto send_streams_error;
	}
	/* List MUST be empty after or else it could be reused. */
	assert(cds_list_empty(&metadata->streams.head));

	ret = 0;
	goto end;

error:
	/*
	 * Delete metadata channel on error. At this point, the metadata stream
	 * can NOT be monitored by the metadata thread thus having the guarantee
	 * that the stream is still in the local stream list of the channel.
	 * This call will make sure to clean that list.
	 */
	consumer_stream_destroy(metadata->metadata_stream, NULL);
	cds_list_del(&metadata->metadata_stream->send_node);
	metadata->metadata_stream = NULL;
send_streams_error:
error_no_stream:
end:
	return ret;
}
/*
 * Snapshot the whole metadata.
 * RCU read-side lock must be held by the caller.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_metadata(struct lttng_consumer_channel *metadata_channel,
		uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_stream *metadata_stream;

	DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	assert(!metadata_channel->monitor);

	health_code_update();

	/*
	 * Ask the sessiond if we have new metadata waiting and update the
	 * consumer metadata cache.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * The metadata stream is NOT created in no monitor mode when the channel
	 * is created on a sessiond ask channel command.
	 */
	ret = create_ust_streams(metadata_channel, ctx);
	if (ret < 0) {
		goto error;
	}

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	pthread_mutex_lock(&metadata_stream->lock);
	if (relayd_id != (uint64_t) -1ULL) {
		metadata_stream->net_seq_idx = relayd_id;
		ret = consumer_send_relayd_stream(metadata_stream, path);
	} else {
		ret = consumer_stream_create_output_files(metadata_stream,
				false);
	}
	pthread_mutex_unlock(&metadata_stream->lock);
	if (ret < 0) {
		goto error_stream;
	}

	do {
		health_code_update();

		ret = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
		if (ret < 0) {
			goto error_stream;
		}
	} while (ret > 0);

error_stream:
	/*
	 * Clean up the stream completely because the next snapshot will use a
	 * new metadata stream.
	 */
	consumer_stream_destroy(metadata_stream, NULL);
	cds_list_del(&metadata_stream->send_node);
	metadata_channel->metadata_stream = NULL;

error:
	return ret;
}

static
int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
		const char **addr)
{
	int ret;
	unsigned long mmap_offset;
	const char *mmap_base;

	mmap_base = ustctl_get_mmap_base(stream->ustream);
	if (!mmap_base) {
		ERR("Failed to get mmap base for stream `%s`",
				stream->name);
		ret = -EPERM;
		goto error;
	}

	ret = ustctl_get_mmap_read_offset(stream->ustream, &mmap_offset);
	if (ret != 0) {
		ERR("Failed to get mmap offset for stream `%s`", stream->name);
		ret = -EINVAL;
		goto error;
	}

	*addr = mmap_base + mmap_offset;

error:
	return ret;
}

/*
 * Take a snapshot of all the streams of a channel.
 * RCU read-side lock and the channel lock must be held by the caller.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_channel(struct lttng_consumer_channel *channel,
		uint64_t key, char *path, uint64_t relayd_id,
		uint64_t nb_packets_per_stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	unsigned use_relayd = 0;
	unsigned long consumed_pos, produced_pos;
	struct lttng_consumer_stream *stream;

	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	assert(!channel->monitor);
	DBG("UST consumer snapshot channel %" PRIu64, key);

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		health_code_update();

		/* Lock stream because we are about to change its state. */
		pthread_mutex_lock(&stream->lock);
		assert(channel->trace_chunk);
		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
			/*
			 * Can't happen barring an internal error as the channel
			 * holds a reference to the trace chunk.
			 */
			ERR("Failed to acquire reference to channel's trace chunk");
			ret = -1;
			goto error_unlock;
		}
		assert(!stream->trace_chunk);
		stream->trace_chunk = channel->trace_chunk;

		if (use_relayd) {
			stream->net_seq_idx = relayd_id;
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				goto error_unlock;
			}
		} else {
			ret = consumer_stream_create_output_files(stream,
					false);
			if (ret < 0) {
				goto error_unlock;
			}
			DBG("UST consumer snapshot stream (%" PRIu64 ")",
					stream->key);
		}

		/*
		 * If tracing is active, we want to perform a "full" buffer flush.
		 * Else, if quiescent, it has already been done by the prior stop.
		 */
		if (!stream->quiescent) {
			ustctl_flush_buffer(stream->ustream, 0);
		}

		ret = lttng_ustconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking UST snapshot");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced UST snapshot position");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumerd UST snapshot position");
			goto error_unlock;
		}

		/*
		 * The original value is sent back if the max stream size is larger
		 * than the possible size of the snapshot. Also, we assume that the
		 * session daemon should never send a maximum stream size that is
		 * lower than the subbuffer size.
		 */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);
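
		/*
		 * Note: consumer_get_consume_start_pos() effectively clamps the
		 * start of the capture: it yields either the current consumed
		 * position or a position nb_packets_per_stream * max_sb_size
		 * bytes behind the produced position, whichever results in the
		 * smaller capture.
		 */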
		while ((long) (consumed_pos - produced_pos) < 0) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();

			DBG("UST consumer taking snapshot at pos %lu", consumed_pos);

			ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("ustctl_get_subbuf snapshot");
					goto error_close_stream;
				}
				DBG("UST consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			ret = ustctl_get_subbuf_size(stream->ustream, &len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

			subbuf_view = lttng_buffer_view_init(
					subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
					stream, &subbuf_view, padded_len - len);
			if (use_relayd) {
				if (read_len != len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			} else {
				if (read_len != padded_len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			}

			ret = ustctl_put_subbuf(stream->ustream);
			if (ret < 0) {
				ERR("Snapshot ustctl_put_subbuf");
				goto error_close_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Simply close the stream so we can use it on the next snapshot. */
		consumer_stream_close(stream);
		pthread_mutex_unlock(&stream->lock);
	}

	return 0;

error_put_subbuf:
	if (ustctl_put_subbuf(stream->ustream) < 0) {
		ERR("Snapshot ustctl_put_subbuf");
	}
error_close_stream:
	consumer_stream_close(stream);
error_unlock:
	pthread_mutex_unlock(&stream->lock);
	return ret;
}

static
void metadata_stream_reset_cache_consumed_position(
		struct lttng_consumer_stream *stream)
{
	ASSERT_LOCKED(stream->lock);

	DBG("Reset metadata cache of session %" PRIu64,
			stream->chan->session_id);
	stream->ust_metadata_pushed = 0;
}
/*
 * Receive the metadata updates from the sessiond. Supports receiving
 * overlapping metadata, but it needs to always belong to a contiguous
 * range starting from 0.
 * Be careful about the locks held when calling this function: it needs
 * the metadata cache flush to concurrently progress in order to
 * complete.
 */
int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
		uint64_t len, uint64_t version,
		struct lttng_consumer_channel *channel, int timer, int wait)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	char *metadata_str;
	enum consumer_metadata_cache_write_status cache_write_status;

	DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);

	metadata_str = zmalloc(len * sizeof(char));
	if (!metadata_str) {
		PERROR("zmalloc metadata string");
		ret_code = LTTCOMM_CONSUMERD_ENOMEM;
		goto end;
	}

	health_code_update();

	/* Receive metadata string. */
	ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
	if (ret < 0) {
		/* Session daemon is dead so return gracefully. */
		ret_code = ret;
		goto end_free;
	}

	health_code_update();

	pthread_mutex_lock(&channel->metadata_cache->lock);
	cache_write_status = consumer_metadata_cache_write(
			channel->metadata_cache, offset, len, version,
			metadata_str);
	pthread_mutex_unlock(&channel->metadata_cache->lock);
	switch (cache_write_status) {
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_NO_CHANGE:
		/*
		 * The write entirely overlapped with existing contents of the
		 * same metadata version (same content); there is nothing to do.
		 */
		break;
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_INVALIDATED:
		/*
		 * The metadata cache was invalidated (previously pushed
		 * content has been overwritten). Reset the stream's consumed
		 * metadata position to ensure the metadata poll thread consumes
		 * the whole cache again.
		 */
		pthread_mutex_lock(&channel->metadata_stream->lock);
		metadata_stream_reset_cache_consumed_position(
				channel->metadata_stream);
		pthread_mutex_unlock(&channel->metadata_stream->lock);
		/* Fall-through. */
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_APPENDED_CONTENT:
		/*
		 * In both cases, the metadata poll thread has new data to
		 * consume.
		 */
		ret = consumer_metadata_wakeup_pipe(channel);
		if (ret) {
			ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto end_free;
		}
		break;
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR:
		/* Unable to handle metadata. Notify session daemon. */
		ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
		/*
		 * Skip metadata flush on write error since the offset and len might
		 * not have been updated which could create an infinite loop below
		 * when waiting for the metadata cache to be flushed.
		 */
		goto end_free;
	}

	if (!wait) {
		goto end_free;
	}
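
	/*
	 * Note: this wait loop blocks until the metadata poll thread has
	 * pushed everything up to offset + len out of the cache;
	 * consumer_metadata_cache_flushed() keeps reporting "not flushed yet"
	 * until that point is reached.
	 */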
	while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
		DBG("Waiting for metadata to be flushed");

		health_code_update();

		usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
	}

end_free:
	free(metadata_str);
end:
	return ret_code;
}
/*
 * Receive command from session daemon and process it.
 *
 * Return 1 on success else a negative value or 0.
 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	int ret;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;
	struct lttng_consumer_channel *channel = NULL;

	health_code_update();

	ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %zd (expects %zu)",
				ret, sizeof(msg));
		/*
		 * The ret value might be 0, meaning an orderly shutdown, but this is
		 * ok since the caller handles this.
		 */
		if (ret > 0) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
			ret = -1;
		}
		return ret;
	}

	health_code_update();

	assert(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* relayd needs RCU read-side lock */
	rcu_read_lock();

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		/* Session daemon status messages are handled in the following call. */
		consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
				&msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
				msg.u.relayd_sock.relayd_session_id);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		} else {
			/*
			 * Each relayd socket pair has a refcount of streams attached
			 * to it which tells if the relayd is still active or not
			 * depending on the refcount value.
			 *
			 * This will set the destroy flag of the relayd object and
			 * destroy it if the refcount reaches zero when called.
			 *
			 * The destroy can happen either here or when a stream fd hangs
			 * up.
			 */
			consumer_flag_relayd_for_destroy(relayd);
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		rcu_read_unlock();
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int ret, is_data_pending;
		uint64_t id = msg.u.data_pending.session_id;

		DBG("UST consumer data pending command for id %" PRIu64, id);

		is_data_pending = consumer_data_pending(id);

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &is_data_pending,
				sizeof(is_data_pending));
		if (ret < 0) {
			DBG("Error when sending the data pending ret code: %d", ret);
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
	{
		struct ustctl_consumer_channel_attr attr;
		const uint64_t chunk_id = msg.u.ask_channel.chunk_id.value;
		const struct lttng_credentials buffer_credentials = {
			.uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.uid),
			.gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.gid),
		};

		/* Create a plain object and reserve a channel key. */
		channel = consumer_allocate_channel(
				msg.u.ask_channel.key,
				msg.u.ask_channel.session_id,
				msg.u.ask_channel.chunk_id.is_set ?
						&chunk_id : NULL,
				msg.u.ask_channel.pathname,
				msg.u.ask_channel.name,
				msg.u.ask_channel.relayd_id,
				(enum lttng_event_output) msg.u.ask_channel.output,
				msg.u.ask_channel.tracefile_size,
				msg.u.ask_channel.tracefile_count,
				msg.u.ask_channel.session_id_per_pid,
				msg.u.ask_channel.monitor,
				msg.u.ask_channel.live_timer_interval,
				msg.u.ask_channel.is_live,
				msg.u.ask_channel.root_shm_path,
				msg.u.ask_channel.shm_path);
		if (!channel) {
			goto end_channel_error;
		}

		LTTNG_OPTIONAL_SET(&channel->buffer_credentials,
				buffer_credentials);

		/*
		 * Assign UST application UID to the channel. This value is ignored
		 * for per PID buffers. This is specific to UST thus setting this
		 * after the allocation.
		 */
		channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;

		/* Build channel attributes from received message. */
		attr.subbuf_size = msg.u.ask_channel.subbuf_size;
		attr.num_subbuf = msg.u.ask_channel.num_subbuf;
		attr.overwrite = msg.u.ask_channel.overwrite;
		attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
		attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
		attr.chan_id = msg.u.ask_channel.chan_id;
		memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
		attr.blocking_timeout = msg.u.ask_channel.blocking_timeout;

		/* Match channel buffer type to the UST abi. */
		switch (msg.u.ask_channel.output) {
		case LTTNG_EVENT_MMAP:
		default:
			attr.output = LTTNG_UST_MMAP;
			break;
		}

		/* Translate and save channel type. */
		switch (msg.u.ask_channel.type) {
		case LTTNG_UST_CHAN_PER_CPU:
			channel->type = CONSUMER_CHANNEL_TYPE_DATA;
			attr.type = LTTNG_UST_CHAN_PER_CPU;
			/*
			 * Set refcount to 1 for owner. Below, we will
			 * pass ownership to the
			 * consumer_thread_channel_poll() thread.
			 */
			channel->refcount = 1;
			break;
		case LTTNG_UST_CHAN_METADATA:
			channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
			attr.type = LTTNG_UST_CHAN_METADATA;
			break;
		}

		health_code_update();

		ret = ask_channel(ctx, channel, &attr);
		if (ret < 0) {
			goto end_channel_error;
		}

		if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
			ret = consumer_metadata_cache_allocate(channel);
			if (ret < 0) {
				ERR("Allocating metadata cache");
				goto end_channel_error;
			}
			consumer_timer_switch_start(channel, attr.switch_timer_interval);
			attr.switch_timer_interval = 0;
		} else {
			int monitor_start_ret;

			consumer_timer_live_start(channel,
					msg.u.ask_channel.live_timer_interval);
			monitor_start_ret = consumer_timer_monitor_start(
					channel,
					msg.u.ask_channel.monitor_timer_interval);
			if (monitor_start_ret < 0) {
				ERR("Starting channel monitoring timer failed");
				goto end_channel_error;
			}
		}

		health_code_update();

		/*
		 * Add the channel to the internal state AFTER all streams were
		 * created and successfully sent to session daemon. This way, all
		 * streams must be ready before this channel is visible to the
		 * threads.
		 * If add_channel succeeds, ownership of the channel is
		 * passed to consumer_thread_channel_poll().
		 */
		ret = add_channel(channel, ctx);
		if (ret < 0) {
			if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
				if (channel->switch_timer_enabled == 1) {
					consumer_timer_switch_stop(channel);
				}
				consumer_metadata_cache_destroy(channel);
			}
			if (channel->live_timer_enabled == 1) {
				consumer_timer_live_stop(channel);
			}
			if (channel->monitor_timer_enabled == 1) {
				consumer_timer_monitor_stop(channel);
			}
			goto end_channel_error;
		}

		health_code_update();

		/*
		 * Channel and streams are now created. Inform the session daemon
		 * that everything went well and should wait to receive the channel
		 * and streams with ustctl API.
		 */
		ret = consumer_send_status_channel(sock, channel);
		if (ret < 0) {
			/*
			 * There is probably a problem on the socket.
			 */
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_GET_CHANNEL:
	{
		int ret, relayd_err = 0;
		uint64_t key = msg.u.get_channel.key;
		struct lttng_consumer_channel *channel;

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("UST consumer get channel key %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			goto end_get_channel;
		}

		health_code_update();

		/* Send the channel to sessiond (and relayd, if applicable). */
		ret = send_channel_to_sessiond_and_relayd(sock, channel, ctx,
				&relayd_err);
		if (ret < 0) {
			if (relayd_err) {
				/*
				 * We were unable to send to the relayd the stream so avoid
				 * sending back a fatal error to the thread since this is OK
				 * and the consumer can continue its work. The above call
				 * has sent the error status message to the sessiond.
				 */
				goto end_get_channel_nosignal;
			}
			/*
			 * The communication was broken hence there is a bad state
			 * between the consumer and sessiond so stop everything.
			 */
			goto error_get_channel_fatal;
		}

		health_code_update();

		/*
		 * In no monitor mode, the streams ownership is kept inside the
		 * channel so don't send them to the data thread.
		 */
		if (!channel->monitor) {
			goto end_get_channel;
		}

		ret = send_streams_to_thread(channel, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error_get_channel_fatal;
		}
		/* List MUST be empty after or else it could be reused. */
		assert(cds_list_empty(&channel->streams.head));
end_get_channel:
		goto end_msg_sessiond;
error_get_channel_fatal:
		goto error_fatal;
end_get_channel_nosignal:
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;

		/*
		 * Only called if streams have not been sent to stream
		 * manager thread. However, channel has been sent to
		 * channel manager thread.
		 */
		notify_thread_del_channel(ctx, key);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_METADATA:
	{
		int ret;

		ret = close_metadata(msg.u.close_metadata.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_FLUSH_CHANNEL:
	{
		int ret;

		ret = flush_channel(msg.u.flush_channel.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL:
	{
		int ret;

		ret = clear_quiescent_channel(
				msg.u.clear_quiescent_channel.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_PUSH_METADATA:
	{
		uint64_t len = msg.u.push_metadata.len;
		uint64_t key = msg.u.push_metadata.key;
		uint64_t offset = msg.u.push_metadata.target_offset;
		uint64_t version = msg.u.push_metadata.version;
		struct lttng_consumer_channel *channel;

		DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
				len);

		channel = consumer_find_channel(key);
		if (!channel) {
			/*
			 * This is possible if the metadata creation on the consumer side
			 * is in flight vis-a-vis a concurrent push metadata from the
			 * session daemon. Simply return that the channel failed and the
			 * session daemon will handle that message correctly considering
			 * that this race is acceptable thus the DBG() statement here.
			 */
			DBG("UST consumer push metadata %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			goto end_push_metadata_msg_sessiond;
		}

		health_code_update();

		if (!len) {
			/*
			 * There is nothing to receive. We have simply
			 * checked whether the channel can be found.
			 */
			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			goto end_push_metadata_msg_sessiond;
		}

		/* Tell session daemon we are ready to receive the metadata. */
		ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_push_metadata_fatal;
		}

		health_code_update();

		/* Wait for more data. */
		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			goto error_push_metadata_fatal;
		}

		health_code_update();

		ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
				len, version, channel, 0, 1);
		if (ret < 0) {
			/* error receiving from sessiond */
			goto error_push_metadata_fatal;
		} else {
			ret_code = ret;
			goto end_push_metadata_msg_sessiond;
		}
end_push_metadata_msg_sessiond:
		goto end_msg_sessiond;
error_push_metadata_fatal:
		goto error_fatal;
	}
	case LTTNG_CONSUMER_SETUP_METADATA:
	{
		ret = setup_metadata(ctx, msg.u.setup_metadata.key);
		if (ret) {
			ret_code = ret;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		struct lttng_consumer_channel *channel;
		uint64_t key = msg.u.snapshot_channel.key;

		channel = consumer_find_channel(key);
		if (!channel) {
			DBG("UST snapshot channel not found for key %" PRIu64, key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			if (msg.u.snapshot_channel.metadata) {
				ret = snapshot_metadata(channel, key,
						msg.u.snapshot_channel.pathname,
						msg.u.snapshot_channel.relayd_id,
						ctx);
				if (ret < 0) {
					ERR("Snapshot metadata failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			} else {
				ret = snapshot_channel(channel, key,
						msg.u.snapshot_channel.pathname,
						msg.u.snapshot_channel.relayd_id,
						msg.u.snapshot_channel.nb_packets_per_stream,
						ctx);
				if (ret < 0) {
					ERR("Snapshot channel failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			}
		}
		health_code_update();
		ret = consumer_send_status_msg(sock, ret_code);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		health_code_update();
		break;
	}
	case LTTNG_CONSUMER_DISCARDED_EVENTS:
	{
		uint64_t discarded_events;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.discarded_events.session_id;
		uint64_t key = msg.u.discarded_events.channel_key;

		DBG("UST consumer discarded events command for session id %"
				PRIu64, id);

		pthread_mutex_lock(&consumer_data.lock);

		ht = consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but they are not
		 * directly indexed, so we just use the first matching stream
		 * to extract the information we need. We default to 0 if not
		 * found (no events are dropped if the channel is not yet in
		 * use).
		 */
		discarded_events = 0;
		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->chan->key == key) {
				discarded_events = stream->chan->discarded_events;
				break;
			}
		}
		pthread_mutex_unlock(&consumer_data.lock);

		DBG("UST consumer discarded events command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events));
		if (ret < 0) {
			PERROR("send discarded events");
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_LOST_PACKETS:
	{
		uint64_t lost_packets;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.lost_packets.session_id;
		uint64_t key = msg.u.lost_packets.channel_key;

		DBG("UST consumer lost packets command for session id %"
				PRIu64, id);

		pthread_mutex_lock(&consumer_data.lock);

		ht = consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but they are not
		 * directly indexed, so we just use the first matching stream
		 * to extract the information we need. We default to 0 if not
		 * found (no packets lost if the channel is not yet in use).
		 */
		lost_packets = 0;
		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->chan->key == key) {
				lost_packets = stream->chan->lost_packets;
				break;
			}
		}
		pthread_mutex_unlock(&consumer_data.lock);

		DBG("UST consumer lost packets command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &lost_packets,
				sizeof(lost_packets));
		if (ret < 0) {
			PERROR("send lost packets");
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
	{
		int channel_monitor_pipe;

		ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		/* Successfully received the command's type. */
		ret = consumer_send_status_msg(sock, ret_code);
		if (ret < 0) {
			goto error_fatal;
		}

		ret = lttcomm_recv_fds_unix_sock(sock, &channel_monitor_pipe,
				1);
		if (ret != sizeof(channel_monitor_pipe)) {
			ERR("Failed to receive channel monitor pipe");
			goto error_fatal;
		}

		DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
		ret = consumer_timer_thread_set_channel_monitor_pipe(
				channel_monitor_pipe);
		if (!ret) {
			int flags;

			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			/* Set the pipe as non-blocking. */
			ret = fcntl(channel_monitor_pipe, F_GETFL, 0);
			if (ret == -1) {
				PERROR("fcntl get flags of the channel monitoring pipe");
				goto error_fatal;
			}
			flags = ret;

			ret = fcntl(channel_monitor_pipe, F_SETFL,
					flags | O_NONBLOCK);
			if (ret == -1) {
				PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
				goto error_fatal;
			}
			DBG("Channel monitor pipe set as non-blocking");
		} else {
			ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
		}
		goto end_msg_sessiond;
	}
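
	/*
	 * Note on the case above: the monitor pipe is written to by the
	 * channel monitoring timer (see consumer-timer.c); it is switched to
	 * O_NONBLOCK here presumably so that a slow or stalled reader can
	 * never block that timer's writes.
	 */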
2035 case LTTNG_CONSUMER_ROTATE_CHANNEL
:
2037 struct lttng_consumer_channel
*channel
;
2038 uint64_t key
= msg
.u
.rotate_channel
.key
;
2040 channel
= consumer_find_channel(key
);
2042 DBG("Channel %" PRIu64
" not found", key
);
2043 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
2046 * Sample the rotate position of all the streams in
2049 ret
= lttng_consumer_rotate_channel(channel
, key
,
2050 msg
.u
.rotate_channel
.relayd_id
,
2051 msg
.u
.rotate_channel
.metadata
,
2054 ERR("Rotate channel failed");
2055 ret_code
= LTTCOMM_CONSUMERD_ROTATION_FAIL
;
2058 health_code_update();
2060 ret
= consumer_send_status_msg(sock
, ret_code
);
2062 /* Somehow, the session daemon is not responding anymore. */
2063 goto end_rotate_channel_nosignal
;
2067 * Rotate the streams that are ready right now.
2068 * FIXME: this is a second consecutive iteration over the
2069 * streams in a channel, there is probably a better way to
2070 * handle this, but it needs to be after the
2071 * consumer_send_status_msg() call.
2074 ret
= lttng_consumer_rotate_ready_streams(
2077 ERR("Rotate channel failed");
2081 end_rotate_channel_nosignal
:
2084 case LTTNG_CONSUMER_CLEAR_CHANNEL
:
2086 struct lttng_consumer_channel
*channel
;
2087 uint64_t key
= msg
.u
.clear_channel
.key
;
2089 channel
= consumer_find_channel(key
);
2091 DBG("Channel %" PRIu64
" not found", key
);
2092 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
2094 ret
= lttng_consumer_clear_channel(channel
);
2096 ERR("Clear channel failed key %" PRIu64
, key
);
2100 health_code_update();
2102 ret
= consumer_send_status_msg(sock
, ret_code
);
2104 /* Somehow, the session daemon is not responding anymore. */
2109 case LTTNG_CONSUMER_INIT
:
2111 ret_code
= lttng_consumer_init_command(ctx
,
2112 msg
.u
.init
.sessiond_uuid
);
2113 health_code_update();
2114 ret
= consumer_send_status_msg(sock
, ret_code
);
2116 /* Somehow, the session daemon is not responding anymore. */
2121 case LTTNG_CONSUMER_CREATE_TRACE_CHUNK
:
2123 const struct lttng_credentials credentials
= {
2124 .uid
= LTTNG_OPTIONAL_INIT_VALUE(msg
.u
.create_trace_chunk
.credentials
.value
.uid
),
2125 .gid
= LTTNG_OPTIONAL_INIT_VALUE(msg
.u
.create_trace_chunk
.credentials
.value
.gid
),
2127 const bool is_local_trace
=
2128 !msg
.u
.create_trace_chunk
.relayd_id
.is_set
;
2129 const uint64_t relayd_id
=
2130 msg
.u
.create_trace_chunk
.relayd_id
.value
;
2131 const char *chunk_override_name
=
2132 *msg
.u
.create_trace_chunk
.override_name
?
2133 msg
.u
.create_trace_chunk
.override_name
:
2135 struct lttng_directory_handle
*chunk_directory_handle
= NULL
;
2138 * The session daemon will only provide a chunk directory file
2139 * descriptor for local traces.
2141 if (is_local_trace
) {
2144 /* Acnowledge the reception of the command. */
2145 ret
= consumer_send_status_msg(sock
,
2146 LTTCOMM_CONSUMERD_SUCCESS
);
2148 /* Somehow, the session daemon is not responding anymore. */
2153 * Receive trace chunk domain dirfd.
2155 ret
= lttcomm_recv_fds_unix_sock(sock
, &chunk_dirfd
, 1);
2156 if (ret
!= sizeof(chunk_dirfd
)) {
2157 ERR("Failed to receive trace chunk domain directory file descriptor");
2161 DBG("Received trace chunk domain directory fd (%d)",
2163 chunk_directory_handle
= lttng_directory_handle_create_from_dirfd(
2165 if (!chunk_directory_handle
) {
2166 ERR("Failed to initialize chunk domain directory handle from directory file descriptor");
2167 if (close(chunk_dirfd
)) {
2168 PERROR("Failed to close chunk directory file descriptor");
2174 ret_code
= lttng_consumer_create_trace_chunk(
2175 !is_local_trace
? &relayd_id
: NULL
,
2176 msg
.u
.create_trace_chunk
.session_id
,
2177 msg
.u
.create_trace_chunk
.chunk_id
,
2178 (time_t) msg
.u
.create_trace_chunk
2179 .creation_timestamp
,
2180 chunk_override_name
,
2181 msg
.u
.create_trace_chunk
.credentials
.is_set
?
2184 chunk_directory_handle
);
2185 lttng_directory_handle_put(chunk_directory_handle
);
2186 goto end_msg_sessiond
;
2188 case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK
:
2190 enum lttng_trace_chunk_command_type close_command
=
2191 msg
.u
.close_trace_chunk
.close_command
.value
;
2192 const uint64_t relayd_id
=
2193 msg
.u
.close_trace_chunk
.relayd_id
.value
;
2194 struct lttcomm_consumer_close_trace_chunk_reply reply
;
2195 char closed_trace_chunk_path
[LTTNG_PATH_MAX
];
2198 ret_code
= lttng_consumer_close_trace_chunk(
2199 msg
.u
.close_trace_chunk
.relayd_id
.is_set
?
2202 msg
.u
.close_trace_chunk
.session_id
,
2203 msg
.u
.close_trace_chunk
.chunk_id
,
2204 (time_t) msg
.u
.close_trace_chunk
.close_timestamp
,
2205 msg
.u
.close_trace_chunk
.close_command
.is_set
?
2207 NULL
, closed_trace_chunk_path
);
2208 reply
.ret_code
= ret_code
;
2209 reply
.path_length
= strlen(closed_trace_chunk_path
) + 1;
2210 ret
= lttcomm_send_unix_sock(sock
, &reply
, sizeof(reply
));
2211 if (ret
!= sizeof(reply
)) {
2214 ret
= lttcomm_send_unix_sock(sock
, closed_trace_chunk_path
,
2216 if (ret
!= reply
.path_length
) {
2221 case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS
:
2223 const uint64_t relayd_id
=
2224 msg
.u
.trace_chunk_exists
.relayd_id
.value
;
2226 ret_code
= lttng_consumer_trace_chunk_exists(
2227 msg
.u
.trace_chunk_exists
.relayd_id
.is_set
?
2229 msg
.u
.trace_chunk_exists
.session_id
,
2230 msg
.u
.trace_chunk_exists
.chunk_id
);
2231 goto end_msg_sessiond
;
2233 case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS
:
2235 const uint64_t key
= msg
.u
.open_channel_packets
.key
;
2236 struct lttng_consumer_channel
*channel
=
2237 consumer_find_channel(key
);
2240 pthread_mutex_lock(&channel
->lock
);
2241 ret_code
= lttng_consumer_open_channel_packets(channel
);
2242 pthread_mutex_unlock(&channel
->lock
);
2245 * The channel could have disappeared in per-pid
2248 DBG("Channel %" PRIu64
" not found", key
);
2249 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
2252 health_code_update();
2253 goto end_msg_sessiond
;
2261 * Return 1 to indicate success since the 0 value can be a socket
2262 * shutdown during the recv() or send() call.
2269 * The returned value here is not useful since either way we'll return 1 to
2270 * the caller because the session daemon socket management is done
2271 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
2273 ret
= consumer_send_status_msg(sock
, ret_code
);
2283 * Free channel here since no one has a reference to it. We don't
2284 * free after that because a stream can store this pointer.
2286 destroy_channel(channel
);
2288 /* We have to send a status channel message indicating an error. */
2289 ret
= consumer_send_status_channel(sock
, NULL
);
2291 /* Stop everything if session daemon can not be notified. */
2298 /* This will issue a consumer stop. */
2304 health_code_update();
2308 void lttng_ustctl_flush_buffer(struct lttng_consumer_stream
*stream
,
2309 int producer_active
)
2312 assert(stream
->ustream
);
2314 ustctl_flush_buffer(stream
->ustream
, producer_active
);
2318 * Take a snapshot for a specific stream.
2320 * Returns 0 on success, < 0 on error
2322 int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream
*stream
)
2325 assert(stream
->ustream
);
2327 return ustctl_snapshot(stream
->ustream
);
2331 * Sample consumed and produced positions for a specific stream.
2333 * Returns 0 on success, < 0 on error.
2335 int lttng_ustconsumer_sample_snapshot_positions(
2336 struct lttng_consumer_stream
*stream
)
2339 assert(stream
->ustream
);
2341 return ustctl_snapshot_sample_positions(stream
->ustream
);
2345 * Get the produced position
2347 * Returns 0 on success, < 0 on error
2349 int lttng_ustconsumer_get_produced_snapshot(
2350 struct lttng_consumer_stream
*stream
, unsigned long *pos
)
2353 assert(stream
->ustream
);
2356 return ustctl_snapshot_get_produced(stream
->ustream
, pos
);
2360 * Get the consumed position
2362 * Returns 0 on success, < 0 on error
2364 int lttng_ustconsumer_get_consumed_snapshot(
2365 struct lttng_consumer_stream
*stream
, unsigned long *pos
)
2368 assert(stream
->ustream
);
2371 return ustctl_snapshot_get_consumed(stream
->ustream
, pos
);
2374 void lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream
*stream
,
2378 assert(stream
->ustream
);
2380 ustctl_flush_buffer(stream
->ustream
, producer
);
void lttng_ustconsumer_clear_buffer(struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);

	ustctl_clear_buffer(stream->ustream);
}

int lttng_ustconsumer_get_current_timestamp(
		struct lttng_consumer_stream *stream, uint64_t *ts)
{
	assert(stream);
	assert(stream->ustream);
	assert(ts);

	return ustctl_get_current_timestamp(stream->ustream, ts);
}

int lttng_ustconsumer_get_sequence_number(
		struct lttng_consumer_stream *stream, uint64_t *seq)
{
	assert(stream);
	assert(stream->ustream);
	assert(seq);

	return ustctl_get_sequence_number(stream->ustream, seq);
}

/*
 * Called when the stream signals the consumer that it has hung up.
 */
void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);

	pthread_mutex_lock(&stream->lock);
	if (!stream->quiescent) {
		ustctl_flush_buffer(stream->ustream, 0);
		stream->quiescent = true;
	}
	pthread_mutex_unlock(&stream->lock);
	stream->hangup_flush_done = 1;
}

void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
{
	int i;

	assert(chan);
	assert(chan->uchan);
	assert(chan->buffer_credentials.is_set);

	if (chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(chan);
	}
	for (i = 0; i < chan->nr_stream_fds; i++) {
		int ret;

		ret = close(chan->stream_fds[i]);
		if (ret) {
			PERROR("close");
		}
		if (chan->shm_path[0]) {
			char shm_path[PATH_MAX];

			ret = get_stream_shm_path(shm_path, chan->shm_path, i);
			if (ret) {
				ERR("Cannot get stream shm path");
			}
			ret = run_as_unlink(shm_path,
					lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
							chan->buffer_credentials)),
					lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
							chan->buffer_credentials)));
			if (ret) {
				PERROR("unlink %s", shm_path);
			}
		}
	}
}

void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan)
{
	assert(chan);
	assert(chan->uchan);
	assert(chan->buffer_credentials.is_set);

	consumer_metadata_cache_destroy(chan);
	ustctl_destroy_channel(chan->uchan);
	/* Try to rmdir all directories under shm_path root. */
	if (chan->root_shm_path[0]) {
		(void) run_as_rmdir_recursive(chan->root_shm_path,
				lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
						chan->buffer_credentials)),
				lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
						chan->buffer_credentials)),
				LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
	}
	free(chan->stream_fds);
}

void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);

	if (stream->chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(stream->chan);
	}
	ustctl_destroy_stream(stream->ustream);
}

int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);

	return ustctl_stream_get_wakeup_fd(stream->ustream);
}

int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);

	return ustctl_stream_close_wakeup_fd(stream->ustream);
}

/*
 * Write up to one packet from the metadata cache to the channel.
 *
 * Returns the number of bytes pushed from the cache into the ring buffer, or a
 * negative value on error.
 */
static
int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
{
	ssize_t write_len;
	int ret;

	pthread_mutex_lock(&stream->chan->metadata_cache->lock);
	if (stream->chan->metadata_cache->contents.size ==
			stream->ust_metadata_pushed) {
		/*
		 * In the context of a user space metadata channel, a
		 * change in version can be detected in two ways:
		 *   1) During the pre-consume of the `read_subbuffer` loop,
		 *   2) When populating the metadata ring buffer (i.e. here).
		 *
		 * This function is invoked when there is no metadata
		 * available in the ring-buffer. If all data was consumed
		 * up to the size of the metadata cache, there is no metadata
		 * to insert in the ring-buffer.
		 *
		 * However, the metadata version could still have changed (a
		 * regeneration without any new data will yield the same cache
		 * size).
		 *
		 * The cache's version is checked for a version change and the
		 * consumed position is reset if one occurred.
		 *
		 * This check is only necessary for the user space domain as
		 * it has to manage the cache explicitly. If this reset was not
		 * performed, no metadata would be consumed (and no reset would
		 * occur as part of the pre-consume) until the metadata size
		 * exceeded the cache size.
		 */
		if (stream->metadata_version !=
				stream->chan->metadata_cache->version) {
			metadata_stream_reset_cache_consumed_position(stream);
			consumer_stream_metadata_set_version(stream,
					stream->chan->metadata_cache->version);
		} else {
			ret = 0;
			goto end;
		}
	}

	write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
			&stream->chan->metadata_cache->contents.data[stream->ust_metadata_pushed],
			stream->chan->metadata_cache->contents.size -
					stream->ust_metadata_pushed);
	assert(write_len != 0);
	if (write_len < 0) {
		ERR("Writing one metadata packet");
		ret = write_len;
		goto end;
	}
	stream->ust_metadata_pushed += write_len;

	assert(stream->chan->metadata_cache->contents.size >=
			stream->ust_metadata_pushed);
	ret = write_len;

	/*
	 * Switch packet (but don't open the next one) on every commit of
	 * a metadata packet. Since the subbuffer is fully filled (with padding,
	 * if needed), the stream is "quiescent" after this commit.
	 */
	ustctl_flush_buffer(stream->ustream, 1);
	stream->quiescent = true;
end:
	pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
	return ret;
}

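/*
 * Usage note (informative): callers distinguish three outcomes of
 * commit_one_metadata_packet(), as lttng_ustconsumer_sync_metadata() below
 * does:
 *
 *	ret = commit_one_metadata_packet(stream);
 *	if (ret < 0) {
 *		// error while writing to the ring buffer
 *	} else if (ret > 0) {
 *		// `ret` bytes were pushed from the cache into the ring buffer
 *	} else {
 *		// ret == 0: nothing new in the cache to push
 *	}
 */
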
/*
 * Sync metadata, meaning request it from the session daemon and take a
 * snapshot so the metadata thread can consume it.
 *
 * Metadata stream lock is held here, but we need to release it when
 * interacting with sessiond, else we cause a deadlock with live
 * awaiting on metadata to be pushed out.
 *
 * The RCU read side lock must be held by the caller.
 */
enum sync_metadata_status lttng_ustconsumer_sync_metadata(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *metadata_stream)
{
	int ret;
	enum sync_metadata_status status;
	struct lttng_consumer_channel *metadata_channel;

	assert(ctx);
	assert(metadata_stream);

	metadata_channel = metadata_stream->chan;
	pthread_mutex_unlock(&metadata_stream->lock);
	/*
	 * Request metadata from the sessiond, but don't wait for the flush
	 * because we locked the metadata thread.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 0);
	pthread_mutex_lock(&metadata_stream->lock);
	if (ret < 0) {
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	}

	/*
	 * The metadata stream and channel can be deleted while the
	 * metadata stream lock was released. The stream is checked
	 * for deletion before we use it further.
	 *
	 * Note that it is safe to access a logically-deleted stream since its
	 * existence is still guaranteed by the RCU read side lock. However,
	 * it should no longer be used. The close/deletion of the metadata
	 * channel and stream already guarantees that all metadata has been
	 * consumed. Therefore, there is nothing left to do in this function.
	 */
	if (consumer_stream_is_deleted(metadata_stream)) {
		DBG("Metadata stream %" PRIu64 " was deleted during the metadata synchronization",
				metadata_stream->key);
		status = SYNC_METADATA_STATUS_NO_DATA;
		goto end;
	}

	ret = commit_one_metadata_packet(metadata_stream);
	if (ret < 0) {
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	} else if (ret > 0) {
		status = SYNC_METADATA_STATUS_NEW_DATA;
	} else /* ret == 0 */ {
		status = SYNC_METADATA_STATUS_NO_DATA;
		goto end;
	}

	ret = ustctl_snapshot(metadata_stream->ustream);
	if (ret < 0) {
		ERR("Failed to take a snapshot of the metadata ring-buffer positions, ret = %d", ret);
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	}

end:
	return status;
}

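/*
 * Informal summary of the locking hand-off performed above (assuming the
 * caller holds the metadata stream lock, as documented):
 *
 *	pthread_mutex_unlock(&metadata_stream->lock);  // drop before contacting sessiond
 *	lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 0);
 *	pthread_mutex_lock(&metadata_stream->lock);    // re-acquire before using the stream
 *	if (consumer_stream_is_deleted(metadata_stream)) { ... }  // revalidate
 *
 * Re-validating the stream after re-acquiring its lock is what makes dropping
 * it safe: the RCU read-side lock keeps the object accessible, but not
 * necessarily current.
 */
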
/*
 * Return 0 on success else a negative value.
 */
static int notify_if_more_data(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct ustctl_consumer_stream *ustream;

	assert(stream);
	assert(ctx);

	ustream = stream->ustream;

	/*
	 * First, we are going to check if there is a new subbuffer available
	 * before reading the stream wait_fd.
	 */
	/* Get the next subbuffer */
	ret = ustctl_get_next_subbuf(ustream);
	if (ret) {
		/* No more data found, flag the stream. */
		stream->has_data = 0;
		ret = 0;
		goto end;
	}

	ret = ustctl_put_subbuf(ustream);
	assert(!ret);

	/* This stream still has data. Flag it and wake up the data thread. */
	stream->has_data = 1;
	if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
		ssize_t writelen;

		writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
		if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
			ret = writelen;
			goto end;
		}

		/* The wake up pipe has been notified. */
		ctx->has_wakeup = 1;
	}
	ret = 0;

end:
	return ret;
}

static int consumer_stream_ust_on_wake_up(struct lttng_consumer_stream *stream)
{
	int ret = 0;

	/*
	 * We can consume the 1 byte written into the wait_fd by
	 * UST. Don't trigger error if we cannot read this one byte
	 * (read returns 0), or if the error is EAGAIN or EWOULDBLOCK.
	 *
	 * This is only done when the stream is monitored by a thread,
	 * before the flush is done after a hangup and if the stream
	 * is not flagged with data since there might be nothing to
	 * consume in the wait fd but still have data available
	 * flagged by the consumer wake up pipe.
	 */
	if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
		char dummy;
		ssize_t readlen;

		readlen = lttng_read(stream->wait_fd, &dummy, 1);
		if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
			ret = readlen;
		}
	}

	return ret;
}

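/*
 * Note (informative): notify_if_more_data() and consumer_stream_ust_on_wake_up()
 * are installed as the on_sleep and on_wake_up hooks by
 * lttng_ustconsumer_set_stream_ops() further below; the generic read-subbuffer
 * loop (outside this file) is presumed to invoke on_wake_up before attempting
 * a read and on_sleep once the stream has been drained.
 */
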
static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = ustctl_get_subbuf_size(
			stream->ustream, &subbuf->info.data.subbuf_size);
	if (ret) {
		goto end;
	}

	ret = ustctl_get_padded_subbuf_size(
			stream->ustream, &subbuf->info.data.padded_subbuf_size);
	if (ret) {
		goto end;
	}

end:
	return ret;
}

static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	subbuf->info.metadata.version = stream->metadata_version;

end:
	return ret;
}

static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	ret = ustctl_get_packet_size(
			stream->ustream, &subbuf->info.data.packet_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer packet size");
		goto end;
	}

	ret = ustctl_get_content_size(
			stream->ustream, &subbuf->info.data.content_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer content size");
		goto end;
	}

	ret = ustctl_get_timestamp_begin(
			stream->ustream, &subbuf->info.data.timestamp_begin);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer begin timestamp");
		goto end;
	}

	ret = ustctl_get_timestamp_end(
			stream->ustream, &subbuf->info.data.timestamp_end);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer end timestamp");
		goto end;
	}

	ret = ustctl_get_events_discarded(
			stream->ustream, &subbuf->info.data.events_discarded);
	if (ret) {
		PERROR("Failed to get sub-buffer events discarded count");
		goto end;
	}

	ret = ustctl_get_sequence_number(stream->ustream,
			&subbuf->info.data.sequence_number.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get sub-buffer sequence number");
			goto end;
		}
	} else {
		subbuf->info.data.sequence_number.is_set = true;
	}

	ret = ustctl_get_stream_id(
			stream->ustream, &subbuf->info.data.stream_id);
	if (ret < 0) {
		PERROR("Failed to get stream id");
		goto end;
	}

	ret = ustctl_get_instance_id(stream->ustream,
			&subbuf->info.data.stream_instance_id.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get stream instance id");
			goto end;
		}
	} else {
		subbuf->info.data.stream_instance_id.is_set = true;
	}

end:
	return ret;
}

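/*
 * Note (informative): the sequence number and stream instance id are optional
 * fields; when the tracer does not support them (-ENOTTY), the extraction
 * above simply leaves `is_set` false rather than failing. Consumers of the
 * sub-buffer info are expected to test `is_set` before using `value`, e.g.:
 *
 *	if (subbuf->info.data.sequence_number.is_set) {
 *		uint64_t seq = subbuf->info.data.sequence_number.value;
 *		// use seq
 *	}
 */
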
static int get_next_subbuffer_common(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	const char *addr;

	ret = stream->read_subbuffer_ops.extract_subbuffer_info(
			stream, subbuffer);
	if (ret) {
		goto end;
	}

	ret = get_current_subbuf_addr(stream, &addr);
	if (ret) {
		goto end;
	}

	subbuffer->buffer.buffer = lttng_buffer_view_init(
			addr, 0, subbuffer->info.data.padded_subbuf_size);
	assert(subbuffer->buffer.buffer.data != NULL);
end:
	return ret;
}

static int get_next_subbuffer(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;

	ret = ustctl_get_next_subbuf(stream->ustream);
	if (ret) {
		goto end;
	}

	ret = get_next_subbuffer_common(stream, subbuffer);
	if (ret) {
		goto end;
	}
end:
	return ret;
}

static int get_next_subbuffer_metadata(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	bool cache_empty;
	bool got_subbuffer;
	bool coherent;
	bool buffer_empty;
	unsigned long consumed_pos, produced_pos;

	do {
		ret = ustctl_get_next_subbuf(stream->ustream);
		if (ret == 0) {
			got_subbuffer = true;
		} else {
			got_subbuffer = false;
			if (ret != -EAGAIN) {
				/* Fatal error. */
				goto end;
			}
		}

		/*
		 * Determine if the cache is empty and ensure that a sub-buffer
		 * is made available if the cache is not empty.
		 */
		if (!got_subbuffer) {
			ret = commit_one_metadata_packet(stream);
			if (ret < 0 && ret != -ENOBUFS) {
				goto end;
			} else if (ret == 0) {
				/* Not an error, the cache is empty. */
				cache_empty = true;
				ret = 0;
				goto end;
			} else {
				cache_empty = false;
			}
		} else {
			pthread_mutex_lock(&stream->chan->metadata_cache->lock);
			cache_empty = stream->chan->metadata_cache->contents.size ==
					stream->ust_metadata_pushed;
			pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
		}
	} while (!got_subbuffer);

	/* Populate sub-buffer infos and view. */
	ret = get_next_subbuffer_common(stream, subbuffer);
	if (ret) {
		goto end;
	}

	ret = lttng_ustconsumer_sample_snapshot_positions(stream);
	if (ret < 0) {
		/*
		 * -EAGAIN is not expected since we got a sub-buffer and haven't
		 * pushed the consumption position yet (on put_next).
		 */
		PERROR("Failed to take a snapshot of metadata buffer positions");
		goto end;
	}

	ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
	if (ret) {
		PERROR("Failed to get metadata consumed position");
		goto end;
	}

	ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
	if (ret) {
		PERROR("Failed to get metadata produced position");
		goto end;
	}

	/* Last sub-buffer of the ring buffer ? */
	buffer_empty = (consumed_pos + stream->max_sb_size) == produced_pos;

	/*
	 * The sessiond registry lock ensures that coherent units of metadata
	 * are pushed to the consumer daemon at once. Hence, if a sub-buffer is
	 * acquired, the cache is empty, and it is the only sub-buffer
	 * available, it is safe to assume that it is "coherent".
	 */
	coherent = got_subbuffer && cache_empty && buffer_empty;

	LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);
end:
	return ret;
}

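/*
 * Worked example of the coherency flag computed above (informative): suppose
 * a sub-buffer was acquired (got_subbuffer), everything in the metadata cache
 * has already been pushed (cache_empty), and the acquired sub-buffer is the
 * last one produced (buffer_empty, i.e. consumed_pos + max_sb_size ==
 * produced_pos). Then no partially-pushed metadata unit can follow it, so the
 * sub-buffer is flagged coherent. If any of the three conditions does not
 * hold, the metadata it contains may be a fragment of a larger unit and is
 * flagged non-coherent.
 */
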
static int put_next_subbuffer(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	const int ret = ustctl_put_next_subbuf(stream->ustream);

	assert(ret == 0);
	return ret;
}

static int signal_metadata(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ASSERT_LOCKED(stream->metadata_rdv_lock);
	return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
}

static int lttng_ustconsumer_set_stream_ops(
		struct lttng_consumer_stream *stream)
{
	int ret = 0;

	stream->read_subbuffer_ops.on_wake_up = consumer_stream_ust_on_wake_up;
	if (stream->metadata_flag) {
		stream->read_subbuffer_ops.get_next_subbuffer =
				get_next_subbuffer_metadata;
		stream->read_subbuffer_ops.extract_subbuffer_info =
				extract_metadata_subbuffer_info;
		stream->read_subbuffer_ops.reset_metadata =
				metadata_stream_reset_cache_consumed_position;
		if (stream->chan->is_live) {
			stream->read_subbuffer_ops.on_sleep = signal_metadata;
			ret = consumer_stream_enable_metadata_bucketization(
					stream);
			if (ret) {
				goto end;
			}
		}
	} else {
		stream->read_subbuffer_ops.get_next_subbuffer =
				get_next_subbuffer;
		stream->read_subbuffer_ops.extract_subbuffer_info =
				extract_data_subbuffer_info;
		stream->read_subbuffer_ops.on_sleep = notify_if_more_data;
		if (stream->chan->is_live) {
			stream->read_subbuffer_ops.send_live_beacon =
					consumer_flush_ust_index;
		}
	}

	stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
end:
	return ret;
}

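/*
 * Summary of the callbacks wired above (informative):
 *
 *	                        metadata stream                    data stream
 *	get_next_subbuffer      get_next_subbuffer_metadata        get_next_subbuffer
 *	extract_subbuffer_info  extract_metadata_subbuffer_info    extract_data_subbuffer_info
 *	reset_metadata          metadata_stream_reset_cache_consumed_position
 *	on_sleep                signal_metadata (live only)        notify_if_more_data
 *	send_live_beacon        -                                  consumer_flush_ust_index (live only)
 *
 * on_wake_up and put_next_subbuffer are common to both stream types.
 */
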
/*
 * Called when a stream is created.
 *
 * Return 0 on success or else a negative value.
 */
int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Don't create anything if this is set for streaming or if there is
	 * no current trace chunk on the parent channel.
	 */
	if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
			stream->chan->trace_chunk) {
		ret = consumer_stream_create_output_files(stream, true);
		if (ret) {
			goto error;
		}
	}

	lttng_ustconsumer_set_stream_ops(stream);
	ret = 0;

error:
	return ret;
}

/*
 * Check if data is still being extracted from the buffers for a specific
 * stream. The consumer data lock and the stream lock MUST be acquired before
 * calling this function.
 *
 * Return 1 if the traced data are still getting read else 0 meaning that the
 * data is available for trace viewer reading.
 */
int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);
	assert(stream->ustream);
	ASSERT_LOCKED(stream->lock);

	DBG("UST consumer checking data pending");

	if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
		ret = 0;
		goto end;
	}

	if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
		uint64_t contiguous, pushed;

		/* Ease our life a bit. */
		pthread_mutex_lock(&stream->chan->metadata_cache->lock);
		contiguous = stream->chan->metadata_cache->contents.size;
		pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
		pushed = stream->ust_metadata_pushed;

		/*
		 * We can simply check whether all contiguously available data
		 * has been pushed to the ring buffer, since the push operation
		 * is performed within get_next_subbuf(), and because both
		 * get_next_subbuf() and put_next_subbuf() are issued atomically
		 * thanks to the stream lock within
		 * lttng_ustconsumer_read_subbuffer(). This basically means that
		 * whenever ust_metadata_pushed is incremented, the associated
		 * metadata has been consumed from the metadata stream.
		 */
		DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
				contiguous, pushed);
		assert(((int64_t) (contiguous - pushed)) >= 0);
		if ((contiguous != pushed) ||
				(((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
			ret = 1;	/* Data is pending */
			goto end;
		}
	} else {
		ret = ustctl_get_next_subbuf(stream->ustream);
		if (ret == 0) {
			/*
			 * There is still data so let's put back this
			 * subbuffer.
			 */
			ret = ustctl_put_subbuf(stream->ustream);
			assert(ret == 0);
			ret = 1;	/* Data is pending */
			goto end;
		}
	}

	/* Data is NOT pending so ready to be read. */
	ret = 0;

end:
	return ret;
}

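/*
 * Worked example of the metadata pending check above (informative): with a
 * metadata cache holding 4096 contiguous bytes (contiguous == 4096) that have
 * all been pushed to the ring buffer (pushed == 4096), the function reports
 * that no data is pending. If only 2048 bytes had been pushed, contiguous !=
 * pushed and the function returns 1 until the remainder is pushed and
 * consumed.
 */
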
/*
 * Stop a given metadata channel timer if enabled and close the wait fd which
 * is the poll pipe of the metadata stream.
 *
 * This MUST be called with the metadata channel lock acquired.
 */
void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
{
	int ret;

	assert(metadata);
	assert(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);

	DBG("Closing metadata channel key %" PRIu64, metadata->key);

	if (metadata->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(metadata);
	}

	if (!metadata->metadata_stream) {
		goto end;
	}

	/*
	 * Close the write side so the thread monitoring the stream, if any,
	 * wakes up and cleans up the metadata stream.
	 */
	if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
		ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
		if (ret < 0) {
			PERROR("closing metadata pipe write side");
		}
		metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
	}

end:
	return;
}

/*
 * Close every metadata stream wait fd of the metadata hash table. This
 * function MUST be used very carefully so as not to run into a race between
 * the metadata thread handling streams and this function closing their wait
 * fd.
 *
 * For UST, this is used when the session daemon hangs up. It's the metadata
 * producer so calling this is safe because we are assured that no state change
 * can occur in the metadata thread for the streams in the hash table.
 */
void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(metadata_ht);
	assert(metadata_ht->ht);

	DBG("UST consumer closing all metadata streams");

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
			node.node) {

		health_code_update();

		pthread_mutex_lock(&stream->chan->lock);
		lttng_ustconsumer_close_metadata(stream->chan);
		pthread_mutex_unlock(&stream->chan->lock);

	}
	rcu_read_unlock();
}

void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
{
	int ret;

	ret = ustctl_stream_close_wakeup_fd(stream->ustream);
	if (ret < 0) {
		ERR("Unable to close wakeup fd");
	}
}

/*
 * Please refer to consumer-timer.c before adding any lock within this
 * function or any of its callees. Timers have a very strict locking
 * semantic with respect to teardown. Failure to respect this semantic
 * introduces deadlocks.
 *
 * DON'T hold the metadata lock when calling this function, else this
 * can cause a deadlock with the consumer awaiting metadata to be
 * pushed out due to concurrent interaction with the session daemon.
 */
int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel, int timer, int wait)
{
	struct lttcomm_metadata_request_msg request;
	struct lttcomm_consumer_msg msg;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	uint64_t len, key, offset, version;
	int ret;

	assert(channel);
	assert(channel->metadata_cache);

	memset(&request, 0, sizeof(request));

	/* send the metadata request to sessiond */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER64_UST:
		request.bits_per_long = 64;
		break;
	case LTTNG_CONSUMER32_UST:
		request.bits_per_long = 32;
		break;
	default:
		request.bits_per_long = 0;
		break;
	}

	request.session_id = channel->session_id;
	request.session_id_per_pid = channel->session_id_per_pid;
	/*
	 * Request the application UID here so the metadata of that application can
	 * be sent back. The channel UID corresponds to the user UID of the session
	 * used for the rights on the stream file(s).
	 */
	request.uid = channel->ust_app_uid;
	request.key = channel->key;

	DBG("Sending metadata request to sessiond, session id %" PRIu64
			", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
			request.session_id, request.session_id_per_pid, request.uid,
			request.key);

	pthread_mutex_lock(&ctx->metadata_socket_lock);

	health_code_update();

	ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
			sizeof(request));
	if (ret < 0) {
		ERR("Asking metadata to sessiond");
		goto end;
	}

	health_code_update();

	/* Receive the metadata from sessiond */
	ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
			sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %d (expects %zu)",
			ret, sizeof(msg));
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
		/*
		 * The ret value might be 0, meaning an orderly shutdown, but this is ok
		 * since the caller handles this.
		 */
		goto end;
	}

	health_code_update();

	if (msg.cmd_type == LTTNG_ERR_UND) {
		/* No registry found */
		(void) consumer_send_status_msg(ctx->consumer_metadata_socket,
				ret_code);
		ret = 0;
		goto end;
	} else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
		ERR("Unexpected cmd_type received %d", msg.cmd_type);
		ret = -1;
		goto end;
	}

	len = msg.u.push_metadata.len;
	key = msg.u.push_metadata.key;
	offset = msg.u.push_metadata.target_offset;
	version = msg.u.push_metadata.version;

	assert(key == channel->key);
	if (len == 0) {
		DBG("No new metadata to receive for key %" PRIu64, key);
	}

	health_code_update();

	/* Tell session daemon we are ready to receive the metadata. */
	ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
			LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0 || len == 0) {
		/*
		 * Somehow, the session daemon is not responding anymore or there is
		 * nothing to receive.
		 */
		goto end;
	}

	health_code_update();

	ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
			key, offset, len, version, channel, timer, wait);
	if (ret >= 0) {
		/*
		 * Only send the status msg if the sessiond is alive meaning a positive
		 * ret code.
		 */
		(void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
	}
	ret = 0;

end:
	health_code_update();

	pthread_mutex_unlock(&ctx->metadata_socket_lock);
	return ret;
}

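/*
 * Message sequence performed above (informative), from the consumer's point
 * of view, over ctx->consumer_metadata_socket:
 *
 *	consumer -> sessiond : lttcomm_metadata_request_msg (session id, per-pid id, UID, channel key)
 *	sessiond -> consumer : lttcomm_consumer_msg (LTTNG_CONSUMER_PUSH_METADATA or LTTNG_ERR_UND)
 *	consumer -> sessiond : status (LTTCOMM_CONSUMERD_SUCCESS) when ready to receive
 *	sessiond -> consumer : metadata payload, handled by lttng_ustconsumer_recv_metadata()
 *	consumer -> sessiond : final status, only if the reception returned >= 0
 *
 * The whole exchange is serialized by ctx->metadata_socket_lock.
 */
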
/*
 * Return the result of the ustctl call to get the stream id.
 */
int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream,
		uint64_t *stream_id)
{
	assert(stream);
	assert(stream_id);

	return ustctl_get_stream_id(stream->ustream, stream_id);
}