/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
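/*
 * consumer.c: routines shared by the kernel and user space (UST) consumer
 * daemons to manage channels, streams and relayd socket pairs.
 */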
#include <sys/socket.h>
#include <sys/types.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/compat/endian.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer/consumer-timer.h>
#include <common/consumer/consumer.h>
#include <common/consumer/consumer-stream.h>
#include <common/consumer/consumer-testpoint.h>
#include <common/align.h>
#include <common/consumer/consumer-metadata-cache.h>
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};
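/* Commands passed between threads over the consumer channel pipe. */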
enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};
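/*
 * Message exchanged on the channel pipe: 'chan' carries the channel for an
 * add operation while 'key' identifies the channel for a delete operation.
 */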
struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};
/* Flag used to temporarily pause data consumption from testpoints. */
int data_consumption_paused;
/*
 * Flag to inform the polling thread to quit when all fd hung up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds have hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 */
int consumer_quit;
/*
 * Global hash table containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll thread
 * for the metadata and the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed so we just send back the thread in a poll wait
 * call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
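/* Ask the consumer health thread to quit by writing to its quit pipe. */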
static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;
	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}
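/*
 * Notify the thread managing the channel pipe that the channel identified by
 * 'key' must be deleted.
 */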
void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
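/*
 * Read one consumer_channel_msg from the channel pipe and return its content
 * through the 'chan', 'key' and 'action' output parameters.
 */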
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		ret = -1;
		goto error;
	}
	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;
error:
	return (int) ret;
}
/*
 * Cleanup the stream list of a channel. Those streams are not yet globally
 * visible.
 */
static void clean_channel_stream_list(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so we
		 * have a guarantee that this call will succeed. Setting the monitor
		 * mode to 0 so we don't lock nor try to delete the stream from the
		 * global hash table.
		 */
		stream->monitor = 0;
		consumer_stream_destroy(stream, NULL);
	}
}
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	return stream;
}
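/*
 * Invalidate the key of an existing stream so that a later lookup on the same
 * key cannot match it, while keeping the stream in the hash table.
 */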
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
}
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
/*
 * There is a possibility that the consumer does not have enough time between
 * the close of the channel on the session daemon and the cleanup in here thus
 * once we have a channel add with an existing key, we know for sure that this
 * channel will eventually get cleaned up by all streams being closed.
 *
 * This function just nullifies the already existing channel key.
 */
static void steal_channel_key(uint64_t key)
{
	struct lttng_consumer_channel *channel;

	channel = consumer_find_channel(key);
	if (channel) {
		channel->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need to iterate on
		 * this channel when iterating over the hash table. Just change the
		 * node key.
		 */
		channel->node.key = (uint64_t) -1ULL;
	}
}
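/*
 * RCU callback that frees a channel object once every pre-existing RCU reader
 * has finished with it.
 */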
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_free_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}
	free(channel);
}
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call RCU since we don't want the
	 * socket fds to be reassigned thus potentially creating bad state of the
	 * relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	pthread_mutex_destroy(&relayd->ctrl_sock_mutex);
	free(relayd);
}
/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Destroy streams that might have been left in the stream list. */
	clean_channel_stream_list(channel);

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}
	if (channel->monitor_timer_enabled == 1) {
		consumer_timer_monitor_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		goto end;
	}

	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);

	iter.iter.node = &channel->channels_by_session_id_ht_node.node;
	ret = lttng_ht_del(consumer_data.channels_by_session_id_ht, &iter);
	assert(!ret);

	call_rcu(&channel->node.head, free_channel_rcu);
end:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}
	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
	rcu_read_unlock();
}
/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in a RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state have changed.
 */
void lttng_consumer_cleanup_relayd(struct consumer_relayd_sock_pair *relayd)
{
	uint64_t netidx;

	DBG("Cleaning up relayd object ID %"PRIu64, relayd->net_seq_idx);

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in a RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * have changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	notify_thread_lttng_pipe(relayd->ctx->consumer_data_pipe);
	notify_thread_lttng_pipe(relayd->ctx->consumer_metadata_pipe);
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}
/*
 * Completely destroy the stream in every visible data structure and the given
 * hash table if any.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}

/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}
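/*
 * Snapshot the channel attributes that a stream may consult without taking
 * the channel lock into the stream's read-only copy.
 */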
void consumer_stream_update_channel_attributes(
		struct lttng_consumer_stream *stream,
		struct lttng_consumer_channel *channel)
{
	stream->channel_read_only_attributes.tracefile_size =
			channel->tracefile_size;
	memcpy(stream->channel_read_only_attributes.path, channel->pathname,
			sizeof(stream->channel_read_only_attributes.path));
}
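/* Allocate and initialize a new stream object. Returns NULL on error. */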
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uint64_t relayd_id,
		uint64_t session_id,
		int cpu,
		enum consumer_channel_type type,
		unsigned int monitor,
		uint64_t trace_archive_id)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		goto error;
	}

	stream->key = stream_key;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->state = state;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_file = NULL;
	stream->last_sequence_number = -1ULL;
	stream->trace_archive_id = trace_archive_id;
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->metadata_timer_lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	return stream;

error:
	free(stream);
	return NULL;
}
/*
 * Add a stream to the global list protected by a mutex.
 */
void consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow adding redundant keys
	 * to this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}

void consumer_del_data_stream(struct lttng_consumer_stream *stream)
{
	consumer_del_stream(stream, data_ht);
}
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}
/*
 * Allocate and return a consumer relayd socket.
 */
static struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

	return relayd;
}
/*
 * Find a relayd and send the stream.
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream->net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size, stream->chan->tracefile_count,
				stream->trace_archive_id);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd add stream failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end;
		}

		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Find a relayd and send the streams sent message.
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_streams_sent(&relayd->control_sock);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd streams sent failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end;
		}
	} else {
		ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.",
				net_seq_idx);
		ret = -1;
		goto end;
	}

	ret = 0;
	DBG("All streams sent relayd id %" PRIu64, net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Find a relayd and close the stream.
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that the next_net_seq_num will be
		 * incremented. This is why when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval,
		const char *root_shm_path,
		const char *shm_path)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->relayd_id = relayd_id;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	switch (output) {
	case LTTNG_EVENT_SPLICE:
		channel->output = CONSUMER_CHANNEL_SPLICE;
		break;
	case LTTNG_EVENT_MMAP:
		channel->output = CONSUMER_CHANNEL_MMAP;
		break;
	default:
		assert(0);
		free(channel);
		channel = NULL;
		goto end;
	}

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	if (root_shm_path) {
		strncpy(channel->root_shm_path, root_shm_path, sizeof(channel->root_shm_path));
		channel->root_shm_path[sizeof(channel->root_shm_path) - 1] = '\0';
	}
	if (shm_path) {
		strncpy(channel->shm_path, shm_path, sizeof(channel->shm_path));
		channel->shm_path[sizeof(channel->shm_path) - 1] = '\0';
	}

	lttng_ht_node_init_u64(&channel->node, channel->key);
	lttng_ht_node_init_u64(&channel->channels_by_session_id_ht_node,
			channel->session_id);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	return channel;
}
/*
 * Add a channel to the global list protected by a mutex.
 *
 * Always return 0 indicating success.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);

	/*
	 * This gives us a guarantee that the channel we are about to add to the
	 * channel hash table will be unique. See the steal_channel_key() comment
	 * on why we need to steal the channel key at this stage.
	 */
	steal_channel_key(channel->key);

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);
	lttng_ht_add_u64(consumer_data.channels_by_session_id_ht,
			&channel->channels_by_session_id_ht_node);

	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (channel->wait_fd != -1 && channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}

	return 0;
}
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht, int *nb_inactive_fd)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(local_stream);

	DBG("Updating poll fd array");
	*nb_inactive_fd = 0;
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 *
		 * We track the number of inactive FDs because they still need to be
		 * closed by the polling thread after a wakeup on the data_pipe or
		 * metadata_pipe.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			(*nb_inactive_fd)++;
			continue;
		}
		/*
		 * This clobbers the debug output way too much. Uncomment it if you
		 * need it for debugging purposes.
		 *
		 * DBG("Active FD %d", stream->wait_fd);
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;

	(*pollfd)[i + 1].fd = lttng_pipe_get_readfd(ctx->consumer_wakeup_pipe);
	(*pollfd)[i + 1].events = POLLIN | POLLPRI;
	return i;
}
/*
 * Poll on the should_quit pipe and the command socket. Return -1 on error,
 * 1 if the thread should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		return -1;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		return 1;
	}
	return 0;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0; it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds and MUST be called when all
 * instances are destroyed i.e. when all threads were joined and are ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	rcu_read_lock();
	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}
	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);
	lttng_ht_destroy(consumer_data.channels_by_session_id_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);
	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);

	lttng_trace_chunk_registry_destroy(consumer_data.chunk_registry);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	CMM_STORE_SHARED(consumer_quit, 1);
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
/*
 * Flush pending writes to trace output disk file.
 */
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int ret;
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	ret = posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
	if (ret && ret != -ENOSYS) {
		errno = ret;
		PERROR("posix_fadvise on fd %i", outfd);
	}
}
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible for doing the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
		consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ctx->consumer_wakeup_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_wakeup_pipe) {
		goto error_wakeup_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ctx->channel_monitor_pipe = -1;

	return ctx;

error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
error_wakeup_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Iterate over all streams of the metadata hashtable and free them
 * properly.
 */
static void destroy_metadata_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	destroy_data_stream_ht(data_ht);
	destroy_metadata_stream_ht(metadata_ht);

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
	utils_close_pipe(ctx->consumer_should_quit);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the PERROR
		 * not to clobber the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Careful review MUST be done if any changes occur!
 *
 * Returns the number of bytes written
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	unsigned long mmap_offset;
	void *mmap_base;
	ssize_t ret = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Detect if the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		if (ret < 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			ret = -EPERM;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			ret = -EINVAL;
			goto end;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->reset_metadata_flag) {
				ret = relayd_reset_metadata(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->metadata_version);
				if (ret < 0) {
					relayd_hang_up = 1;
					goto write_error;
				}
				stream->reset_metadata_flag = 0;
			}
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret < 0) {
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;

		/* Write metadata stream id before payload */
		if (stream->metadata_flag) {
			ret = write_relayd_metadata_id(outfd, stream, padding);
			if (ret < 0) {
				relayd_hang_up = 1;
				goto write_error;
			}
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		if (stream->metadata_flag && stream->reset_metadata_flag) {
			ret = utils_truncate_stream_file(stream->out_fd, 0);
			if (ret < 0) {
				ERR("Reset metadata file");
				goto end;
			}
			stream->reset_metadata_flag = 0;
		}

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_file) {
				lttng_index_file_put(stream->index_file);
				stream->index_file = lttng_index_file_create(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current,
						CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
				if (!stream->index_file) {
					goto end;
				}
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	/*
	 * This call guarantees that len or less is returned. It's impossible to
	 * receive a ret value that is bigger than len.
	 */
	ret = lttng_write(outfd, mmap_base + mmap_offset, len);
	DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
	if (ret < 0 || ((size_t) ret != len)) {
		/*
		 * Report error to caller if nothing was written else at least send the
		 * amount written.
		 */
		if (ret < 0) {
			ret = -errno;
		}
		relayd_hang_up = 1;

		/* Socket operation failed. We consider the relayd dead */
		if (errno == EPIPE || errno == EINVAL || errno == EBADF) {
			/*
			 * This is possible if the fd is closed on the other side
			 * (outfd) or any write problem. It can be verbose a bit for a
			 * normal execution if for instance the relayd is stopped
			 * abruptly. This can happen so set this to a DBG statement.
			 */
			DBG("Consumer mmap write detected relayd hang up");
		} else {
			/* Unhandled error, print it and stop function right now. */
			PERROR("Error in write mmap (ret %zd != len %lu)", ret, len);
		}
		goto write_error;
	}
	stream->output_written += ret;

	/* This call is useless on a socket so better save a syscall. */
	if (!relayd) {
		/* This won't block, but will start writeout asynchronously */
		lttng_sync_file_range(outfd, stream->out_fd_offset, len,
				SYNC_FILE_RANGE_WRITE);
		stream->out_fd_offset += len;
		lttng_consumer_sync_trace_file(stream, orig_offset);
	}

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		ERR("Relayd hangup. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Detect if the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			written = -ret;
			goto end;
		}
	}
	splice_pipe = stream->splice_pipe;

	/* Write metadata stream id before payload */
	if (relayd) {
		unsigned long total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the
			 * function since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			if (stream->reset_metadata_flag) {
				ret = relayd_reset_metadata(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->metadata_version);
				if (ret < 0) {
					relayd_hang_up = 1;
					goto write_error;
				}
				stream->reset_metadata_flag = 0;
			}
			ret = write_relayd_metadata_id(splice_pipe[1], stream,
					padding);
			if (ret < 0) {
				written = ret;
				relayd_hang_up = 1;
				goto write_error;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret < 0) {
			written = ret;
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		if (stream->metadata_flag && stream->reset_metadata_flag) {
			ret = utils_truncate_stream_file(stream->out_fd, 0);
			if (ret < 0) {
				ERR("Reset metadata file");
				goto end;
			}
			stream->reset_metadata_flag = 0;
		}
		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				written = ret;
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_file) {
				lttng_index_file_put(stream->index_file);
				stream->index_file = lttng_index_file_create(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current,
						CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
				if (!stream->index_file) {
					goto end;
				}
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = -ret;
			PERROR("Error in relay splice");
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd && stream->metadata_flag) {
			size_t metadata_payload_size =
				sizeof(struct lttcomm_relayd_metadata_payload);

			/* Update counter to fit the spliced data */
			ret_splice += metadata_payload_size;
			len += metadata_payload_size;
			/*
			 * We do this so the return value can match the len passed as
			 * argument to this function.
			 */
			written -= metadata_payload_size;
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file (out_fd: %d), ret %zd",
				outfd, ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = -ret;
			relayd_hang_up = 1;
			goto write_error;
		} else if (ret_splice > len) {
			/*
			 * We don't expect this code path to be executed but you never know
			 * so this is an extra protection against a buggy splice().
			 */
			ret = errno;
			written += ret_splice;
			PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice,
					len);
			goto splice_error;
		} else {
			/* All good, update current len and continue. */
			len -= ret_splice;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	if (!relayd) {
		lttng_consumer_sync_trace_file(stream, orig_offset);
	}
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		ERR("Relayd hangup. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	default:
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Sample the snapshot positions for a specific fd.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_sample_snapshot_positions(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_sample_snapshot_positions(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_sample_snapshot_positions(stream);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
/*
 * Take a snapshot for a specific fd.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
/*
 * Get the produced position.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
/*
 * Get the consumed position (free-running counter position in bytes).
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_consumed_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_consumed_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
void lttng_consumer_close_all_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_all_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	struct lttng_consumer_channel *free_chan = NULL;

	/*
	 * This call should NEVER receive regular stream. It must always be
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);
	if (stream->chan->metadata_cache) {
		/* Only applicable to userspace consumers. */
		pthread_mutex_lock(&stream->chan->metadata_cache->lock);
	}

	/* Remove any reference to that stream. */
	consumer_stream_delete(stream, ht);

	/* Close down everything including the relayd if one. */
	consumer_stream_close(stream);
	/* Destroy tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for a NULL
	 * pointer value.
	 */
	stream->chan->metadata_stream = NULL;

	if (stream->chan->metadata_cache) {
		pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
	}
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

	consumer_stream_free(stream);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
void consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	assert(stream);
	assert(ht);

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	rcu_read_lock();

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	rcu_read_unlock();

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}
/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	assert(pollset);

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}
/*
 * Perform operations that need to be done after a stream has
 * rotated and released the stream lock.
 *
 * Multiple rotations cannot occur simultaneously, so we know the state of the
 * "rotated" stream flag cannot change.
 *
 * This MUST be called WITHOUT the stream lock held.
 */
static
int consumer_post_rotation(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;

	pthread_mutex_lock(&stream->chan->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * The ust_metadata_pushed counter has been reset to 0, so now
		 * we can wakeup the metadata thread so it dumps the metadata
		 * cache to the new file.
		 */
		if (stream->metadata_flag) {
			consumer_metadata_wakeup_pipe(stream->chan);
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}

	pthread_mutex_unlock(&stream->chan->lock);
	return ret;
}
/*
 * Thread polls on metadata file descriptors and writes them on disk or on the
 * network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	if (testpoint(consumerd_thread_metadata)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Metadata main loop started");

	while (1) {
restart:
		health_code_update();
		health_poll_entry();
		DBG("Metadata poll wait");
		ret = lttng_poll_wait(&events, -1);
		DBG("Metadata poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < sizeof(stream)) {
						if (pipe_len < 0) {
							PERROR("read metadata stream");
						}
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events,
								lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
						lttng_pipe_read_close(ctx->consumer_metadata_pipe);
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI | LPOLLHUP);
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				do {
					health_code_update();

					len = ctx->on_buffer_ready(stream, ctx);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred thus we
					 * simply remove it from the poll set and free the
					 * stream.
					 */
				} while (len > 0);

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			} else if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						health_code_update();

						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if we get
						 * a negative len, it means an error occurred thus we
						 * simply remove it from the poll set and free the
						 * stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file
				 * descriptors and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				rcu_read_unlock();
				goto end;
			}
			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

	/* All is OK */
	err = 0;
end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
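/*
 * A minimal sketch (assuming a fully initialized ctx; the thread and
 * variable names are hypothetical) of how a consumer daemon launches this
 * thread; lttng-consumerd does the equivalent for each of the consumer
 * threads defined in this file:
 *
 *	pthread_t metadata_thread;
 *	int ret;
 *
 *	ret = pthread_create(&metadata_thread, NULL,
 *			consumer_thread_metadata_poll, (void *) ctx);
 *	if (ret) {
 *		errno = ret;
 *		PERROR("pthread_create metadata thread");
 *	}
 *	...
 *	ret = pthread_join(metadata_thread, NULL);
 */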
/*
 * This thread polls the fds in the set to consume the data and writes
 * it to the trace file if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i, err = -1;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	/* 2 for the consumer_data_pipe and wake up pipe */
	const int nb_pipes_fd = 2;
	/* Number of FDs with CONSUMER_ENDPOINT_INACTIVE but still open. */
	int nb_inactive_fd = 0;
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);

	if (testpoint(consumerd_thread_data)) {
		goto error_testpoint;
	}

	health_code_update();

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		health_code_update();

		high_prio = 0;
		num_hup = 0;

		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/* Allocate for all fds */
			pollfd = zmalloc((consumer_data.stream_count + nb_pipes_fd) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			local_stream = zmalloc((consumer_data.stream_count + nb_pipes_fd) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht, &nb_inactive_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit, cleanup the thread */
		if (nb_fd == 0 && nb_inactive_fd == 0 &&
				CMM_LOAD_SHARED(consumer_quit) == 1) {
			err = 0;	/* All is OK */
			goto end;
		}
		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + nb_pipes_fd);
		if (testpoint(consumerd_thread_data_poll)) {
			goto end;
		}
		health_poll_entry();
		num_rdy = poll(pollfd, nb_fd + nb_pipes_fd, -1);
		health_poll_exit();
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		if (caa_unlikely(data_consumption_paused)) {
			DBG("Data consumption paused, sleeping...");
			sleep(1);
			goto restart;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < sizeof(new_stream)) {
				PERROR("Consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Handle wakeup pipe. */
		if (pollfd[nb_fd + 1].revents & (POLLIN | POLLPRI)) {
			char dummy;
			ssize_t pipe_readlen;

			pipe_readlen = lttng_pipe_read(ctx->consumer_wakeup_pipe, &dummy,
					sizeof(dummy));
			if (pipe_readlen < 0) {
				PERROR("Consumer data wakeup pipe");
			}
			/* We've been awakened to handle stream(s). */
			ctx->has_wakeup = 0;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				ssize_t len;

				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done ||
					local_stream[i]->has_data) {
				ssize_t len;

				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}
	/* All is OK */
	err = 0;
end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}
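/*
 * Layout of the poll array rebuilt in the loop above (set up when the array
 * is refreshed), which is why the code indexes pollfd[nb_fd] and
 * pollfd[nb_fd + 1] right after poll() returns:
 *
 *	[0 .. nb_fd - 1]  stream wait fds
 *	[nb_fd]           consumer_data_pipe (stream additions, state changes)
 *	[nb_fd + 1]       consumer_wakeup_pipe (data-ready wakeups)
 */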
/*
 * Close wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
static void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}

		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			if (stream->metadata_flag) {
				/* Safe and protected by the stream lock. */
				lttng_ustconsumer_close_metadata(stream->chan);
			} else {
				/*
				 * Note: a mutex is taken internally within
				 * liblttng-ust-ctl to protect timer wakeup_fd
				 * use from concurrent close.
				 */
				lttng_ustconsumer_close_stream_wakeup(stream);
			}
			break;
		default:
			ERR("Unknown consumer_data type");
			abort();
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);

	if (testpoint(consumerd_thread_channel)) {
		goto error_testpoint;
	}

	health_code_update();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Channel main loop started");

	while (1) {
restart:
		health_code_update();
		DBG("Channel poll wait");
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Channel poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						if (ret < 0) {
							ERR("Error reading channel pipe");
						}
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
							chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
							chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLERR | LPOLLHUP);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						/*
						 * This command should never be called if the channel
						 * has streams monitored by either the data or metadata
						 * thread. The consumer only notifies this thread with a
						 * channel del. command if it receives a destroy
						 * channel command from the session daemon that sent it
						 * if a command prior to the GET_CHANNEL failed.
						 */

						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);
						assert(ret == 0);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							health_code_update();
							/* Destroy streams that might have been left in the stream list. */
							clean_channel_stream_list(chan);
							break;
						default:
							ERR("Unknown consumer_data type");
							assert(0);
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						goto restart;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				assert(ret == 0);

				/*
				 * This will close the wait fd for each stream associated to
				 * this channel AND monitored by the data/metadata thread thus
				 * will be cleaned up by the right thread.
				 */
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				rcu_read_unlock();
				goto end;
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

	/* All is OK */
	err = 0;
end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
error_testpoint:
	DBG("Channel poll thread exiting");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	assert(ctx);
	assert(sockpoll);

	ret = lttng_consumer_poll_socket(sockpoll);
	if (ret) {
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret, err = -1;
	/*
	 * Structure to poll for incoming data on the communication socket;
	 * avoids blocking reads on the sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);

	if (testpoint(consumerd_thread_sessiond)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	ret = lttng_consumer_poll_socket(consumer_sockpoll);
	if (ret) {
		if (ret > 0) {
			/* should exit */
			err = 0;
		}
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret) {
		if (ret > 0) {
			/* should exit */
			err = 0;
		}
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		health_code_update();

		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			if (ret > 0) {
				/* should exit */
				err = 0;
			}
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			err = 0;
			goto end;
		}
		if (CMM_LOAD_SHARED(consumer_quit)) {
			DBG("consumer_thread_receive_fds received quit from signal");
			err = 0;	/* All is OK */
			goto end;
		}
		DBG("received command on sock");
	}
	/* All is OK */
	err = 0;

end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon which
	 * is dead.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_all_metadata();

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	CMM_STORE_SHARED(consumer_quit, 1);

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set so to quit gracefully.
	 */
	notify_thread_lttng_pipe(ctx->consumer_data_pipe);

	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);

	notify_health_quit_pipe(health_quit_pipe);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;
	int rotate_ret;
	bool rotated = false;

	pthread_mutex_lock(&stream->lock);
	if (stream->metadata_flag) {
		pthread_mutex_lock(&stream->metadata_rdv_lock);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx, &rotated);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx, &rotated);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		ret = -ENOSYS;
		break;
	}

	if (stream->metadata_flag) {
		pthread_cond_broadcast(&stream->metadata_rdv);
		pthread_mutex_unlock(&stream->metadata_rdv_lock);
	}
	pthread_mutex_unlock(&stream->lock);
	if (rotated) {
		rotate_ret = consumer_post_rotation(stream, ctx);
		if (rotate_ret < 0) {
			ERR("Failed after a rotation");
			ret = -1;
		}
	}

	return ret;
}
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
int lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.channel_ht) {
		goto error;
	}

	consumer_data.channels_by_session_id_ht =
			lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.channels_by_session_id_ht) {
		goto error;
	}

	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.relayd_ht) {
		goto error;
	}

	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_list_ht) {
		goto error;
	}

	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_per_chan_id_ht) {
		goto error;
	}

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!data_ht) {
		goto error;
	}

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		goto error;
	}

	consumer_data.chunk_registry = lttng_trace_chunk_registry_create();
	if (!consumer_data.chunk_registry) {
		goto error;
	}

	return 0;

error:
	return -1;
}
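/*
 * Minimal usage sketch (assuming it runs once at daemon startup, before any
 * consumer thread is created):
 *
 *	if (lttng_consumer_init() < 0) {
 *		ERR("Failed to allocate consumer hash tables");
 *		exit(EXIT_FAILURE);
 *	}
 */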
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
void consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
		uint64_t relayd_session_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(ctx);
	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		} else {
			relayd->sessiond_session_id = sessiond_id;
			relayd_created = 1;
		}

		/*
		 * This code path MUST continue to the consumer send status message so
		 * we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	ret = lttng_consumer_poll_socket(consumer_sockpoll);
	if (ret) {
		/* Needing to exit in the middle of a command: error. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. Any case, we
		 * don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		relayd->relayd_session_id = relayd_session_id;

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);
	/*
	 * We gave the ownership of the fd to the relayd structure. Set the
	 * fd to -1 so we don't call close() on it in the error path below.
	 */
	fd = -1;

	/* We successfully added the socket. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/*
	 * Add relayd socket pair to consumer data hashtable. If object already
	 * exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return;

error:
	if (consumer_send_status_msg(sock, ret_code) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	}

error_nosignal:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}
}
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * A rcu read side lock MUST be acquired before calling this function and held
 * until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here where the relayd session
		 * id might not be when having multiple relayd.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}
/*
 * Check if, for a given session id, there is still data to be extracted
 * from the buffers.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		assert(0);
	}

	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		pthread_mutex_lock(&stream->lock);

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		pthread_mutex_unlock(&stream->lock);
	}

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		if (ret < 0) {
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			/* Communication error with the relayd: report no data pending. */
			goto data_not_pending;
		}

		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}

			if (ret == 1) {
				pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
				goto data_pending;
			} else if (ret < 0) {
				ERR("Relayd data pending failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
				lttng_consumer_cleanup_relayd(relayd);
				pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
				goto data_not_pending;
			}
		}

		/* Send end command for data pending. */
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd end data pending failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
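/*
 * Illustration (not part of the build): the session daemon typically drives
 * this check when stopping a session, retrying until nothing is left to
 * extract. The retry delay below is hypothetical:
 *
 *	while (consumer_data_pending(session_id)) {
 *		usleep(RETRY_DELAY_US);
 *	}
 *	// Buffers and relayd queues are drained; trace data is complete.
 */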
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
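/*
 * Typical usage sketch, mirroring the pattern used throughout this file: a
 * negative return value (the lttcomm_send_unix_sock() result) means the
 * session daemon stopped responding:
 *
 *	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
 *	if (ret < 0) {
 *		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
 *	}
 */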
/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	assert(sock >= 0);

	memset(&msg, 0, sizeof(msg));
	if (!channel) {
		msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
	} else {
		msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
unsigned long consumer_get_consume_start_pos(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t nb_packets_per_stream,
		uint64_t max_sb_size)
{
	unsigned long start_pos;

	if (!nb_packets_per_stream) {
		return consumed_pos;	/* Grab everything */
	}
	start_pos = produced_pos - offset_align_floor(produced_pos, max_sb_size);
	start_pos -= max_sb_size * nb_packets_per_stream;
	if ((long) (start_pos - consumed_pos) < 0) {
		return consumed_pos;	/* Grab everything */
	}
	return start_pos;
}
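/*
 * Worked example of the computation above (illustrative numbers): with
 * max_sb_size = 4096, produced_pos = 10000, consumed_pos = 0 and
 * nb_packets_per_stream = 1:
 *
 *	start_pos = 10000 - offset_align_floor(10000, 4096);	// 10000 - 1808 = 8192
 *	start_pos -= 4096 * 1;					// 4096
 *	(long) (4096 - 0) >= 0, so 4096 is returned
 *
 * i.e. the snapshot starts at the last complete packet boundary instead of
 * re-reading everything from the consumed position.
 */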
static
int consumer_flush_buffer(struct lttng_consumer_stream *stream, int producer_active)
{
	int ret = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = kernctl_buffer_flush(stream->wait_fd);
		if (ret < 0) {
			ERR("Failed to flush kernel stream");
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustctl_flush_buffer(stream, producer_active);
		break;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}

end:
	return ret;
}
/*
 * Sample the rotate position for all the streams of a channel. If a stream
 * is already at the rotate position (produced == consumed), we flag it as
 * ready for rotation. The rotation of ready streams occurs after we have
 * replied to the session daemon that we have finished sampling the positions.
 * Must be called with RCU read-side lock held to ensure existence of channel.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_rotate_channel(struct lttng_consumer_channel *channel,
		uint64_t key, const char *path, uint64_t relayd_id,
		uint32_t metadata, uint64_t new_chunk_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht = consumer_data.stream_per_chan_id_ht;

	DBG("Consumer sample rotate position for channel %" PRIu64, key);

	rcu_read_lock();

	pthread_mutex_lock(&channel->lock);
	channel->current_chunk_id = new_chunk_id;

	ret = lttng_strncpy(channel->pathname, path, sizeof(channel->pathname));
	if (ret) {
		ERR("Failed to copy new path to channel during channel rotation");
		ret = -1;
		goto end_unlock_channel;
	}

	if (relayd_id == -1ULL) {
		/*
		 * The domain path (/ust or /kernel) has been created before, we
		 * now need to create the last part of the path: the application/user
		 * specific section (uid/1000/64-bit).
		 */
		ret = utils_mkdir_recursive(channel->pathname, S_IRWXU | S_IRWXG,
				channel->uid, channel->gid);
		if (ret < 0) {
			ERR("Failed to create trace directory at %s during rotation",
					channel->pathname);
			ret = -1;
			goto end_unlock_channel;
		}
	}

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key, &iter.iter,
			stream, node_channel_id.node) {
		unsigned long consumed_pos;

		health_code_update();

		/*
		 * Lock stream because we are about to change its state.
		 */
		pthread_mutex_lock(&stream->lock);

		ret = lttng_strncpy(stream->channel_read_only_attributes.path,
				channel->pathname,
				sizeof(stream->channel_read_only_attributes.path));
		if (ret) {
			ERR("Failed to sample channel path name during channel rotation");
			goto end_unlock_stream;
		}
		ret = lttng_consumer_sample_snapshot_positions(stream);
		if (ret < 0) {
			ERR("Failed to sample snapshot position during channel rotation");
			goto end_unlock_stream;
		}

		ret = lttng_consumer_get_produced_snapshot(stream,
				&stream->rotate_position);
		if (ret < 0) {
			ERR("Failed to sample produced position during channel rotation");
			goto end_unlock_stream;
		}

		lttng_consumer_get_consumed_snapshot(stream,
				&consumed_pos);
		if (consumed_pos == stream->rotate_position) {
			stream->rotate_ready = true;
		}

		ret = consumer_flush_buffer(stream, 1);
		if (ret < 0) {
			ERR("Failed to flush stream %" PRIu64 " during channel rotation",
					stream->key);
			goto end_unlock_stream;
		}

		pthread_mutex_unlock(&stream->lock);
	}
	pthread_mutex_unlock(&channel->lock);

	ret = 0;
	goto end;

end_unlock_stream:
	pthread_mutex_unlock(&stream->lock);
end_unlock_channel:
	pthread_mutex_unlock(&channel->lock);
end:
	rcu_read_unlock();
	return ret;
}
/*
 * Check if a stream is ready to be rotated after extracting it.
 *
 * Return 1 if it is ready for rotation, 0 if it is not, a negative value on
 * error. Stream lock must be held.
 */
int lttng_consumer_stream_is_rotate_ready(struct lttng_consumer_stream *stream)
{
	int ret;
	unsigned long consumed_pos;

	if (!stream->rotate_position && !stream->rotate_ready) {
		ret = 0;
		goto end;
	}

	if (stream->rotate_ready) {
		ret = 1;
		goto end;
	}

	/*
	 * If we don't have the rotate_ready flag, check the consumed position
	 * to determine if we need to rotate.
	 */
	ret = lttng_consumer_sample_snapshot_positions(stream);
	if (ret < 0) {
		ERR("Taking snapshot positions");
		goto end;
	}

	ret = lttng_consumer_get_consumed_snapshot(stream, &consumed_pos);
	if (ret < 0) {
		ERR("Consumed snapshot position");
		goto end;
	}

	/* Rotate position not reached yet (with check for overflow). */
	if ((long) (consumed_pos - stream->rotate_position) < 0) {
		ret = 0;
		goto end;
	}
	ret = 1;

end:
	return ret;
}
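/*
 * The signed cast above makes the position comparison robust to counter
 * wrap-around. Illustration (assuming a 64-bit unsigned long):
 *
 *	consumed_pos = 100, rotate_position = 200:
 *	(long) (100 - 200) == -100 < 0		-> rotate position not reached
 *
 *	consumed_pos = 5 (wrapped), rotate_position = ULONG_MAX - 5:
 *	(long) (5 - (ULONG_MAX - 5)) == 11 >= 0	-> rotate position reached
 */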
/*
 * Reset the state for a stream after a rotation occurred.
 */
void lttng_consumer_reset_stream_rotate_state(struct lttng_consumer_stream *stream)
{
	stream->rotate_position = 0;
	stream->rotate_ready = false;
}
/*
 * Perform the rotation of a local stream file.
 */
static
int rotate_local_stream(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream)
{
	int ret;

	DBG("Rotate local stream: stream key %" PRIu64 ", channel key %" PRIu64 " at path %s",
			stream->key,
			stream->chan->key,
			stream->channel_read_only_attributes.path);

	ret = close(stream->out_fd);
	if (ret < 0) {
		PERROR("Closing trace file (fd %d), stream %" PRIu64,
				stream->out_fd, stream->key);
		goto error;
	}

	ret = utils_create_stream_file(
			stream->channel_read_only_attributes.path,
			stream->name,
			stream->channel_read_only_attributes.tracefile_size,
			stream->tracefile_count_current,
			stream->uid, stream->gid, NULL);
	if (ret < 0) {
		ERR("Rotate create stream file");
		goto error;
	}
	stream->out_fd = ret;
	stream->tracefile_size_current = 0;

	if (!stream->metadata_flag) {
		struct lttng_index_file *index_file;

		lttng_index_file_put(stream->index_file);

		index_file = lttng_index_file_create(
				stream->channel_read_only_attributes.path,
				stream->name, stream->uid, stream->gid,
				stream->channel_read_only_attributes.tracefile_size,
				stream->tracefile_count_current,
				CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
		if (!index_file) {
			ERR("Create index file during rotation");
			goto error;
		}
		stream->index_file = index_file;
		stream->out_fd_offset = 0;
	}

	ret = 0;
	goto end;

error:
	ret = -1;
end:
	return ret;
}
/*
 * Perform the rotation of a stream file on the relay.
 */
static
int rotate_relay_stream(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	DBG("Rotate relay stream");
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (!relayd) {
		ERR("Failed to find relayd");
		ret = -1;
		goto end;
	}

	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_rotate_stream(&relayd->control_sock,
			stream->relayd_stream_id,
			stream->channel_read_only_attributes.path,
			stream->chan->current_chunk_id,
			stream->last_sequence_number);
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	if (ret < 0) {
		ERR("Relayd rotate stream failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
	}
	if (ret) {
		ERR("Rotate relay stream");
	}

end:
	return ret;
}
/*
 * Performs the stream rotation for the rotate session feature if needed.
 * It must be called with the stream lock held.
 *
 * Return 0 on success, a negative number on error.
 */
int lttng_consumer_rotate_stream(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, bool *rotated)
{
	int ret;

	DBG("Consumer rotate stream %" PRIu64, stream->key);

	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		ret = rotate_relay_stream(ctx, stream);
	} else {
		ret = rotate_local_stream(ctx, stream);
	}
	stream->trace_archive_id++;
	if (ret < 0) {
		ERR("Failed to rotate stream, ret = %i", ret);
		goto error;
	}

	if (stream->metadata_flag) {
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Reset the position of what has been read from the metadata
			 * cache to 0 so we can dump it again.
			 */
			ret = kernctl_metadata_cache_dump(stream->wait_fd);
			if (ret < 0) {
				ERR("Failed to dump the kernel metadata cache after rotation");
				goto error;
			}
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Reset the position pushed from the metadata cache so it
			 * will write from the beginning on the next push.
			 */
			stream->ust_metadata_pushed = 0;
			break;
		default:
			ERR("Unknown consumer_data type");
			abort();
		}
	}
	lttng_consumer_reset_stream_rotate_state(stream);

	if (rotated) {
		*rotated = true;
	}

	ret = 0;

error:
	return ret;
}
/*
 * Rotate all the ready streams now.
 *
 * This is especially important for low throughput streams that have already
 * been consumed, we cannot wait for their next packet to perform the
 * rotation.
 *
 * Must be called with the RCU read-side lock held to ensure existence of the
 * channel.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_rotate_ready_streams(struct lttng_consumer_channel *channel,
		uint64_t key, struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();

	DBG("Consumer rotate ready streams in channel %" PRIu64, key);

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key, &iter.iter,
			stream, node_channel_id.node) {
		health_code_update();

		pthread_mutex_lock(&stream->lock);

		if (!stream->rotate_ready) {
			pthread_mutex_unlock(&stream->lock);
			continue;
		}
		DBG("Consumer rotate ready stream %" PRIu64, stream->key);

		ret = lttng_consumer_rotate_stream(ctx, stream, NULL);
		pthread_mutex_unlock(&stream->lock);
		if (ret) {
			goto end;
		}

		ret = consumer_post_rotation(stream, ctx);
		if (ret) {
			goto end;
		}
	}

	ret = 0;

end:
	rcu_read_unlock();
	return ret;
}
static
int rotate_rename_local(const char *old_path, const char *new_path,
		uid_t uid, gid_t gid)
{
	int ret;

	assert(old_path);
	assert(new_path);

	ret = utils_mkdir_recursive(new_path, S_IRWXU | S_IRWXG, uid, gid);
	if (ret < 0) {
		ERR("Create directory on rotate");
		goto end;
	}

	ret = rename(old_path, new_path);
	if (ret < 0 && errno != ENOENT) {
		PERROR("Rename completed rotation chunk");
		goto end;
	}

	ret = 0;
end:
	return ret;
}
static
int rotate_rename_relay(const char *old_path, const char *new_path,
		uint64_t relayd_id)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	relayd = consumer_find_relayd(relayd_id);
	if (!relayd) {
		ERR("Failed to find relayd while running rotate_rename_relay command");
		ret = -1;
		goto end;
	}

	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_rotate_rename(&relayd->control_sock, old_path, new_path);
	if (ret < 0) {
		ERR("Relayd rotate rename failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
	}
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
end:
	return ret;
}
int lttng_consumer_rotate_rename(const char *old_path, const char *new_path,
		uid_t uid, gid_t gid, uint64_t relayd_id)
{
	if (relayd_id != -1ULL) {
		return rotate_rename_relay(old_path, new_path, relayd_id);
	} else {
		return rotate_rename_local(old_path, new_path, uid, gid);
	}
}
/* Stream lock must be acquired by the caller. */
static
bool check_stream_rotation_pending(const struct lttng_consumer_stream *stream,
		uint64_t session_id, uint64_t chunk_id)
{
	bool pending = false;

	if (stream->session_id != session_id) {
		/* Skip. */
		goto end;
	}

	/*
	 * If the stream's archive_id belongs to the chunk being rotated (or an
	 * even older one), it means that the consumer has not consumed all the
	 * buffers that belong to the chunk being rotated. Therefore, the
	 * rotation is considered as ongoing/pending.
	 */
	pending = stream->trace_archive_id <= chunk_id;
end:
	return pending;
}
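/*
 * Illustration of the predicate above: when rotating away from chunk_id 3,
 * a stream of the same session whose trace_archive_id is still 3 (or lower)
 * has not consumed all buffers of that chunk, so the rotation is pending; a
 * stream already at trace_archive_id 4 has moved past it:
 *
 *	// stream->trace_archive_id == 3
 *	check_stream_rotation_pending(stream, session_id, 3);	// true
 *	// stream->trace_archive_id == 4
 *	check_stream_rotation_pending(stream, session_id, 3);	// false
 */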
/* RCU read lock must be acquired by the caller. */
int lttng_consumer_check_rotation_pending_local(uint64_t session_id,
		uint64_t chunk_id)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;
	bool rotation_pending = false;

	/* Start with the metadata streams... */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		pthread_mutex_lock(&stream->lock);
		rotation_pending = check_stream_rotation_pending(stream,
				session_id, chunk_id);
		pthread_mutex_unlock(&stream->lock);
		if (rotation_pending) {
			goto end;
		}
	}

	/* ... followed by the data streams. */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		pthread_mutex_lock(&stream->lock);
		rotation_pending = check_stream_rotation_pending(stream,
				session_id, chunk_id);
		pthread_mutex_unlock(&stream->lock);
		if (rotation_pending) {
			goto end;
		}
	}

end:
	return !!rotation_pending;
}
int lttng_consumer_check_rotation_pending_relay(uint64_t session_id,
		uint64_t relayd_id, uint64_t chunk_id)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	relayd = consumer_find_relayd(relayd_id);
	if (!relayd) {
		ERR("Failed to find relayd id %" PRIu64, relayd_id);
		ret = -1;
		goto end;
	}

	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_rotate_pending(&relayd->control_sock, chunk_id);
	if (ret < 0) {
		ERR("Relayd rotate pending failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
	}
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);

end:
	return ret;
}
static
int mkdir_local(const char *path, uid_t uid, gid_t gid)
{
	int ret;

	ret = utils_mkdir_recursive(path, S_IRWXU | S_IRWXG, uid, gid);
	if (ret < 0) {
		/* utils_mkdir_recursive logs an error. */
		goto end;
	}

	ret = 0;
end:
	return ret;
}
static
int mkdir_relay(const char *path, uint64_t relayd_id)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	relayd = consumer_find_relayd(relayd_id);
	if (!relayd) {
		ERR("Failed to find relayd");
		ret = -1;
		goto end;
	}

	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_mkdir(&relayd->control_sock, path);
	if (ret < 0) {
		ERR("Relayd mkdir failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
	}
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);

end:
	return ret;
}
int lttng_consumer_mkdir(const char *path, uid_t uid, gid_t gid,
		uint64_t relayd_id)
{
	if (relayd_id != -1ULL) {
		return mkdir_relay(path, relayd_id);
	} else {
		return mkdir_local(path, uid, gid);
	}
}
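/*
 * Usage sketch: the relayd_id convention here is shared with the rotation
 * helpers above; (uint64_t) -1ULL means "no relayd", i.e. local storage:
 *
 *	// Local session: create the directory on the consumer host.
 *	ret = lttng_consumer_mkdir(path, uid, gid, (uint64_t) -1ULL);
 *
 *	// Network session: forward the mkdir to the relay daemon.
 *	ret = lttng_consumer_mkdir(path, uid, gid, relayd_id);
 */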
enum lttcomm_return_code lttng_consumer_init_command(
		struct lttng_consumer_local_data *ctx,
		const lttng_uuid sessiond_uuid)
{
	enum lttcomm_return_code ret;
	char uuid_str[UUID_STR_LEN];

	if (ctx->sessiond_uuid.is_set) {
		ret = LTTCOMM_CONSUMERD_ALREADY_SET;
		goto end;
	}

	ctx->sessiond_uuid.is_set = true;
	memcpy(ctx->sessiond_uuid.value, sessiond_uuid, sizeof(lttng_uuid));
	ret = LTTCOMM_CONSUMERD_SUCCESS;
	lttng_uuid_to_str(sessiond_uuid, uuid_str);
	DBG("Received session daemon UUID: %s", uuid_str);
end:
	return ret;
}
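/*
 * Minimal sketch (hypothetical ctx and uuid values) of the handshake served
 * by lttng_consumer_init_command(): the first INIT command records the
 * session daemon's UUID, any later attempt is refused:
 *
 *	enum lttcomm_return_code code;
 *
 *	code = lttng_consumer_init_command(ctx, uuid);	// LTTCOMM_CONSUMERD_SUCCESS
 *	code = lttng_consumer_init_command(ctx, uuid);	// LTTCOMM_CONSUMERD_ALREADY_SET
 */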