/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/socket.h>
#include <sys/types.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/compat/endian.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer/consumer-timer.h>
#include <common/consumer/consumer.h>
#include <common/consumer/consumer-stream.h>
#include <common/consumer/consumer-testpoint.h>
#include <common/align.h>
#include <common/consumer/consumer-metadata-cache.h>
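
/*
 * Global state shared by every thread of the consumer daemon. The consumer
 * type (kernel or UST) is set once, at context creation time.
 */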
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};
enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};
struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};
/* Flag used to temporarily pause data consumption from testpoints. */
int data_consumption_paused;
/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by consumer_thread_receive_fds when it notices that all fds have
 * hung up. Also updated by the signal handler (consumer_should_exit()). Read
 * by the polling threads.
 */

/*
 * Global hash tables containing respectively the metadata and data streams.
 * The stream elements in these hash tables should only be updated by the
 * metadata poll thread for the metadata and by the data poll thread for the
 * data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed and the thread should go back into a poll wait.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
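
/*
 * Ask the health-management thread to quit by writing a dummy byte to its
 * quit pipe; only the wakeup matters, not the value written.
 */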
static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;

	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}
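
/*
 * Ask the channel-management thread to delete the channel identified by
 * "key".
 */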
void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		ret = -1;
		goto error;
	}
	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;
error:
	return (int) ret;
}
/*
 * Cleanup the stream list of a channel. Those streams are not yet globally
 * visible.
 */
static void clean_channel_stream_list(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so we
		 * have a guarantee that this call will succeed. Setting the monitor
		 * mode to 0 so we don't lock nor try to delete the stream from the
		 * global hash table.
		 */
		stream->monitor = 0;
		consumer_stream_destroy(stream, NULL);
	}
}
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	return stream;
}
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
}
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
/*
 * There is a possibility that the consumer does not have enough time between
 * the close of the channel on the session daemon and the cleanup in here thus
 * once we have a channel add with an existing key, we know for sure that this
 * channel will eventually get cleaned up by all streams being closed.
 *
 * This function just nullifies the already existing channel key.
 */
static void steal_channel_key(uint64_t key)
{
	struct lttng_consumer_channel *channel;

	channel = consumer_find_channel(key);
	if (channel) {
		channel->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need to iterate on
		 * this channel when iterating over the hash table. Just change the
		 * node key.
		 */
		channel->node.key = (uint64_t) -1ULL;
	}
}
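
/*
 * RCU callback performing the deferred free of a channel once no RCU reader
 * can still hold a reference to it.
 */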
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_free_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		break;
	}
	free(channel);
}
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call RCU since we don't want the
	 * socket fds to be reassigned thus potentially creating bad state of the
	 * relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing to this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	pthread_mutex_destroy(&relayd->ctrl_sock_mutex);
	free(relayd);
}
/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Destroy streams that might have been left in the stream list. */
	clean_channel_stream_list(channel);

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}
	if (channel->monitor_timer_enabled == 1) {
		consumer_timer_monitor_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		break;
	}

	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);

	call_rcu(&channel->node.head, free_channel_rcu);

	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
}
/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in a RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state has changed.
 */
void lttng_consumer_cleanup_relayd(struct consumer_relayd_sock_pair *relayd)
{
	uint64_t netidx;

	DBG("Cleaning up relayd object ID %" PRIu64, relayd->net_seq_idx);

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in a RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * has changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	notify_thread_lttng_pipe(relayd->ctx->consumer_data_pipe);
	notify_thread_lttng_pipe(relayd->ctx->consumer_metadata_pipe);
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}
/*
 * Completely destroy a stream from every visible data structure and the given
 * hash table, if any.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}
/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		uint64_t session_id,
		int cpu,
		int *alloc_ret,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		ret = -ENOMEM;
		goto end;
	}

	stream->key = stream_key;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->state = state;
	stream->uid = uid;
	stream->gid = gid;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_file = NULL;
	stream->last_sequence_number = -1ULL;
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->metadata_timer_lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	return stream;

error:
	free(stream);
end:
	if (alloc_ret) {
		*alloc_ret = ret;
	}
	return NULL;
}
/*
 * Add a stream to the global list protected by a mutex.
 */
int consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return 0;
}
void consumer_del_data_stream(struct lttng_consumer_stream *stream)
{
	consumer_del_stream(stream, data_ht);
}
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}
/*
 * Allocate and return a consumer relayd socket.
 */
static struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->destroy_flag = 0;
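	/* Mark both relayd sockets as not yet connected. */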
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}
/*
 * Find a relayd and send the stream
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream->net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size, stream->chan->tracefile_count);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd add stream failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end;
		}

		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	return ret;
}
/*
 * Find a relayd and send the streams sent message
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd != NULL) {
		/* Send the "streams sent" message to the relayd. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_streams_sent(&relayd->control_sock);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd streams sent failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end;
		}
	} else {
		ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.",
				net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("All streams sent relayd id %" PRIu64, net_seq_idx);

end:
	return ret;
}
/*
 * Find a relayd and close the stream
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
}
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that the next_net_seq_num will be
		 * incremented. This is why when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval,
		const char *root_shm_path,
		const char *shm_path)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->uid = uid;
	channel->gid = gid;
	channel->relayd_id = relayd_id;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	switch (output) {
	case LTTNG_EVENT_SPLICE:
		channel->output = CONSUMER_CHANNEL_SPLICE;
		break;
	case LTTNG_EVENT_MMAP:
		channel->output = CONSUMER_CHANNEL_MMAP;
		break;
	default:
		free(channel);
		channel = NULL;
		goto end;
	}

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	if (root_shm_path) {
		strncpy(channel->root_shm_path, root_shm_path, sizeof(channel->root_shm_path));
		channel->root_shm_path[sizeof(channel->root_shm_path) - 1] = '\0';
	}
	if (shm_path) {
		strncpy(channel->shm_path, shm_path, sizeof(channel->shm_path));
		channel->shm_path[sizeof(channel->shm_path) - 1] = '\0';
	}

	lttng_ht_node_init_u64(&channel->node, channel->key);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	return channel;
}
/*
 * Add a channel to the global list protected by a mutex.
 *
 * Always return 0 indicating success.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);

	/*
	 * This gives us a guarantee that the channel we are about to add to the
	 * channel hash table will be unique. See the steal_channel_key() comment
	 * on why we need to steal the channel key at this stage.
	 */
	steal_channel_key(channel->key);

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);

	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
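
	/*
	 * Only notify the channel-management thread for data channels that
	 * already have a valid wait fd to monitor.
	 */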
	if (channel->wait_fd != -1 && channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}

	return 0;
}
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht, int *nb_inactive_fd)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(local_stream);

	DBG("Updating poll fd array");
	*nb_inactive_fd = 0;
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 *
		 * We track the number of inactive FDs because they still need to be
		 * closed by the polling thread after a wakeup on the data_pipe or
		 * metadata_pipe.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			(*nb_inactive_fd)++;
			continue;
		}
		/*
		 * This clobbers way too much the debug output. Uncomment it if you
		 * need it for debugging purposes.
		 *
		 * DBG("Active FD %d", stream->wait_fd);
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;
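
	/*
	 * The wakeup pipe goes right after the data pipe; like the data pipe it
	 * is not counted in the number of stream FDs returned to the caller.
	 */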
	(*pollfd)[i + 1].fd = lttng_pipe_get_readfd(ctx->consumer_wakeup_pipe);
	(*pollfd)[i + 1].events = POLLIN | POLLPRI;

	return i;
}
/*
 * Poll on the should_quit pipe and the command socket. Return -1 on
 * error, 1 if should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		return -1;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		return 1;
	}
	return 0;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}
/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0; it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds and MUST be called when all
 * instances are destroyed i.e. when all threads were joined and are ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	lttng_ht_destroy(consumer_data.channel_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	CMM_STORE_SHARED(consumer_quit, 1);
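	/*
	 * Wake up any thread polling on the should-quit pipe; the byte value
	 * itself does not matter.
	 */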
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
/*
 * Flush pending writes to trace output disk file.
 */
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int ret;
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	ret = posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
	if (ret && ret != -ENOSYS) {
		PERROR("posix_fadvise on fd %i", outfd);
	}
}
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument; this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ctx->consumer_wakeup_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_wakeup_pipe) {
		goto error_wakeup_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ctx->channel_monitor_pipe = -1;

	return ctx;

error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
error_wakeup_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}

	lttng_ht_destroy(ht);
}
/*
 * Iterate over all streams of the metadata hashtable and free them
 * properly.
 */
static void destroy_metadata_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}

	lttng_ht_destroy(ht);
}
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	destroy_data_stream_ht(data_ht);
	destroy_metadata_stream_ht(metadata_ht);

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
	utils_close_pipe(ctx->consumer_should_quit);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd, unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the PERROR
		 * not to clobber the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Careful review MUST be put if any changes occur!
 *
 * Returns the number of bytes written
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	unsigned long mmap_offset;
	void *mmap_base;
	ssize_t ret = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Flag that the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		if (ret < 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			ret = -EPERM;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			ret = -EINVAL;
			goto end;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->reset_metadata_flag) {
				ret = relayd_reset_metadata(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->metadata_version);
				if (ret < 0) {
					relayd_hang_up = 1;
					goto write_error;
				}
				stream->reset_metadata_flag = 0;
			}
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret < 0) {
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;

		/* Write metadata stream id before payload */
		if (stream->metadata_flag) {
			ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
			if (ret < 0) {
				relayd_hang_up = 1;
				goto write_error;
			}
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		if (stream->metadata_flag && stream->reset_metadata_flag) {
			ret = utils_truncate_stream_file(stream->out_fd, 0);
			if (ret < 0) {
				ERR("Reset metadata file");
				goto end;
			}
			stream->reset_metadata_flag = 0;
		}

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_file) {
				lttng_index_file_put(stream->index_file);
				stream->index_file = lttng_index_file_create(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current,
						CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
				if (!stream->index_file) {
					goto end;
				}
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	/*
	 * This call guarantee that len or less is returned. It's impossible to
	 * receive a ret value that is bigger than len.
	 */
	ret = lttng_write(outfd, mmap_base + mmap_offset, len);
	DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
	if (ret < 0 || ((size_t) ret != len)) {
		/*
		 * Report error to caller if nothing was written else at least send the
		 * amount written.
		 */
		/* Socket operation failed. We consider the relayd dead */
		if (errno == EPIPE || errno == EINVAL || errno == EBADF) {
			/*
			 * This is possible if the fd is closed on the other side
			 * (outfd) or any write problem. It can be verbose a bit for a
			 * normal execution if for instance the relayd is stopped
			 * abruptly. This can happen so set this to a DBG statement.
			 */
			DBG("Consumer mmap write detected relayd hang up");
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Unhandled error, print it and stop function right now. */
		PERROR("Error in write mmap (ret %zd != len %lu)", ret, len);
		goto end;
	}
	stream->output_written += ret;

	/* This call is useless on a socket so better save a syscall. */
	if (!relayd) {
		/* This won't block, but will start writeout asynchronously */
		lttng_sync_file_range(outfd, stream->out_fd_offset, len,
				SYNC_FILE_RANGE_WRITE);
		stream->out_fd_offset += len;
		lttng_consumer_sync_trace_file(stream, orig_offset);
	}

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		ERR("Relayd hangup. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Flag that the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			goto end;
		}
	}
	splice_pipe = stream->splice_pipe;

	/* Write metadata stream id before payload */
	if (relayd) {
		unsigned long total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the function
			 * since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			if (stream->reset_metadata_flag) {
				ret = relayd_reset_metadata(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->metadata_version);
				if (ret < 0) {
					relayd_hang_up = 1;
					goto write_error;
				}
				stream->reset_metadata_flag = 0;
			}
			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				written = ret;
				relayd_hang_up = 1;
				goto write_error;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret < 0) {
			written = ret;
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		if (stream->metadata_flag && stream->reset_metadata_flag) {
			ret = utils_truncate_stream_file(stream->out_fd, 0);
			if (ret < 0) {
				ERR("Reset metadata file");
				goto end;
			}
			stream->reset_metadata_flag = 0;
		}
		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				written = ret;
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_file) {
				lttng_index_file_put(stream->index_file);
				stream->index_file = lttng_index_file_create(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current,
						CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
				if (!stream->index_file) {
					goto end;
				}
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}
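
	/*
	 * The transfer is done in two stages: splice the sub-buffer from the
	 * ring-buffer fd into the intermediate pipe, then splice it from the
	 * pipe to the output fd (tracefile or relayd data socket).
	 */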
	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			PERROR("Error in relay splice");
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd && stream->metadata_flag) {
			size_t metadata_payload_size =
				sizeof(struct lttcomm_relayd_metadata_payload);

			/* Update counter to fit the spliced data */
			ret_splice += metadata_payload_size;
			len += metadata_payload_size;
			/*
			 * We do this so the return value can match the len passed as
			 * argument to this function.
			 */
			written -= metadata_payload_size;
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file (out_fd: %d), ret %zd",
				outfd, ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			relayd_hang_up = 1;
			goto write_error;
		} else if (ret_splice > len) {
			/*
			 * We don't expect this code path to be executed but you never know
			 * so this is an extra protection against a buggy splice().
			 */
			ret = errno;
			written += ret_splice;
			PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice,
					len);
			goto splice_error;
		} else {
			/* All good, update current len and continue. */
			len -= ret_splice;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		ERR("Relayd hangup. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
void lttng_consumer_close_all_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_all_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		break;
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	struct lttng_consumer_channel *free_chan = NULL;

	/*
	 * This call should NEVER receive regular stream. It must always be
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);
	if (stream->chan->metadata_cache) {
		/* Only applicable to userspace consumers. */
		pthread_mutex_lock(&stream->chan->metadata_cache->lock);
	}

	/* Remove any reference to that stream. */
	consumer_stream_delete(stream, ht);

	/* Close down everything including the relayd if one. */
	consumer_stream_close(stream);
	/* Destroy tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for a NULL
	 * pointer value.
	 */
	stream->chan->metadata_stream = NULL;

	if (stream->chan->metadata_cache) {
		pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
	}
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

	consumer_stream_free(stream);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
int consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
}
/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
}
/*
 * Thread that polls on the metadata file descriptors and writes the metadata
 * to disk or to the network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	if (testpoint(consumerd_thread_metadata)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
2279 DBG("Metadata main loop started");
2283 health_code_update();
2284 health_poll_entry();
2285 DBG("Metadata poll wait");
2286 ret
= lttng_poll_wait(&events
, -1);
2287 DBG("Metadata poll return from wait with %d fd(s)",
2288 LTTNG_POLL_GETNB(&events
));
2290 DBG("Metadata event caught in thread");
2292 if (errno
== EINTR
) {
2293 ERR("Poll EINTR caught");
2296 if (LTTNG_POLL_GETNB(&events
) == 0) {
2297 err
= 0; /* All is OK */
2304 /* From here, the event is a metadata wait fd */
2305 for (i
= 0; i
< nb_fd
; i
++) {
2306 health_code_update();
2308 revents
= LTTNG_POLL_GETEV(&events
, i
);
2309 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
2312 /* No activity for this FD (poll implementation). */
2316 if (pollfd
== lttng_pipe_get_readfd(ctx
->consumer_metadata_pipe
)) {
2317 if (revents
& LPOLLIN
) {
2320 pipe_len
= lttng_pipe_read(ctx
->consumer_metadata_pipe
,
2321 &stream
, sizeof(stream
));
2322 if (pipe_len
< sizeof(stream
)) {
2324 PERROR("read metadata stream");
2327 * Remove the pipe from the poll set and continue the loop
2328 * since their might be data to consume.
2330 lttng_poll_del(&events
,
2331 lttng_pipe_get_readfd(ctx
->consumer_metadata_pipe
));
2332 lttng_pipe_read_close(ctx
->consumer_metadata_pipe
);
2336 /* A NULL stream means that the state has changed. */
2337 if (stream
== NULL
) {
2338 /* Check for deleted streams. */
2339 validate_endpoint_status_metadata_stream(&events
);
2343 DBG("Adding metadata stream %d to poll set",
2346 /* Add metadata stream to the global poll events list */
2347 lttng_poll_add(&events
, stream
->wait_fd
,
2348 LPOLLIN
| LPOLLPRI
| LPOLLHUP
);
2349 } else if (revents
& (LPOLLERR
| LPOLLHUP
)) {
2350 DBG("Metadata thread pipe hung up");
2352 * Remove the pipe from the poll set and continue the loop
2353 * since their might be data to consume.
2355 lttng_poll_del(&events
,
2356 lttng_pipe_get_readfd(ctx
->consumer_metadata_pipe
));
2357 lttng_pipe_read_close(ctx
->consumer_metadata_pipe
);
2360 ERR("Unexpected poll events %u for sock %d", revents
, pollfd
);
2364 /* Handle other stream */
2370 uint64_t tmp_id
= (uint64_t) pollfd
;
2372 lttng_ht_lookup(metadata_ht
, &tmp_id
, &iter
);
2374 node
= lttng_ht_iter_get_node_u64(&iter
);
2377 stream
= caa_container_of(node
, struct lttng_consumer_stream
,
2380 if (revents
& (LPOLLIN
| LPOLLPRI
)) {
2381 /* Get the data out of the metadata file descriptor */
2382 DBG("Metadata available on fd %d", pollfd
);
2383 assert(stream
->wait_fd
== pollfd
);
2386 health_code_update();
2388 len
= ctx
->on_buffer_ready(stream
, ctx
);
2390 * We don't check the return value here since if we get
2391 * a negative len, it means an error occurred thus we
2392 * simply remove it from the poll set and free the
2397 /* It's ok to have an unavailable sub-buffer */
2398 if (len
< 0 && len
!= -EAGAIN
&& len
!= -ENODATA
) {
2399 /* Clean up stream from consumer and free it. */
2400 lttng_poll_del(&events
, stream
->wait_fd
);
2401 consumer_del_metadata_stream(stream
, metadata_ht
);
2403 } else if (revents
& (LPOLLERR
| LPOLLHUP
)) {
2404 DBG("Metadata fd %d is hup|err.", pollfd
);
2405 if (!stream
->hangup_flush_done
2406 && (consumer_data
.type
== LTTNG_CONSUMER32_UST
2407 || consumer_data
.type
== LTTNG_CONSUMER64_UST
)) {
2408 DBG("Attempting to flush and consume the UST buffers");
2409 lttng_ustconsumer_on_stream_hangup(stream
);
2411 /* We just flushed the stream now read it. */
2413 health_code_update();
2415 len
= ctx
->on_buffer_ready(stream
, ctx
);
2417 * We don't check the return value here since if we get
2418 * a negative len, it means an error occurred thus we
2419 * simply remove it from the poll set and free the
2425 lttng_poll_del(&events
, stream
->wait_fd
);
2427 * This call update the channel states, closes file descriptors
2428 * and securely free the stream.
2430 consumer_del_metadata_stream(stream
, metadata_ht
);
2432 ERR("Unexpected poll events %u for sock %d", revents
, pollfd
);
2436 /* Release RCU lock for the stream looked up */
2444 DBG("Metadata poll thread exiting");
2446 lttng_poll_clean(&events
);
2451 ERR("Health error occurred in %s", __func__
);
2453 health_unregister(health_consumerd
);
2454 rcu_unregister_thread();
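
/*
 * For illustration only: the poll threads in this file are plain pthread
 * entry points that receive the consumer local data as their argument.
 * Assuming a "ctx" previously obtained from lttng_consumer_create() (not
 * shown here), a caller could start and join the metadata thread roughly
 * like this:
 *
 *	pthread_t metadata_thread;
 *	int ret;
 *
 *	ret = pthread_create(&metadata_thread, NULL,
 *			consumer_thread_metadata_poll, ctx);
 *	if (ret) {
 *		errno = ret;
 *		PERROR("pthread_create metadata thread");
 *	}
 *	...
 *	ret = pthread_join(metadata_thread, NULL);
 */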
/*
 * This thread polls the fds in the set to consume the data and write
 * it to tracefile if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i, err = -1;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	/* Number of FDs with CONSUMER_ENDPOINT_INACTIVE but still open. */
	int nb_inactive_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);

	if (testpoint(consumerd_thread_data)) {
		goto error_testpoint;
	}

	health_code_update();

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		health_code_update();

		high_prio = 0;
		num_hup = 0;

		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/*
			 * Allocate for all fds +1 for the consumer_data_pipe and +1 for
			 * the wakeup pipe.
			 */
			pollfd = zmalloc((consumer_data.stream_count + 2) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			local_stream = zmalloc((consumer_data.stream_count + 2) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht, &nb_inactive_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit, consumer_cleanup the thread */
		if (nb_fd == 0 && nb_inactive_fd == 0 &&
				CMM_LOAD_SHARED(consumer_quit) == 1) {
			err = 0;	/* All is OK */
			goto end;
		}
		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + 2);
		if (testpoint(consumerd_thread_data_poll)) {
			goto end;
		}
		health_poll_entry();
		num_rdy = poll(pollfd, nb_fd + 2, -1);
		health_poll_exit();
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		if (caa_unlikely(data_consumption_paused)) {
			DBG("Data consumption paused, sleeping...");
			sleep(1);
			goto restart;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < sizeof(new_stream)) {
				PERROR("Consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Handle wakeup pipe. */
		if (pollfd[nb_fd + 1].revents & (POLLIN | POLLPRI)) {
			char dummy;
			ssize_t pipe_readlen;

			pipe_readlen = lttng_pipe_read(ctx->consumer_wakeup_pipe, &dummy,
					sizeof(dummy));
			if (pipe_readlen < 0) {
				PERROR("Consumer data wakeup pipe");
			}
			/* We've been awakened to handle stream(s). */
			ctx->has_wakeup = 0;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done ||
					local_stream[i]->has_data) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}

end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
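
/*
 * For illustration: the data poll thread is woken through
 * ctx->consumer_data_pipe. Writing a stream pointer hands the stream to the
 * local array on the next loop iteration; writing a NULL pointer (as
 * notify_thread_lttng_pipe() does) only forces the thread to re-evaluate
 * global state such as consumer_quit or flagged streams. A hypothetical
 * producer side could look like:
 *
 *	struct lttng_consumer_stream *stream = ...;
 *
 *	(void) lttng_pipe_write(ctx->consumer_data_pipe, &stream,
 *			sizeof(stream));
 */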
/*
 * Close wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			if (stream->metadata_flag) {
				/* Safe and protected by the stream lock. */
				lttng_ustconsumer_close_metadata(stream->chan);
			} else {
				/*
				 * Note: a mutex is taken internally within
				 * liblttng-ust-ctl to protect timer wakeup_fd
				 * use from concurrent close.
				 */
				lttng_ustconsumer_close_stream_wakeup(stream);
			}
			break;
		default:
			ERR("Unknown consumer_data type");
			assert(0);
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;
	int ret;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);

	if (testpoint(consumerd_thread_channel)) {
		goto error_testpoint;
	}

	health_code_update();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	DBG("Channel main loop started");

	while (1) {
restart:
		health_code_update();
		DBG("Channel poll wait");
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Channel poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						if (ret < 0) {
							ERR("Error reading channel pipe");
						}
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLERR | LPOLLHUP);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						/*
						 * This command should never be called if the channel
						 * has streams monitored by either the data or metadata
						 * thread. The consumer only notifies this thread with a
						 * channel del. command if it receives a destroy
						 * channel command from the session daemon that sends it
						 * if a command prior to the GET_CHANNEL failed.
						 */
						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							health_code_update();
							/* Destroy streams that might have been left in the stream list. */
							clean_channel_stream_list(chan);
							break;
						default:
							ERR("Unknown consumer_data type");
							assert(0);
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						goto restart;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);

				/*
				 * This will close the wait fd for each stream associated to
				 * this channel AND monitored by the data/metadata thread thus
				 * will be cleaned by the right thread.
				 */
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				rcu_read_unlock();
				goto end;
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
error_testpoint:
	DBG("Channel poll thread exiting");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
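
/*
 * For illustration: other threads hand channels to the channel poll thread
 * through the channel pipe. Following the consumer_channel_msg layout (chan
 * for an add, key for a del), a hypothetical caller announcing a newly
 * created channel and later asking for its removal could do:
 *
 *	notify_channel_pipe(ctx, chan, -1, CONSUMER_CHANNEL_ADD);
 *	...
 *	notify_channel_pipe(ctx, NULL, chan_key, CONSUMER_CHANNEL_DEL);
 */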
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	ret = lttng_consumer_poll_socket(sockpoll);
	if (ret) {
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret, err = -1;
	/*
	 * structure to poll for incoming data on communication socket avoids
	 * making blocking sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);

	if (testpoint(consumerd_thread_sessiond)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	ret = lttng_consumer_poll_socket(consumer_sockpoll);
	if (ret) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret) {
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		health_code_update();

		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			err = 0;
			goto end;
		}
		if (CMM_LOAD_SHARED(consumer_quit)) {
			DBG("consumer_thread_receive_fds received quit from signal");
			err = 0;	/* All is OK */
			goto end;
		}
		DBG("received command on sock");
	}

end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon which
	 * exited.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_all_metadata();

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	CMM_STORE_SHARED(consumer_quit, 1);

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set so as to quit gracefully.
	 */
	notify_thread_lttng_pipe(ctx->consumer_data_pipe);

	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);

	notify_health_quit_pipe(health_quit_pipe);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	pthread_mutex_lock(&stream->lock);
	if (stream->metadata_flag) {
		pthread_mutex_lock(&stream->metadata_rdv_lock);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		ret = -ENOSYS;
		break;
	}

	if (stream->metadata_flag) {
		pthread_cond_broadcast(&stream->metadata_rdv);
		pthread_mutex_unlock(&stream->metadata_rdv_lock);
	}
	pthread_mutex_unlock(&stream->lock);
	return ret;
}
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
int lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.channel_ht) {
		goto error;
	}

	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.relayd_ht) {
		goto error;
	}

	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_list_ht) {
		goto error;
	}

	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_per_chan_id_ht) {
		goto error;
	}

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!data_ht) {
		goto error;
	}

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		goto error;
	}

	return 0;

error:
	return -1;
}
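
/*
 * For illustration: lttng_consumer_init() is expected to run once at
 * consumerd start-up, before any of the polling threads are created, e.g.:
 *
 *	if (lttng_consumer_init() < 0) {
 *		ERR("Failed to allocate consumer hash tables");
 *		... abort start-up ...
 *	}
 */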
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
void consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
		uint64_t relayd_session_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		} else {
			relayd->sessiond_session_id = sessiond_id;
			relayd_created = 1;
		}

		/*
		 * This code path MUST continue to the consumer send status message so
		 * we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	ret = lttng_consumer_poll_socket(consumer_sockpoll);
	if (ret) {
		/* Needing to exit in the middle of a command: error. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. In any case, we
		 * don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		fd = -1;	/* For error path */
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		relayd->relayd_session_id = relayd_session_id;

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		fd = -1;	/* for eventual error paths */
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);

	/* We successfully added the socket. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/*
	 * Add relayd socket pair to consumer data hashtable. If object already
	 * exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return;

error:
	if (consumer_send_status_msg(sock, ret_code) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	}

error_nosignal:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}
}
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * A rcu read side lock MUST be acquired before calling this function and held
 * until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here where the relayd session
		 * id might not be when having multiple relayd.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}
/*
 * Check if, for a given session id, there is still data that needs to be
 * extracted from the buffers.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		assert(0);
	}

	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		pthread_mutex_lock(&stream->lock);

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		pthread_mutex_unlock(&stream->lock);
	}

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		if (ret < 0) {
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			/* Communication error with the relayd, so report no data pending. */
			goto data_not_pending;
		}

		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}

			if (ret == 1) {
				pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
				goto data_pending;
			} else if (ret < 0) {
				ERR("Relayd data pending failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
				lttng_consumer_cleanup_relayd(relayd);
				pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
				goto data_not_pending;
			}
		}

		/* Send end command for data pending. */
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd end data pending failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
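
/*
 * For illustration: consumer_data_pending() is driven by the session daemon
 * (typically while servicing lttng_data_pending()). A hypothetical caller
 * waiting until a session's buffers and network queues are drained could do:
 *
 *	while (consumer_data_pending(session_id)) {
 *		... wait a bit and retry ...
 *	}
 */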
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	memset(&msg, 0, sizeof(msg));
	if (!channel) {
		msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
	} else {
		msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
unsigned long consumer_get_consume_start_pos(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t nb_packets_per_stream,
		uint64_t max_sb_size)
{
	unsigned long start_pos;

	if (!nb_packets_per_stream) {
		return consumed_pos;	/* Grab everything */
	}
	start_pos = produced_pos - offset_align_floor(produced_pos, max_sb_size);
	start_pos -= max_sb_size * nb_packets_per_stream;
	if ((long) (start_pos - consumed_pos) < 0) {
		return consumed_pos;	/* Grab everything */
	}
	return start_pos;
}