/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/socket.h>
#include <sys/types.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer-timer.h>

#include "consumer-stream.h"
#include "consumer-testpoint.h"
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};
enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};

struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};
/*
 * Flag to inform the polling thread to quit when all fds hung up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds have hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 */
volatile int consumer_quit;
/*
 * Global hash table containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll thread
 * for the metadata and the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed so we just send back the thread in a poll wait
 * call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	ssize_t ret;
	struct consumer_channel_msg msg;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;
	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}
void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		ret = -1;
		goto error;
	}
	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;
error:
	return (int) ret;
}
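
/*
 * Illustrative sketch (not part of the original file): how the two halves of
 * the channel pipe protocol above fit together. A control-side caller posts
 * an ADD message and the polling thread drains it with read_channel_pipe().
 * The "example_" name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_channel_pipe_round_trip(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan)
{
	struct lttng_consumer_channel *recv_chan;
	uint64_t key;
	enum consumer_channel_action action;

	/* Sender side: key is unused for an ADD, the pointer is the payload. */
	notify_channel_pipe(ctx, chan, -1, CONSUMER_CHANNEL_ADD);

	/* Receiver side (normally the data thread's poll loop). */
	if (read_channel_pipe(ctx, &recv_chan, &key, &action) > 0
			&& action == CONSUMER_CHANNEL_ADD) {
		/* recv_chan == chan: the pointer travelled through the pipe. */
	}
}
#endif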
/*
 * Cleanup the stream list of a channel. Those streams are not yet globally
 * visible.
 */
static void clean_channel_stream_list(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so we
		 * have a guarantee that this call will succeed. Setting the monitor
		 * mode to 0 so we don't lock nor try to delete the stream from the
		 * global hash table.
		 */
		stream->monitor = 0;
		consumer_stream_destroy(stream, NULL);
	}
}
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	return stream;
}
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	rcu_read_lock();
	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
	rcu_read_unlock();
}
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	free(channel);
}
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call RCU since we don't want the
	 * socket fds to be reassigned thus potentially creating bad state of the
	 * relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	free(relayd);
}
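
/*
 * Illustrative sketch (not part of the original file): why the socket close
 * and free above are safe inside the RCU callback. A reader only touches the
 * relayd object between rcu_read_lock()/rcu_read_unlock(), so by the time
 * free_relayd_rcu() runs, no pre-existing reader can still see the object.
 * The "example_" name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_rcu_reader(uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd) {
		/* Safe: free_relayd_rcu() cannot run while we are in the section. */
		DBG("Relayd %" PRIu64 " control fd %d", net_seq_idx,
				relayd->control_sock.sock.fd);
	}
	rcu_read_unlock();
}
#endif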
/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Destroy streams that might have been left in the stream list. */
	clean_channel_stream_list(channel);

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	call_rcu(&channel->node.head, free_channel_rcu);
end:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
	rcu_read_unlock();
}
/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in an RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state has changed.
 */
static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
		struct lttng_consumer_local_data *ctx)
{
	uint64_t netidx;

	assert(relayd);

	DBG("Cleaning up relayd sockets");

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in an RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * has changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	if (ctx) {
		notify_thread_lttng_pipe(ctx->consumer_data_pipe);
		notify_thread_lttng_pipe(ctx->consumer_metadata_pipe);
	}
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	assert(relayd);

	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}
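
/*
 * Illustrative sketch (not part of the original file): the refcount/
 * destroy_flag handshake consumer_flag_relayd_for_destroy() relies on. A user
 * of the socket pair pins it with the refcount; the last user to drop the
 * count checks the flag and performs the destruction itself. The "example_"
 * name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_relayd_user(struct consumer_relayd_sock_pair *relayd)
{
	uatomic_inc(&relayd->refcount);

	/* ... use relayd->control_sock / relayd->data_sock ... */

	if (!uatomic_sub_return(&relayd->refcount, 1)
			&& uatomic_read(&relayd->destroy_flag)) {
		/* We were the last user and destruction was requested. */
		consumer_destroy_relayd(relayd);
	}
}
#endif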
/*
 * Completely destroy stream from every visible data structure and the given
 * hash table if one.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}

/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		uint64_t session_id,
		int cpu,
		int *alloc_ret,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		ret = -ENOMEM;
		goto end;
	}

	rcu_read_lock();

	stream->key = stream_key;
	stream->out_fd = -1;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->state = state;
	stream->uid = uid;
	stream->gid = gid;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_fd = -1;
	pthread_mutex_init(&stream->lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	rcu_read_unlock();
	return stream;

error:
	rcu_read_unlock();
	free(stream);
end:
	if (alloc_ret) {
		*alloc_ret = ret;
	}
	return NULL;
}
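
/*
 * Illustrative sketch (not part of the original file): a typical allocation of
 * a per-CPU data stream followed by its publication in the global hash tables.
 * Parts of the signature above were reconstructed, so treat the exact
 * parameter list as an assumption; the "example_" name is hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_create_stream(struct lttng_consumer_channel *chan, int cpu,
		int wait_fd)
{
	struct lttng_consumer_stream *stream;

	stream = consumer_allocate_stream(chan->key, wait_fd,
			LTTNG_CONSUMER_ACTIVE_STREAM, chan->name,
			chan->uid, chan->gid, chan->relayd_id, chan->session_id,
			cpu, NULL, CONSUMER_CHANNEL_TYPE_DATA, chan->monitor);
	if (!stream) {
		return -1;
	}
	stream->chan = chan;
	return consumer_add_data_stream(stream);
}
#endif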
/*
 * Add a stream to the global list protected by a mutex.
 */
int consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;
	int ret = 0;

	assert(stream);
	assert(ht);

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow adding redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}
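
/*
 * Illustrative sketch (not part of the original file): the lock hierarchy
 * used by consumer_add_data_stream() above. Every writer takes the locks in
 * this fixed order (and releases them in reverse) so the threads touching
 * channel and stream state cannot deadlock against each other. The
 * "example_" name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_lock_hierarchy(struct lttng_consumer_stream *stream)
{
	pthread_mutex_lock(&consumer_data.lock);		/* 1. global */
	pthread_mutex_lock(&stream->chan->lock);		/* 2. channel */
	pthread_mutex_lock(&stream->chan->timer_lock);	/* 3. channel timer */
	pthread_mutex_lock(&stream->lock);			/* 4. stream */

	/* ... mutate global/channel/stream state ... */

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
#endif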
void consumer_del_data_stream(struct lttng_consumer_stream *stream)
{
	consumer_del_stream(stream, data_ht);
}
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	assert(relayd);

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}
/*
 * Allocate and return a consumer relayd socket.
 */
struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->refcount = 0;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
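
/*
 * Illustrative sketch (not part of the original file): pairing
 * consumer_allocate_relayd_sock_pair() with add_relayd(). The RCU read-side
 * lock must cover the hash table insertion, per the add_relayd() contract.
 * The "example_" name is hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_register_relayd(uint64_t net_seq_idx)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
	if (!relayd) {
		return -1;
	}

	/* ... connect relayd->control_sock and relayd->data_sock here ... */

	rcu_read_lock();
	ret = add_relayd(relayd);
	rcu_read_unlock();
	return ret;
}
#endif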
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}
/*
 * Find a relayd and send the stream to it.
 *
 * Returns 0 on success, < 0 on error.
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(stream->net_seq_idx != -1ULL);
	assert(path);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size, stream->chan->tracefile_count);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto end;
		}

		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
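
/*
 * Illustrative sketch (not part of the original file): the caller-side check
 * that decides whether a stream goes through consumer_send_relayd_stream().
 * A net_seq_idx of -1ULL means local tracing, so the relayd path is skipped.
 * The "example_" name is hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_maybe_send_to_relayd(struct lttng_consumer_stream *stream,
		char *path)
{
	if (stream->net_seq_idx == (uint64_t) -1ULL) {
		return 0;	/* Local output: nothing to announce. */
	}
	return consumer_send_relayd_stream(stream, path);
}
#endif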
/*
 * Find a relayd and send the streams_sent message.
 *
 * Returns 0 on success, < 0 on error.
 */
int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_streams_sent(&relayd->control_sock);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto end;
		}
	} else {
		ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.",
				net_seq_idx);
		ret = -1;
		goto end;
	}

	ret = 0;
	DBG("All streams sent relayd id %" PRIu64, net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Find a relayd and close the stream.
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Safety net */
	assert(stream);
	assert(relayd);

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that the next_net_seq_num will be
		 * incremented. This is why when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
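
/*
 * Illustrative sketch (not part of the original file): the off-by-one rule
 * from the comment above. Because the header carries the *current*
 * next_net_seq_num before it is incremented, a caller comparing against the
 * relayd's last-seen sequence number must subtract one. The "example_" name
 * is hypothetical.
 */
#if 0	/* example only, not compiled */
static uint64_t example_last_sent_seq_num(struct lttng_consumer_stream *stream)
{
	/* next_net_seq_num is the value the *next* packet will carry. */
	return stream->next_net_seq_num - 1;
}
#endif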
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->uid = uid;
	channel->gid = gid;
	channel->relayd_id = relayd_id;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	switch (output) {
	case LTTNG_EVENT_SPLICE:
		channel->output = CONSUMER_CHANNEL_SPLICE;
		break;
	case LTTNG_EVENT_MMAP:
		channel->output = CONSUMER_CHANNEL_MMAP;
		break;
	default:
		assert(0);
		free(channel);
		channel = NULL;
		goto end;
	}

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	lttng_ht_node_init_u64(&channel->node, channel->key);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	return channel;
}
/*
 * Add a channel to the global list protected by a mutex.
 *
 * On success 0 is returned else a negative value.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);
	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht, &channel->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		/* Channel already exists. Ignore the insertion. */
		ERR("Consumer add channel key %" PRIu64 " already exists!",
				channel->key);
		ret = -EEXIST;
		goto end;
	}

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);

end:
	rcu_read_unlock();
	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (!ret && channel->wait_fd != -1 &&
			channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}

	return ret;
}
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(local_stream);

	DBG("Updating poll fd array");
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			continue;
		}
		/*
		 * This clobbers way too much the debug output. Uncomment that if you
		 * need it for debugging purposes.
		 *
		 * DBG("Active FD %d", stream->wait_fd);
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;
	return i;
}
/*
 * Poll on the should_quit pipe and the command socket. Return -1 on error and
 * the caller should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		goto exit;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0, it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds and MUST be called when all
 * instances are destroyed i.e. when all threads were joined and are ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	consumer_quit = 1;
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
}
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible for doing the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		PERROR("Error creating thread pipe");
		goto error_thread_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_splice_metadata_pipe);
	if (ret < 0) {
		goto error_splice_pipe;
	}

	return ctx;

error_splice_pipe:
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_thread_pipe);
error_thread_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
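
/*
 * Illustrative sketch (not part of the original file): wiring up a kernel
 * consumer context. The callback names are placeholders for the functions a
 * consumer daemon would provide; the "example_" names are hypothetical.
 */
#if 0	/* example only, not compiled */
static struct lttng_consumer_local_data *example_setup_kernel_ctx(void)
{
	struct lttng_consumer_local_data *ctx;

	ctx = lttng_consumer_create(LTTNG_CONSUMER_KERNEL,
			example_buffer_ready,	/* data ready on a buffer */
			example_recv_channel,	/* new channel from sessiond */
			example_recv_stream,	/* new stream from sessiond */
			example_update_stream);	/* stream state update */
	if (!ctx) {
		return NULL;
	}
	lttng_consumer_set_error_sock(ctx, -1);
	return ctx;
}
#endif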
/*
 * Iterate over all streams of the hashtable and free them properly.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Iterate over all streams of the metadata hashtable and free them
 * properly.
 */
static void destroy_metadata_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	destroy_data_stream_ht(data_ht);
	destroy_metadata_stream_ht(metadata_ht);

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_thread_pipe);
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	utils_close_pipe(ctx->consumer_should_quit);
	utils_close_pipe(ctx->consumer_splice_metadata_pipe);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd, unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the perror
		 * not to clobber the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Careful review MUST be done if any changes occur!
 *
 * Returns the number of bytes written.
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	unsigned long mmap_offset;
	void *mmap_base;
	ssize_t ret = 0, written = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check if the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			written = -errno;
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			written = -EPERM;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			written = ret;
			goto end;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret < 0) {
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;

		/* Write metadata stream id before payload */
		if (stream->metadata_flag) {
			ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
			if (ret < 0) {
				relayd_hang_up = 1;
				goto write_error;
			}
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = index_create_file(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current);
				if (ret < 0) {
					goto end;
				}
				stream->index_fd = ret;
			}

			/* Reset current size because we just performed a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		if (index) {
			index->offset = htobe64(stream->out_fd_offset);
		}
	}

	/*
	 * This call guarantees that len or less is returned. It's impossible to
	 * receive a ret value that is bigger than len.
	 */
	ret = lttng_write(outfd, mmap_base + mmap_offset, len);
	DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
	if (ret < 0 || ((size_t) ret != len)) {
		/*
		 * Report error to caller if nothing was written else at least send the
		 * amount written.
		 */
		if (ret < 0) {
			written = -errno;
		} else {
			written = ret;
		}

		/* Socket operation failed. We consider the relayd dead */
		if (errno == EPIPE || errno == EINVAL || errno == EBADF) {
			/*
			 * This is possible if the fd is closed on the other side
			 * (outfd) or any write problem. It can be verbose a bit for a
			 * normal execution if for instance the relayd is stopped
			 * abruptly. This can happen so set this to a DBG statement.
			 */
			DBG("Consumer mmap write detected relayd hang up");
			relayd_hang_up = 1;
			goto write_error;
		}

		/* Unhandled error, print it and stop function right now. */
		PERROR("Error in write mmap (ret %zd != len %lu)", ret, len);
		goto end;
	}
	stream->output_written += ret;
	written = ret;

	/* This call is useless on a socket so better save a syscall. */
	if (!relayd) {
		/* This won't block, but will start writeout asynchronously */
		lttng_sync_file_range(outfd, stream->out_fd_offset, len,
				SYNC_FILE_RANGE_WRITE);
		stream->out_fd_offset += len;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check if the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			written = -ret;
			goto end;
		}
	}

	/*
	 * Choose right pipe for splice. Metadata and trace data are handled by
	 * different threads hence the use of two pipes in order not to race or
	 * corrupt the written data.
	 */
	if (stream->metadata_flag) {
		splice_pipe = ctx->consumer_splice_metadata_pipe;
	} else {
		splice_pipe = ctx->consumer_thread_pipe;
	}

	/* Write metadata stream id before payload */
	if (relayd) {
		int total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the function
			 * since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				written = ret;
				/* Socket operation failed. We consider the relayd dead */
				if (ret == -EBADF) {
					WARN("Remote relayd disconnected. Stopping");
					relayd_hang_up = 1;
					goto write_error;
				}
				goto end;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EBADF) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = index_create_file(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current);
				if (ret < 0) {
					goto end;
				}
				stream->index_fd = ret;
			}

			/* Reset current size because we just performed a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = ret_splice;
			PERROR("Error in relay splice");
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd && stream->metadata_flag) {
			size_t metadata_payload_size =
				sizeof(struct lttcomm_relayd_metadata_payload);

			/* Update counter to fit the spliced data */
			ret_splice += metadata_payload_size;
			len += metadata_payload_size;
			/*
			 * We do this so the return value can match the len passed as
			 * argument to this function.
			 */
			written -= metadata_payload_size;
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = ret_splice;
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EBADF || errno == EPIPE || errno == ESPIPE) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			PERROR("Error in file splice");
			goto splice_error;
		} else if (ret_splice > len) {
			/*
			 * We don't expect this code path to be executed but you never know
			 * so this is an extra protection against a buggy splice().
			 */
			ret = errno;
			written += ret_splice;
			PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice,
					len);
			goto splice_error;
		}

		/* All good, update current len and continue. */
		len -= ret_splice;

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
void lttng_consumer_close_all_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_all_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	struct lttng_consumer_channel *free_chan = NULL;

	assert(stream);
	/*
	 * This call should NEVER receive a regular stream. It must always be a
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);

	/* Remove any reference to that stream. */
	consumer_stream_delete(stream, ht);

	/* Close down everything including the relayd if one. */
	consumer_stream_close(stream);
	/* Destroy tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for a NULL
	 * pointer value.
	 */
	stream->chan->metadata_stream = NULL;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

	consumer_stream_free(stream);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
int consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	assert(stream);
	assert(ht);

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */
	rcu_read_lock();

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_unique_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow adding redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	rcu_read_unlock();

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);
	return ret;
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}
/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	assert(pollset);

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}
/*
 * Thread that polls on the metadata file descriptors and writes the
 * metadata to disk or to the network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	if (testpoint(consumerd_thread_metadata)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Metadata main loop started");

	while (1) {
		health_code_update();

		/* Only the metadata pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}

restart:
		DBG("Metadata poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might still be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < sizeof(stream)) {
						PERROR("read metadata stream");
						/*
						 * Continue here to handle the rest of the streams.
						 */
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI | LPOLLHUP);
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream; now read it. */
					do {
						health_code_update();

						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if we get
						 * a negative len, it means an error occurred, thus we
						 * simply remove the stream from the poll set and free
						 * it.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file
				 * descriptors and safely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				do {
					health_code_update();

					len = ctx->on_buffer_ready(stream, ctx);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred, thus we
					 * simply remove the stream from the poll set and free
					 * it.
					 */
				} while (len > 0);

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			}

			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

	/* All is OK */
	err = 0;
error:
end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
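/*
 * Note on the read loops above: on_buffer_ready() returns the number of
 * bytes consumed, 0 when no full sub-buffer is available, or a negative
 * errno value. An illustrative drain loop for a readable stream thus
 * looks like this (sketch only; the thread above is the authoritative
 * version):
 *
 *	do {
 *		len = ctx->on_buffer_ready(stream, ctx);
 *	} while (len > 0);
 *
 * -EAGAIN and -ENODATA are expected "nothing to read right now" results;
 * any other negative value causes the stream to be removed from the poll
 * set and deleted.
 */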
/*
 * This thread polls the fds in the set to consume the data and writes it
 * to the trace files if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i, err = -1;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);

	if (testpoint(consumerd_thread_data)) {
		goto error_testpoint;
	}

	health_code_update();

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		health_code_update();

		high_prio = 0;
		num_hup = 0;

		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/* allocate for all fds + 1 for the consumer_data_pipe */
			pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the consumer_data_pipe */
			local_stream = zmalloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit, consumer_cleanup the thread */
		if (nb_fd == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}
		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + 1);
		health_poll_entry();
		num_rdy = poll(pollfd, nb_fd + 1, -1);
		health_poll_exit();
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < sizeof(new_stream)) {
				PERROR("Consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}
	/* All is OK */
	err = 0;
end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}
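/*
 * Note on the polling layout above: update_poll_array() fills pollfd[0]
 * through pollfd[nb_fd - 1] with stream wait fds and is expected to place
 * the consumer_data_pipe read side in the extra slot at index nb_fd, which
 * is why the thread polls on nb_fd + 1 entries and checks
 * pollfd[nb_fd].revents before servicing streams. Each wake-up then makes
 * up to three passes over the array: POLLPRI (urgent) reads first, POLLIN
 * (normal) reads second, and hangup/error handling last, so pending data
 * is consumed before a hung-up stream is deleted.
 */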
/*
 * Close wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}

		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			if (stream->metadata_flag) {
				/* Safe and protected by the stream lock. */
				lttng_ustconsumer_close_metadata(stream->chan);
			} else {
				/*
				 * Note: a mutex is taken internally within
				 * liblttng-ust-ctl to protect timer wakeup_fd
				 * use from concurrent close.
				 */
				lttng_ustconsumer_close_stream_wakeup(stream);
			}
			break;
		default:
			ERR("Unknown consumer_data type");
			assert(0);
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
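/*
 * Note: closing only the wake-up end here is deliberate. The data and
 * metadata poll threads still own the read side of each stream; they
 * observe the hangup through poll(), flush and consume what remains, and
 * then delete the stream from the right hash table themselves.
 */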
static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodic flushes.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);

	if (testpoint(consumerd_thread_channel)) {
		goto error_testpoint;
	}

	health_code_update();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Channel main loop started");

	while (1) {
		health_code_update();

		/* Only the channel pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}

restart:
		DBG("Channel poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd */
			if (!revents) {
				continue;
			}

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might still be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						ERR("Error reading channel pipe");
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLIN | LPOLLPRI);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						/*
						 * This command should never be called if the channel
						 * has streams monitored by either the data or metadata
						 * thread. The consumer only notifies this thread with
						 * a channel del. command if it receives a destroy
						 * channel command from the session daemon, which sends
						 * it if a command prior to the GET_CHANNEL failed.
						 */
						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);
						assert(ret == 0);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							health_code_update();
							/* Destroy streams that might have been left in the stream list. */
							clean_channel_stream_list(chan);
							break;
						default:
							ERR("Unknown consumer_data type");
							assert(0);
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						goto restart;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might still be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				assert(ret == 0);

				/*
				 * This will close the wait fd for each stream associated to
				 * this channel AND monitored by the data/metadata thread,
				 * thus it will be cleaned up by the right thread.
				 */
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

	/* All is OK */
	err = 0;
end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
error_testpoint:
	DBG("Channel poll thread exiting");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
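/*
 * Note on the channel refcount convention used above: uatomic_sub_return()
 * returns the post-decrement value, so the thread that brings the refcount
 * to zero is the one that frees the channel, e.g.:
 *
 *	if (!uatomic_sub_return(&chan->refcount, 1)) {
 *		consumer_del_channel(chan);
 *	}
 *
 * On the hangup path this is additionally guarded by nb_init_stream_left,
 * so a channel whose streams are still being initialized is not destroyed
 * from under the thread that is setting them up.
 */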
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	assert(ctx);
	assert(sockpoll);

	if (lttng_consumer_poll_socket(sockpoll) < 0) {
		ret = -1;
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}
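/*
 * The command unix socket therefore carries two successive connections:
 * the first accept() (in the sessiond poll thread below) yields the
 * command socket proper, and this second accept() yields the dedicated
 * metadata socket stored in the consumer local data.
 */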
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret, err = -1;
	/*
	 * Structure to poll for incoming data on the communication socket;
	 * this avoids blocking on the sockets themselves.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);

	if (testpoint(consumerd_thread_sessiond)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret < 0) {
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		health_code_update();

		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			err = 0;
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			err = 0;	/* All is OK */
			goto end;
		}
		DBG("received command on sock");
	}

end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon which
	 * exited.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_all_metadata();

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	consumer_quit = 1;

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set, so as to quit gracefully.
	 */
	notify_thread_lttng_pipe(ctx->consumer_data_pipe);

	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);

	notify_health_quit_pipe(health_quit_pipe);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}
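/*
 * Teardown ordering note: on exit this thread closes all metadata streams
 * first (their producer, the session daemon, is gone), then raises
 * consumer_quit and nudges the other threads so each one re-evaluates its
 * exit condition: a NULL stream pointer written to consumer_data_pipe for
 * the data thread, a CONSUMER_CHANNEL_QUIT message for the channel thread
 * and a byte on the health quit pipe for the health thread.
 */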
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	pthread_mutex_lock(&stream->lock);
	if (stream->metadata_flag) {
		pthread_mutex_lock(&stream->metadata_rdv_lock);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		ret = -ENOSYS;
		break;
	}

	if (stream->metadata_flag) {
		pthread_cond_broadcast(&stream->metadata_rdv);
		pthread_mutex_unlock(&stream->metadata_rdv_lock);
	}
	pthread_mutex_unlock(&stream->lock);
	return ret;
}
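/*
 * Rendez-vous note: while stream->lock is held, metadata waiters may block
 * on the metadata_rdv condition until a read attempt has completed. A
 * waiter would follow the usual pthread pattern (sketch; the predicate is
 * hypothetical and depends on what the waiter is waiting for):
 *
 *	pthread_mutex_lock(&stream->metadata_rdv_lock);
 *	while (metadata_not_yet_consumed) {
 *		pthread_cond_wait(&stream->metadata_rdv,
 *				&stream->metadata_rdv_lock);
 *	}
 *	pthread_mutex_unlock(&stream->metadata_rdv_lock);
 *
 * The broadcast above wakes every such waiter after each read attempt,
 * whether or not data was actually consumed.
 */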
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
int lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.channel_ht) {
		goto error;
	}

	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.relayd_ht) {
		goto error;
	}

	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_list_ht) {
		goto error;
	}

	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_per_chan_id_ht) {
		goto error;
	}

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!data_ht) {
		goto error;
	}

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		goto error;
	}

	return 0;

error:
	return -1;
}
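/*
 * Usage sketch (illustrative, not from the original source): the daemon is
 * expected to call this once at startup, before any of the polling threads
 * are created, and to abort if it fails since every lookup path above
 * relies on these u64-indexed tables:
 *
 *	if (lttng_consumer_init() < 0) {
 *		ERR("Failed to allocate consumer hash tables");
 *		exit(EXIT_FAILURE);
 *	}
 */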
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire an RCU read side lock before calling it.
 */
int consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
		uint64_t relayd_session_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(ctx);
	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret = -ENOMEM;
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		} else {
			relayd->sessiond_session_id = sessiond_id;
			relayd_created = 1;
		}

		/*
		 * This code path MUST continue to the consumer send status message so
		 * we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		ret = -EINTR;
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		ret = -1;
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. In any case,
		 * we don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		fd = -1;	/* For error path */
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		relayd->relayd_session_id = relayd_session_id;

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		fd = -1;	/* for eventual error paths */
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret = -1;
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);

	/* We successfully added the socket. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/*
	 * Add relayd socket pair to consumer data hashtable. If the object
	 * already exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return 0;

error:
	if (consumer_send_status_msg(sock, ret_code) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	}

error_nosignal:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}

	return ret;
}
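/*
 * Ownership note for the fd received above: once the descriptor from
 * lttcomm_recv_fds_unix_sock() is assigned to the relayd socket pair, the
 * local variable is reset to -1 so the error path cannot close an fd that
 * the relayd object now owns. Conversely, on any failure before that
 * hand-off, the error path closes the received fd and, if this call
 * allocated the socket pair, frees it again.
 */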
/*
 * Try to lock the stream mutex.
 *
 * On success, 1 is returned, else 0 indicating that the mutex is NOT locked.
 */
static int stream_try_lock(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Try to lock the stream mutex. On failure, we know that the stream is
	 * being used elsewhere, hence there is data still being extracted.
	 */
	ret = pthread_mutex_trylock(&stream->lock);
	if (ret) {
		/* For both EBUSY and EINVAL error, the mutex is NOT locked. */
		ret = 0;
		goto end;
	}

	ret = 1;

end:
	return ret;
}
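/*
 * This non-blocking probe is what lets consumer_data_pending() below treat
 * "stream lock currently held" as "data still being extracted" without
 * ever sleeping on a stream that another thread is actively consuming.
 */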
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * An RCU read side lock MUST be acquired before calling this function and
 * held until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here where the relayd session
		 * id might not be when having multiple relayd.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}
/*
 * Check if, for a given session id, there is still data needed to be
 * extracted from the buffers.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		assert(0);
	}

	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			/* Communication error with the relayd; assume no data is pending. */
			goto data_not_pending;
		}
	}

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		/* If this call fails, the stream is being used hence data pending. */
		ret = stream_try_lock(stream);
		if (!ret) {
			goto data_pending;
		}

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted, thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/*
			 * An empty output file is not valid. We need at least one packet
			 * generated per stream, even if it contains no event, so it
			 * contains at least one packet header.
			 */
			if (stream->output_written == 0) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		/* Relayd check */
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}
		pthread_mutex_unlock(&stream->lock);
	}

	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send end command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed, thus data is guaranteed to be available
	 * for analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
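/*
 * Decision summary: a session is reported as pending (1) as soon as any
 * stream is locked by another thread, has written no packet yet, still has
 * data in its buffers, or still has data in flight on the relayd side.
 * Only when every check is negative does the command report 0, meaning the
 * trace files are complete and safe to read.
 */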
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	assert(sock >= 0);

	if (!channel) {
		msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
	} else {
		msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Using a maximum stream size with the produced and consumed position of a
 * stream, computes the new consumed position to be as close as possible to
 * the maximum possible stream size.
 *
 * If the maximum stream size is lower than the possible buffer size
 * (produced - consumed), the given consumed_pos is returned untouched;
 * otherwise, the new value is returned.
 */
unsigned long consumer_get_consumed_maxsize(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t max_stream_size)
{
	if (max_stream_size && max_stream_size < (produced_pos - consumed_pos)) {
		/* Offset from the produced position to get the latest buffers. */
		return produced_pos - max_stream_size;
	}

	return consumed_pos;
}
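/*
 * Worked example: with consumed_pos = 0, produced_pos = 16384 and
 * max_stream_size = 4096, the backlog (16384 - 0) exceeds the maximum, so
 * the function returns 16384 - 4096 = 12288: the consumer skips ahead and
 * only the latest 4096 bytes worth of buffers are kept. With
 * max_stream_size = 0 (no limit), or with a backlog smaller than the
 * maximum, consumed_pos is returned unchanged.
 */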