/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#include <assert.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <inttypes.h>
#include <signal.h>

#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>

#include "consumer.h"
#include "consumer-stream.h"

struct lttng_consumer_global_data consumer_data = {
	.stream_count = 0,
	.need_update = 1,
	.type = LTTNG_CONSUMER_UNKNOWN,
};

enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};

struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};

/*
 * Flag to inform the polling thread to quit when all file descriptors have
 * hung up. Updated by consumer_thread_receive_fds when it notices that all
 * fds have hung up. Also updated by the signal handler
 * (consumer_should_exit()). Read by the polling threads.
 */
volatile int consumer_quit;

/*
 * Global hash table containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll thread
 * for the metadata and the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;

/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed so we just send back the thread in a poll wait
 * call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	assert(pipe);

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}

static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	int ret;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;
	do {
		ret = write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	} while (ret < 0 && errno == EINTR);
}

void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}

static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	int ret;

	do {
		ret = read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	} while (ret < 0 && errno == EINTR);
	if (ret > 0) {
		*action = msg.action;
		*chan = msg.chan;
		*key = msg.key;
	}
	return ret;
}

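/*
 * Usage sketch (hypothetical caller, not taken verbatim from this file): the
 * two helpers above form a tiny message protocol over
 * ctx->consumer_channel_pipe. A sender and the channel thread pair up like
 * this:
 *
 *	struct lttng_consumer_channel *chan;
 *	uint64_t key;
 *	enum consumer_channel_action action;
 *
 *	notify_channel_pipe(ctx, some_chan, some_key, CONSUMER_CHANNEL_ADD);
 *
 *	// ... in the channel management thread:
 *	if (read_channel_pipe(ctx, &chan, &key, &action) > 0) {
 *		switch (action) {
 *		case CONSUMER_CHANNEL_ADD:
 *			// use chan
 *			break;
 *		case CONSUMER_CHANNEL_DEL:
 *			// use key
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */
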
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	assert(ht);

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	return stream;
}

static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	rcu_read_lock();
	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
	rcu_read_unlock();
}

/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}

static void free_stream_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	free(stream);
}

static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	free(channel);
}

/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call_rcu callback since we
	 * don't want the socket fds to be reassigned, thus potentially creating
	 * a bad state for the relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this
	 * stage there is no one referencing this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	free(relayd);
}

/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}

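/*
 * Pattern sketch (hypothetical object, mirroring the functions above): an
 * object embedded in an RCU hash table is unlinked first, then reclaimed
 * through call_rcu() once every pre-existing RCU reader has finished.
 *
 *	static void free_obj_rcu(struct rcu_head *head)
 *	{
 *		struct lttng_ht_node_u64 *node =
 *			caa_container_of(head, struct lttng_ht_node_u64, head);
 *
 *		free(caa_container_of(node, struct my_obj, node));
 *	}
 *
 *	iter.iter.node = &obj->node.node;
 *	ret = lttng_ht_del(some_ht, &iter);	// unlink: no new lookups match
 *	if (!ret) {
 *		call_rcu(&obj->node.head, free_obj_rcu);	// deferred free
 *	}
 */
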
/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream, *stmp;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so
		 * we have a guarantee that this call will succeed.
		 */
		consumer_stream_destroy(stream, NULL);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	call_rcu(&channel->node.head, free_channel_rcu);
end:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}

/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}

/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked, which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
	rcu_read_unlock();
}

/*
 * Cleanup a relayd object by flagging every associated stream for deletion
 * and destroying the object, meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in an RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state have changed.
 */
static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
		struct lttng_consumer_local_data *ctx)
{
	uint64_t netidx;

	assert(relayd);

	DBG("Cleaning up relayd sockets");

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in a RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * have changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	if (ctx) {
		notify_thread_lttng_pipe(ctx->consumer_data_pipe);
		notify_thread_lttng_pipe(ctx->consumer_metadata_pipe);
	}
}

/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	assert(relayd);

	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}

/*
 * Completely destroy a stream from every visible data structure and the given
 * hash table, if any.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}

/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}

struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		uint64_t session_id,
		int cpu,
		int *alloc_ret,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		ret = -ENOMEM;
		goto end;
	}

	rcu_read_lock();

	stream->key = stream_key;
	stream->out_fd = -1;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->state = state;
	stream->uid = uid;
	stream->gid = gid;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_fd = -1;
	pthread_mutex_init(&stream->lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	rcu_read_unlock();
	return stream;

error:
	rcu_read_unlock();
	free(stream);
end:
	if (alloc_ret) {
		*alloc_ret = ret;
	}
	return NULL;
}

/*
 * Add a stream to the global list protected by a mutex.
 */
int consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;
	int ret = 0;

	assert(stream);
	assert(ht);

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow adding redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}

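/*
 * Locking order sketch, derived from the add/del paths in this file: nested
 * locks are always taken in the same order so threads touching the same
 * stream cannot deadlock against each other.
 *
 *	pthread_mutex_lock(&consumer_data.lock);
 *	pthread_mutex_lock(&stream->chan->lock);
 *	pthread_mutex_lock(&stream->chan->timer_lock);
 *	pthread_mutex_lock(&stream->lock);
 *	// ... mutate the global tables and the stream ...
 *	// then unlock in the reverse order
 */
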
void consumer_del_data_stream(struct lttng_consumer_stream *stream)
{
	consumer_del_stream(stream, data_ht);
}

/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	assert(relayd);

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}

/*
 * Allocate and return a consumer relayd socket.
 */
struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->refcount = 0;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}

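/*
 * Usage sketch (hypothetical caller): allocate the pair for a given relayd
 * index, then publish it in the global hash table while holding the RCU read
 * side lock, as add_relayd() requires.
 *
 *	struct consumer_relayd_sock_pair *relayd;
 *
 *	relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
 *	if (relayd) {
 *		rcu_read_lock();
 *		(void) add_relayd(relayd);
 *		rcu_read_unlock();
 *	}
 */
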
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}

/*
 * Find a relayd and send the stream.
 *
 * Returns 0 on success, < 0 on error.
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(stream->net_seq_idx != -1ULL);
	assert(path);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size, stream->chan->tracefile_count);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto end;
		}
		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}

/*
 * Find a relayd and close the stream.
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}

/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Safety net */
	assert(stream);
	assert(relayd);

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that will next_net_seq_num be
		 * incremented. This is why, when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}

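/*
 * Sequence number accounting sketch: net_seq_num is sent with the *current*
 * next_net_seq_num and incremented only after a successful send, so the last
 * sequence number actually seen by the relayd is next_net_seq_num - 1. This
 * is exactly what consumer_del_metadata_stream() passes when closing a
 * stream:
 *
 *	relayd_send_close_stream(&relayd->control_sock,
 *			stream->relayd_stream_id, stream->next_net_seq_num - 1);
 */
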
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->uid = uid;
	channel->gid = gid;
	channel->relayd_id = relayd_id;
	channel->output = output;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	lttng_ht_node_init_u64(&channel->node, channel->key);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	return channel;
}

/*
 * Add a channel to the global list protected by a mutex.
 *
 * On success 0 is returned else a negative value.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);
	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht, &channel->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		/* Channel already exists. Ignore the insertion. */
		ERR("Consumer add channel key %" PRIu64 " already exists!",
				channel->key);
		ret = -EEXIST;
		goto end;
	}

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);

end:
	rcu_read_unlock();
	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (!ret && channel->wait_fd != -1 &&
			channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}
	return ret;
}

/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(ctx);
	assert(ht);
	assert(pollfd);
	assert(local_stream);

	DBG("Updating poll fd array");
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			continue;
		}
		/*
		 * This clobbers way too much the debug output. Uncomment that if you
		 * need it for debugging purposes.
		 *
		 * DBG("Active FD %d", stream->wait_fd);
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;
	return i;
}

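/*
 * Caller sketch (mirroring consumer_thread_data_poll() below): both arrays
 * are sized for stream_count + 1 so the wakeup pipe fits in the extra slot,
 * and the return value is the number of real stream FDs.
 *
 *	pollfd = zmalloc((consumer_data.stream_count + 1) *
 *			sizeof(struct pollfd));
 *	local_stream = zmalloc((consumer_data.stream_count + 1) *
 *			sizeof(struct lttng_consumer_stream *));
 *	nb_fd = update_poll_array(ctx, &pollfd, local_stream, data_ht);
 *	num_rdy = poll(pollfd, nb_fd + 1, -1);	// +1 for the pipe slot
 */
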
/*
 * Poll on the should_quit pipe and the command socket. Return -1 on error
 * (and the caller should exit), 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		goto exit;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}

/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}

/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0; it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}

/*
 * Close all the tracefiles and stream fds. MUST be called when all
 * instances are destroyed, i.e. when all threads were joined and have ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);
}

/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	int ret;

	consumer_quit = 1;
	do {
		ret = write(ctx->consumer_should_quit[1], "4", 1);
	} while (ret < 0 && errno == EINTR);
	if (ret < 0 || ret != 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}

void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
}

/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument; this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		PERROR("Error creating thread pipe");
		goto error_thread_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_splice_metadata_pipe);
	if (ret < 0) {
		goto error_splice_pipe;
	}

	return ctx;

error_splice_pipe:
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_thread_pipe);
error_thread_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}

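/*
 * Usage sketch (hypothetical callbacks): a consumer binary creates one
 * context with its read/recv callbacks, then spawns the polling threads with
 * it.
 *
 *	ctx = lttng_consumer_create(LTTNG_CONSUMER_KERNEL,
 *			my_buffer_ready, my_recv_channel,
 *			my_recv_stream, my_update_stream);
 *	if (!ctx) {
 *		// allocation or pipe creation failed
 *	}
 */
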
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_thread_pipe);
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	utils_close_pipe(ctx->consumer_should_quit);
	utils_close_pipe(ctx->consumer_splice_metadata_pipe);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}

/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd, unsigned long padding)
{
	int ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	do {
		ret = write(fd, (void *) &hdr, sizeof(hdr));
	} while (ret < 0 && errno == EINTR);
	if (ret < 0 || ret != sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the perror
		 * not to clobber the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return ret;
}

/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Careful review MUST be done if any changes occur!
 *
 * Returns the number of bytes written.
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct lttng_packet_index *index)
{
	unsigned long mmap_offset;
	void *mmap_base;
	ssize_t ret = 0, written = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check if the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			written = -errno;
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			written = -EPERM;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			written = ret;
			goto end;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;

			/* Write metadata stream id before payload */
			if (stream->metadata_flag) {
				ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
				if (ret < 0) {
					written = ret;
					/* Socket operation failed. We consider the relayd dead */
					if (ret == -EPIPE || ret == -EINVAL) {
						relayd_hang_up = 1;
						goto write_error;
					}
					goto end;
				}
			}
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EPIPE || ret == -EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			/* Else, use the default set before which is the filesystem. */
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = index_create_file(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current);
				if (ret < 0) {
					goto end;
				}
				stream->index_fd = ret;
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		if (index) {
			index->offset = htobe64(stream->out_fd_offset);
		}
	}

	while (len > 0) {
		do {
			ret = write(outfd, mmap_base + mmap_offset, len);
		} while (ret < 0 && errno == EINTR);
		DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
		if (ret < 0) {
			/*
			 * This is possible if the fd is closed on the other side (outfd)
			 * or any write problem. It can be verbose a bit for a normal
			 * execution if for instance the relayd is stopped abruptly. This
			 * can happen so set this to a DBG statement.
			 */
			DBG("Error in file write mmap");
			if (written == 0) {
				written = -errno;
			}
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EPIPE || errno == EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		} else if (ret > len) {
			PERROR("Error in file write (ret %zd > len %lu)", ret, len);
			written += ret;
			goto end;
		} else {
			len -= ret;
			mmap_offset += ret;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret;
		}
		stream->output_written += ret;
		written += ret;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}

/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct lttng_packet_index *index)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check if the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/*
	 * Choose right pipe for splice. Metadata and trace data are handled by
	 * different threads hence the use of two pipes in order not to race or
	 * corrupt the written data.
	 */
	if (stream->metadata_flag) {
		splice_pipe = ctx->consumer_splice_metadata_pipe;
	} else {
		splice_pipe = ctx->consumer_thread_pipe;
	}

	/* Write metadata stream id before payload */
	if (relayd) {
		int total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the function
			 * since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				written = ret;
				/* Socket operation failed. We consider the relayd dead */
				if (ret == -EBADF) {
					WARN("Remote relayd disconnected. Stopping");
					relayd_hang_up = 1;
					goto write_error;
				}
				goto end;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EBADF) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = index_create_file(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current);
				if (ret < 0) {
					goto end;
				}
				stream->index_fd = ret;
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in relay splice");
			if (written == 0) {
				written = ret_splice;
			}
			ret = errno;
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd) {
			if (stream->metadata_flag) {
				size_t metadata_payload_size =
					sizeof(struct lttcomm_relayd_metadata_payload);

				/* Update counter to fit the spliced data */
				ret_splice += metadata_payload_size;
				len += metadata_payload_size;
				/*
				 * We do this so the return value can match the len passed as
				 * argument to this function.
				 */
				written -= metadata_payload_size;
			}
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in file splice");
			if (written == 0) {
				written = ret_splice;
			}
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EBADF || errno == EPIPE) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			ret = errno;
			goto splice_error;
		} else if (ret_splice > len) {
			errno = EINVAL;
			PERROR("Wrote more data than requested %zd (len: %lu)",
					ret_splice, len);
			written += ret_splice;
			ret = errno;
			goto splice_error;
		}
		len -= ret_splice;

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}

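/*
 * Data path sketch: the function above moves each packet in two splice()
 * hops, ring-buffer fd -> intermediate pipe -> output fd, so the data never
 * transits through user space.
 *
 *	ret = splice(fd, &offset, splice_pipe[1], NULL, len,
 *			SPLICE_F_MOVE | SPLICE_F_MORE);	// buffer -> pipe
 *	ret = splice(splice_pipe[0], NULL, outfd, NULL, ret,
 *			SPLICE_F_MOVE | SPLICE_F_MORE);	// pipe -> file/socket
 */
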
/*
 * Take a snapshot for a specific fd.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Get the produced position.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * WARNING: *MUST* be used with data streams only.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}

/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * XXX: Should not be only for metadata stream or else use another name.
 */
static void destroy_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}

void lttng_consumer_close_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}

/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	/*
	 * This call should NEVER receive a regular stream. It must always be a
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	if (ht == NULL) {
		/* Means the stream was allocated but not successfully added */
		goto free_stream_rcu;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap metadata stream");
			}
		}
		if (stream->wait_fd >= 0) {
			ret = close(stream->wait_fd);
			if (ret < 0) {
				PERROR("close kernel metadata wait_fd");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		if (stream->monitor) {
			/* close the write-side in close_metadata */
			ret = close(stream->ust_metadata_poll_pipe[0]);
			if (ret < 0) {
				PERROR("Close UST metadata read-side poll pipe");
			}
		}
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &stream->node.node;
	ret = lttng_ht_del(ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_channel_id.node;
	ret = lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_session_id.node;
	ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Check and cleanup relayd */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);

		/* Closing streams requires to lock the control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_close_stream(&relayd->control_sock,
				stream->relayd_stream_id, stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			DBG("Unable to close stream on the relayd. Continuing");
			/*
			 * Continue here. There is nothing we can do for the relayd.
			 * Chances are that the relayd has closed the socket so we just
			 * continue cleaning up.
			 */
		}

		/* Both conditions are met, we destroy the relayd. */
		if (uatomic_read(&relayd->refcount) == 0 &&
				uatomic_read(&relayd->destroy_flag)) {
			consumer_destroy_relayd(relayd);
		}
	}
	rcu_read_unlock();

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

end:
	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for
	 * a NULL pointer value.
	 */
	stream->chan->metadata_stream = NULL;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

free_stream_rcu:
	call_rcu(&stream->node.head, free_stream_rcu);
}

/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
int consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	assert(stream);
	assert(ht);

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	rcu_read_lock();

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_unique_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow adding redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	rcu_read_unlock();

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);
	return ret;
}

/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}

/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	assert(pollset);

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}

/*
 * Thread that polls on the metadata file descriptors and writes the metadata
 * to disk or to the network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

restart:
	DBG("Metadata main loop started");

	while (1) {
		/* Only the metadata pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			goto end;
		}

		DBG("Metadata poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		ret = lttng_poll_wait(&events, -1);
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < 0) {
						ERR("read metadata stream, ret: %zd", pipe_len);
						/*
						 * Continue here to handle the rest of the streams.
						 */
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI);
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if we get
						 * a negative len, it means an error occurred, thus we
						 * simply remove it from the poll set and free the
						 * stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file
				 * descriptors and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				do {
					len = ctx->on_buffer_ready(stream, ctx);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred, thus we
					 * simply remove it from the poll set and free the
					 * stream.
					 */
				} while (len > 0);

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			}

			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
	destroy_stream_ht(metadata_ht);
end_ht:
	rcu_unregister_thread();
	return NULL;
}
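/*
 * Usage sketch (hypothetical caller, not part of this file): the polling
 * threads in this file are plain pthread start routines taking the shared
 * lttng_consumer_local_data context, so a consumer daemon would typically
 * spawn them along the lines of:
 *
 *	pthread_t metadata_thread;
 *	int err = pthread_create(&metadata_thread, NULL,
 *			consumer_thread_metadata_poll, (void *) ctx);
 *	if (err) {
 *		errno = err;
 *		PERROR("pthread_create metadata thread");
 *	}
 *
 * Each thread registers itself with RCU on entry and unregisters on exit.
 */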
/*
 * This thread polls the fds in the set to consume the data and write
 * it to tracefile if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (data_ht == NULL) {
		/* ENOMEM at this point. Better to bail out. */
		goto end;
	}

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * The fds set has been updated, we need to update our
		 * local array as well.
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/* Allocate for all fds + 1 for the consumer_data_pipe */
			pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* Allocate for all fds + 1 for the consumer_data_pipe */
			local_stream = zmalloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs left and consumer_quit is set: clean up the thread. */
		if (nb_fd == 0 && consumer_quit == 1) {
			goto end;
		}

		/* Poll on the array of fds. */
	restart:
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, -1);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < 0) {
				ERR("Consumer data pipe ret %zd", pipe_readlen);
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* It's ok to have an unavailable sub-buffer. */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* It's ok to have an unavailable sub-buffer. */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}
end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

	destroy_data_stream_ht(data_ht);

	rcu_unregister_thread();
	return NULL;
}
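/*
 * Layout note (derived from the code above, with the assumption that
 * update_poll_array() places the consumer_data_pipe read end at index
 * nb_fd): the poll array holds one entry per live stream followed by the
 * wake-up pipe, e.g. with three streams:
 *
 *	pollfd[0..2] -> stream wait fds
 *	pollfd[3]    -> consumer_data_pipe (new stream / state change wake-up)
 *
 * which is why poll() is called on nb_fd + 1 entries and the pipe entry is
 * checked first, prioritizing array updates over low-priority reads.
 */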
/*
 * Close wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
static void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Note: a mutex is taken internally within
			 * liblttng-ust-ctl to protect timer wakeup_fd
			 * use from concurrent close.
			 */
			lttng_ustconsumer_close_stream_wakeup(stream);
			break;
		default:
			ERR("Unknown consumer_data type");
			assert(0);
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

restart:
	DBG("Channel main loop started");

	while (1) {
		/* Only the channel pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			goto end;
		}

		DBG("Channel poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		ret = lttng_poll_wait(&events, -1);
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd */
			if (!revents) {
				continue;
			}

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						ERR("Error reading channel pipe");
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLIN | LPOLLPRI);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						struct lttng_consumer_stream *stream, *stmp;

						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);
						assert(ret == 0);
						consumer_close_channel_streams(chan);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							/* Delete streams that might have been left in the stream list. */
							cds_list_for_each_entry_safe(stream, stmp, &chan->streams.head,
									send_node) {
								cds_list_del(&stream->send_node);
								lttng_ustconsumer_del_stream(stream);
								uatomic_sub(&stream->chan->refcount, 1);
								assert(&chan->refcount);
								free(stream);
							}
							break;
						default:
							ERR("Unknown consumer_data type");
							assert(0);
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						break;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				assert(ret == 0);
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
	DBG("Channel poll thread exiting");
	rcu_unregister_thread();
	return NULL;
}
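/*
 * Summary of the channel pipe protocol handled above: each message read by
 * read_channel_pipe() carries one of three actions:
 *
 *	CONSUMER_CHANNEL_ADD:  register the channel wait fd in channel_ht and
 *	                       the poll set.
 *	CONSUMER_CHANNEL_DEL:  tear down the channel streams and drop this
 *	                       thread's refcount on the channel.
 *	CONSUMER_CHANNEL_QUIT: remove the pipe from the poll set so the thread
 *	                       can exit once all fds are gone.
 */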
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	assert(ctx);
	assert(sockpoll);

	if (lttng_consumer_poll_socket(sockpoll) < 0) {
		ret = -1;
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret;
	/*
	 * Structure to poll for incoming data on the communication socket;
	 * avoids blocking reads on it.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* Prepare the FDs to poll: the client socket and the should_quit pipe. */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret < 0) {
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* Update the polling structure to poll on the established socket. */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			goto end;
		}
		DBG("received command on sock");
	}
end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon which
	 * is dead.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_metadata();

	/*
	 * When all fds have hung up, the polling thread
	 * can exit cleanly.
	 */
	consumer_quit = 1;

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set, so as to quit gracefully.
	 */
	notify_thread_lttng_pipe(ctx->consumer_data_pipe);

	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

	rcu_unregister_thread();
	return NULL;
}
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		ret = -ENOSYS;
		break;
	}

	pthread_mutex_unlock(&stream->lock);
	return ret;
}
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
void lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
}
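/*
 * Note: lttng_consumer_init() must run once, before any of the polling
 * threads above are started, since they dereference these hash tables
 * (e.g. consumer_data.stream_per_chan_id_ht in
 * consumer_close_channel_streams()). A minimal sketch of the expected
 * startup order, assuming a lttng_consumer_create()-style constructor for
 * the local data context:
 *
 *	lttng_consumer_init();
 *	ctx = lttng_consumer_create(...);
 *	... spawn the consumer_thread_*() threads with ctx ...
 */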
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
int consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttng_error_code ret_code = LTTNG_OK;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(ctx);
	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret = -ENOMEM;
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		} else {
			relayd->sessiond_session_id = sessiond_id;
			relayd_created = 1;
		}

		/*
		 * This code path MUST continue to the consumer send status message so
		 * we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTNG_OK);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		ret = -EINTR;
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		ret = -1;
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. In any case,
		 * we don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		fd = -1;	/* For error path */
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		/*
		 * Create a session on the relayd and store the returned id. Lock the
		 * control socket mutex if the relayd was NOT created before.
		 */
		if (!relayd_created) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		}
		ret = relayd_create_session(&relayd->control_sock,
				&relayd->relayd_session_id);
		if (!relayd_created) {
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		}
		if (ret < 0) {
			/*
			 * Close all sockets of a relayd object. It will be freed if it was
			 * created at the error code path or else it will be garbage
			 * collected.
			 */
			(void) relayd_close(&relayd->control_sock);
			(void) relayd_close(&relayd->data_sock);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			goto error;
		}

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		fd = -1;	/* for eventual error paths */
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret = -1;
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);

	/* We successfully added the socket. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/*
	 * Add relayd socket pair to consumer data hashtable. If object already
	 * exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return 0;

error:
	if (consumer_send_status_msg(sock, ret_code) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	}

error_nosignal:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}

	return ret;
}
/*
 * Try to lock the stream mutex.
 *
 * On success, 1 is returned, else 0 indicating that the mutex is NOT locked.
 */
static int stream_try_lock(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Try to lock the stream mutex. On failure, we know that the stream is
	 * being used elsewhere, hence there is data still being extracted.
	 */
	ret = pthread_mutex_trylock(&stream->lock);
	if (ret) {
		/* For both EBUSY and EINVAL error, the mutex is NOT locked. */
		ret = 0;
		goto end;
	}

	ret = 1;

end:
	return ret;
}
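/*
 * Caller pattern (as used in consumer_data_pending() below): a zero return
 * means the stream lock is busy, i.e. another thread is still extracting
 * data, so the session must be reported as having data pending:
 *
 *	if (!stream_try_lock(stream)) {
 *		goto data_pending;
 *	}
 *	... inspect the stream, then pthread_mutex_unlock(&stream->lock) ...
 */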
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * A rcu read side lock MUST be acquired before calling this function and held
 * until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id, which is unique here, whereas the relayd
		 * session id might not be unique when multiple relayds are involved.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}
/*
 * Check if, for a given session id, there is still data needed to be extracted
 * from the buffers.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		assert(0);
	}

	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			/* Communication error with the relayd; report no data pending. */
			goto data_not_pending;
		}
	}

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		/* If this call fails, the stream is being used hence data pending. */
		ret = stream_try_lock(stream);
		if (!ret) {
			goto data_pending;
		}

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/*
			 * An empty output file is not valid. We need at least one packet
			 * generated per stream, even if it contains no event, so it
			 * contains at least one packet header.
			 */
			if (stream->output_written == 0) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		/* Relayd check */
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}
		pthread_mutex_unlock(&stream->lock);
	}

	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send end command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
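/*
 * Decision summary for consumer_data_pending(): a session is reported as
 * pending if any stream lock is busy, any live stream has an empty output
 * file or data left in its buffers, or the relayd still reports unreceived
 * or in-flight data. Only when every check falls through is 0 returned.
 */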
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	assert(sock >= 0);

	if (!channel) {
		msg.ret_code = -LTTNG_ERR_UST_CHAN_FAIL;
	} else {
		msg.ret_code = LTTNG_OK;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Using a maximum stream size with the produced and consumed position of a
 * stream, computes the new consumed position to be as close as possible to the
 * maximum possible stream size.
 *
 * If the maximum stream size is greater than the possible buffer size
 * (produced - consumed), the consumed_pos given is returned untouched;
 * otherwise, the new, clamped value is returned.
 */
unsigned long consumer_get_consumed_maxsize(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t max_stream_size)
{
	if (max_stream_size && max_stream_size < (produced_pos - consumed_pos)) {
		/* Offset from the produced position to get the latest buffers. */
		return produced_pos - max_stream_size;
	}

	return consumed_pos;
}