/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/socket.h>
#include <sys/types.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer-timer.h>

#include "consumer-stream.h"
#include "consumer-testpoint.h"
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};
enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};

struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};
/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by the consumer_thread_receive_fds when it notices that all fds
 * have hung up. Also updated by the signal handler (consumer_should_exit()).
 * Read by the polling threads.
 */
volatile int consumer_quit;

/*
 * Global hash tables containing respectively the metadata and data streams.
 * A stream element in these hash tables should only be updated by the
 * metadata poll thread for metadata and by the data poll thread for data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
/*
 * Notify a thread through an lttng pipe to poll back again. This usually
 * means that some global state has changed, so the thread is sent back into
 * its poll wait call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
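/*
 * Note: the receiving poll threads interpret a NULL stream pointer read from
 * the pipe as "global state changed, re-evaluate the poll set", as opposed to
 * a pointer to a newly added stream (see the metadata poll thread below).
 */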
static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;
	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}

void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
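/*
 * As the field comments in struct consumer_channel_msg indicate, an ADD
 * message carries the channel pointer while a DEL message carries only the
 * channel key; consumer_add_channel() below uses the former and
 * notify_thread_del_channel() the latter.
 */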
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		return -1;
	}
	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;
	return 0;
}
/*
 * Find a stream. The consumer_data.lock must be held while calling this
 * function.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	return stream;
}
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need to iterate on
		 * this stream when iterating over the hash table. Just change the
		 * node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
}
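/*
 * consumer_add_data_stream() calls the helper above before inserting a
 * stream that reuses an existing key: the old node stays iterable in the
 * hash table but can no longer be found by key lookup.
 */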
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
static void free_stream_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	free(stream);
}
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	free(channel);
}
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call_rcu handler since we don't
	 * want the socket fds to be reassigned, thus potentially creating a bad
	 * state for the relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this
	 * stage there is no one referencing this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	free(relayd);
}
/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
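/*
 * Note that the sockets themselves are only closed in free_relayd_rcu(),
 * i.e. after an RCU grace period, so concurrent readers holding the RCU
 * read-side lock never see the socket fds reused under them.
 */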
/*
 * Remove a channel from the global list protected by a mutex. This function
 * is also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream, *stmp;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so
		 * we have a guarantee that this call will succeed.
		 */
		consumer_stream_destroy(stream, NULL);
	}

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	rcu_read_lock();
	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	call_rcu(&channel->node.head, free_channel_rcu);

	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network
 * sequence index (relayd index).
 *
 * It is set atomically without the stream mutex locked, which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}

	rcu_read_unlock();
}
/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in an RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state has changed.
 */
static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
		struct lttng_consumer_local_data *ctx)
{
	uint64_t netidx;

	assert(relayd);

	DBG("Cleaning up relayd sockets");

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and
	 * free the object in an RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * has changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	if (ctx) {
		notify_thread_lttng_pipe(ctx->consumer_data_pipe);
		notify_thread_lttng_pipe(ctx->consumer_metadata_pipe);
	}
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	assert(relayd);

	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}
/*
 * Completely destroy a stream from every visible data structure and the
 * given hash table, if any.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}

/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uint64_t relayd_id,
		uint64_t session_id,
		int cpu,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		goto error;
	}

	stream->key = stream_key;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->state = state;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_fd = -1;
	pthread_mutex_init(&stream->lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64 ")",
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	return stream;

error:
	free(stream);
	return NULL;
}
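/*
 * The stream returned by consumer_allocate_stream() is not yet visible to
 * other threads; it is published later through consumer_add_data_stream() or
 * consumer_add_metadata_stream(), which take the appropriate locks.
 */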
/*
 * Add a stream to the global list protected by a mutex.
 */
int consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;
	int ret = 0;

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of
	 * this stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}

void consumer_del_data_stream(struct lttng_consumer_stream *stream)
{
	consumer_del_stream(stream, data_ht);
}
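/*
 * Locking order used by the stream add/delete paths above and below:
 * consumer_data.lock, then the channel lock, then the channel timer lock,
 * then the stream lock. Keeping this order consistent avoids deadlocks
 * between the threads that manipulate streams.
 */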
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock
 * MUST be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		/* Relayd already exists in the hash table. */
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}
/*
 * Allocate and return a consumer relayd socket pair.
 */
struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->refcount = 0;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
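/*
 * Sketch of the intended usage: the caller allocates the pair, sets up its
 * control and data sockets, then publishes the object with add_relayd()
 * while holding the RCU read-side lock (see the add_relayd() comment above).
 */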
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair,
				node);
	}

error:
	return relayd;
}
/*
 * Find a relayd and send the stream to it.
 *
 * Returns 0 on success, < 0 on error.
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(stream->net_seq_idx != -1ULL);
	assert(path);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size, stream->chan->tracefile_count);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto end;
		}

		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Find a relayd and send the streams_sent message.
 *
 * Returns 0 on success, < 0 on error.
 */
int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(net_seq_idx != -1ULL);

	/* Get relayd reference if it exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd != NULL) {
		/* Send the streams_sent message on the relayd control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_streams_sent(&relayd->control_sock);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto end;
		}
	} else {
		ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.",
				net_seq_idx);
		ret = -1;
		goto end;
	}

	ret = 0;
	DBG("All streams sent relayd id %" PRIu64, net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Find a relayd and close the stream on it.
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}
/*
 * Handle a stream for relayd transmission if the stream applies for network
 * streaming, i.e. when the net sequence index is set.
 *
 * Return the destination file descriptor or a negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that the next_net_seq_num will be
		 * incremented. This is why when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
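/*
 * Worked example of the sequence numbering described above (sketch): after
 * three data packets have been sent, next_net_seq_num is 3 while the last
 * header carried net_seq_num 2. A command that must reference the last sent
 * packet therefore uses next_net_seq_num - 1, as done by the
 * relayd_send_close_stream() call in consumer_del_metadata_stream().
 */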
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->relayd_id = relayd_id;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	switch (output) {
	case LTTNG_EVENT_SPLICE:
		channel->output = CONSUMER_CHANNEL_SPLICE;
		break;
	case LTTNG_EVENT_MMAP:
		channel->output = CONSUMER_CHANNEL_MMAP;
		break;
	default:
		assert(0);
		free(channel);
		channel = NULL;
		goto end;
	}

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	lttng_ht_node_init_u64(&channel->node, channel->key);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	return channel;
}
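/*
 * Like streams, a freshly allocated channel is only made visible to the rest
 * of the consumer once consumer_add_channel() inserts it into
 * consumer_data.channel_ht under the proper locks.
 */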
/*
 * Add a channel to the global list protected by a mutex.
 *
 * On success 0 is returned else a negative value.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);
	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht, &channel->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		/* Channel already exists. Ignore the insertion. */
		ERR("Consumer add channel key %" PRIu64 " already exists!",
				channel->key);
		ret = -EEXIST;
		goto end;
	}

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);

end:
	rcu_read_unlock();
	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (!ret && channel->wait_fd != -1 &&
			channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}

	return ret;
}
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(local_stream);

	DBG("Updating poll fd array");
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			continue;
		}
		/*
		 * This clobbers the debug output way too much. Uncomment it if you
		 * need it for debugging purposes.
		 *
		 * DBG("Active FD %d", stream->wait_fd);
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;
	return i;
}
/*
 * Poll on the should_quit pipe and the command socket. Return -1 on error
 * and the caller should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		goto exit;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}

/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0, it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds. MUST be called when all instances
 * are destroyed, i.e. when all threads were joined and have ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread
	 * or the data thread so we do *nothing* on the hash table and simply
	 * destroy it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	consumer_quit = 1;
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
}
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		PERROR("Error creating thread pipe");
		goto error_thread_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_splice_metadata_pipe);
	if (ret < 0) {
		goto error_splice_pipe;
	}

	return ctx;

error_splice_pipe:
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_thread_pipe);
error_thread_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
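/*
 * Typical usage (sketch): the consumer entry point creates one context with
 * its buffer_ready/recv_channel/recv_stream/update_stream callbacks, spawns
 * consumer_thread_metadata_poll() and consumer_thread_data_poll() with that
 * context as their argument, and finally tears everything down with
 * lttng_consumer_destroy() once the threads have been joined.
 */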
/*
 * Iterate over all streams of the hashtable and free them properly.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any
		 * error can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}

/*
 * Iterate over all streams of the metadata hashtable and free them
 * properly.
 */
static void destroy_metadata_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any
		 * error can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	destroy_data_stream_ht(data_ht);
	destroy_metadata_stream_ht(metadata_ht);

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_thread_pipe);
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	utils_close_pipe(ctx->consumer_should_quit);
	utils_close_pipe(ctx->consumer_splice_metadata_pipe);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd, unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed, so ignore the PERROR
		 * not to clobber the error output, since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}

	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is
 * a core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Careful review MUST be done if any changes occur!
 *
 * Returns the number of bytes written.
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	unsigned long mmap_offset;
	void *mmap_base;
	ssize_t ret = 0, written = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check if the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			written = -EPIPE;
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			written = ret;
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			written = -EPERM;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			written = ret;
			goto end;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;

			/* Write metadata stream id before payload */
			if (stream->metadata_flag) {
				ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
				if (ret < 0) {
					written = ret;
					/* Socket operation failed. We consider the relayd dead */
					if (ret == -EPIPE || ret == -EINVAL) {
						relayd_hang_up = 1;
						goto write_error;
					}
					goto end;
				}
			}
		} else {
			written = ret;
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EPIPE || ret == -EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		}
		/* Else, use the default outfd set before, which is the filesystem. */
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = index_create_file(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current);
				if (ret < 0) {
					goto end;
				}
				stream->index_fd = ret;
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	ret = lttng_write(outfd, mmap_base + mmap_offset, len);
	DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
	if (ret < len) {
		/*
		 * This is possible if the fd is closed on the other side (outfd)
		 * or any write problem. It can be verbose a bit for a normal
		 * execution if for instance the relayd is stopped abruptly. This
		 * can happen so set this to a DBG statement.
		 */
		DBG("Error in file write mmap");
		if (written == 0) {
			written = -errno;
		}
		/* Socket operation failed. We consider the relayd dead */
		if (errno == EPIPE || errno == EINVAL) {
			relayd_hang_up = 1;
			goto write_error;
		}
		goto end;
	} else if (ret > len) {
		PERROR("Error in file write (ret %zd > len %lu)", ret, len);
		written += ret;
		goto end;
	}
	written += ret;

	/* This call is useless on a socket so better save a syscall. */
	if (!relayd) {
		/* This won't block, but will start writeout asynchronously */
		lttng_sync_file_range(outfd, stream->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		stream->out_fd_offset += ret;
	}
	stream->output_written += ret;

	lttng_consumer_sync_trace_file(stream, orig_offset);

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
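/*
 * The splice variant below follows the same relayd and tracefile-rotation
 * logic, but moves the data through a kernel pipe instead of a user-space
 * copy; it is only used by the kernel consumer (see the switch at its top).
 */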
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check if the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			written = -EPIPE;
			goto end;
		}
	}

	/*
	 * Choose right pipe for splice. Metadata and trace data are handled by
	 * different threads hence the use of two pipes in order not to race or
	 * corrupt the written data.
	 */
	if (stream->metadata_flag) {
		splice_pipe = ctx->consumer_splice_metadata_pipe;
	} else {
		splice_pipe = ctx->consumer_thread_pipe;
	}

	/* Write metadata stream id before payload */
	if (relayd) {
		int total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the
			 * function since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				written = ret;
				/* Socket operation failed. We consider the relayd dead */
				if (ret == -EBADF) {
					WARN("Remote relayd disconnected. Stopping");
					relayd_hang_up = 1;
					goto write_error;
				}
				goto end;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;
		} else {
			written = ret;
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EBADF) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = index_create_file(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current);
				if (ret < 0) {
					goto end;
				}
				stream->index_fd = ret;
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			PERROR("Error in relay splice");
			written = ret_splice;
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd && stream->metadata_flag) {
			size_t metadata_payload_size =
				sizeof(struct lttcomm_relayd_metadata_payload);

			/* Update counter to fit the spliced data */
			ret_splice += metadata_payload_size;
			len += metadata_payload_size;
			/*
			 * We do this so the return value can match the len passed as
			 * argument to this function.
			 */
			written -= metadata_payload_size;
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			PERROR("Error in file splice");
			written = ret_splice;
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EBADF || errno == EPIPE) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			goto splice_error;
		} else if (ret_splice > len) {
			ret = errno;
			PERROR("Wrote more data than requested %zd (len: %lu)",
					ret_splice, len);
			written += ret_splice;
			goto splice_error;
		} else {
			/* All good, update remaining len for the next iteration. */
			len -= ret_splice;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Take a snapshot for a specific fd.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Get the produced position.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
void lttng_consumer_close_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	/*
	 * This call should NEVER receive a regular stream. It must always be a
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	if (ht == NULL) {
		/* Means the stream was allocated but not successfully added */
		goto free_stream_rcu;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap metadata stream");
			}
		}
		if (stream->wait_fd >= 0) {
			ret = close(stream->wait_fd);
			if (ret < 0) {
				PERROR("close kernel metadata wait_fd");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		if (stream->monitor) {
			/* close the write-side in close_metadata */
			ret = close(stream->ust_metadata_poll_pipe[0]);
			if (ret < 0) {
				PERROR("Close UST metadata read-side poll pipe");
			}
		}
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	rcu_read_lock();
	iter.iter.node = &stream->node.node;
	ret = lttng_ht_del(ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_channel_id.node;
	ret = lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_session_id.node;
	ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Check and cleanup relayd */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);

		/* Closing streams requires to lock the control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_close_stream(&relayd->control_sock,
				stream->relayd_stream_id, stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			DBG("Unable to close stream on the relayd. Continuing");
			/*
			 * Continue here. There is nothing we can do for the relayd.
			 * Chances are that the relayd has closed the socket so we just
			 * continue cleaning up.
			 */
		}

		/* Both conditions are met, we destroy the relayd. */
		if (uatomic_read(&relayd->refcount) == 0 &&
				uatomic_read(&relayd->destroy_flag)) {
			consumer_destroy_relayd(relayd);
		}
	}
	rcu_read_unlock();

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for a NULL
	 * pointer value.
	 */
	stream->chan->metadata_stream = NULL;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

free_stream_rcu:
	call_rcu(&stream->node.head, free_stream_rcu);
}
/*
 * Action done with the metadata stream when adding it to the consumer
 * internal data structures to handle it.
 */
int consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	assert(stream);
	assert(ht);

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an
	 * error after this point.
	 */

	rcu_read_lock();

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of
	 * this stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_unique_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	rcu_read_unlock();

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}
/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	assert(pollset);

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}
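/*
 * These two validate_endpoint_status_*() helpers are the counterpart of
 * update_endpoint_status_by_netidx(): cleanup_relayd() flags the streams and
 * wakes the poll threads through their pipes, and the poll threads then call
 * these helpers to actually delete the flagged streams.
 */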
/*
 * Thread that polls on the metadata file descriptors and writes the metadata
 * to disk or to the network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	if (testpoint(consumerd_thread_metadata)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Metadata main loop started");

	while (1) {
		health_code_update();

		/* Only the metadata pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}

restart:
		DBG("Metadata poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < sizeof(stream)) {
						PERROR("read metadata stream");
						/*
						 * Continue here to handle the rest of the streams.
						 */
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI);
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						health_code_update();

						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if we
						 * get a negative len, it means an error occurred
						 * thus we simply remove it from the poll set and
						 * free the stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file
				 * descriptors and safely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				do {
					health_code_update();

					len = ctx->on_buffer_ready(stream, ctx);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred thus we
					 * simply remove it from the poll set and free the
					 * stream.
					 */
				} while (len > 0);

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			}

			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

error:
end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}

/*
 * This thread polls the fds in the set to consume the data and writes it to
 * the trace files if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i, err = -1;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);

	if (testpoint(consumerd_thread_data)) {
		goto error_testpoint;
	}

	health_code_update();

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		health_code_update();

		high_prio = 0;
		num_hup = 0;

		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/* allocate for all fds + 1 for the consumer_data_pipe */
			pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the consumer_data_pipe */
			local_stream = zmalloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit set; clean up and exit the thread. */
		if (nb_fd == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}
		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + 1);
		health_poll_entry();
		num_rdy = poll(pollfd, nb_fd + 1, -1);
		health_poll_exit();
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < sizeof(new_stream)) {
				PERROR("Consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}
	/* All is OK */
	err = 0;
end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}

/*
 * Close wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}

		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Note: a mutex is taken internally within
			 * liblttng-ust-ctl to protect timer wakeup_fd
			 * use from concurrent close.
			 */
			lttng_ustconsumer_close_stream_wakeup(stream);
			break;
		default:
			ERR("Unknown consumer_data type");
			assert(0);
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
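
/*
 * Background note (editorial illustration, not part of the original code):
 * closing the write side of a pipe is what makes a poll() on the read side
 * eventually report a hang-up, which is the effect the function above relies
 * on for the per-stream wakeup fds. A minimal sketch with a plain pipe(2)
 * pair named wakeup_pipe:
 *
 *	close(wakeup_pipe[1]);		// write side (application) goes away
 *	struct pollfd pfd = { .fd = wakeup_pipe[0], .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLHUP)) {
 *		// the read side now observes the close
 *	}
 */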

static void destroy_channel_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;
	int ret;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}

/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);

	if (testpoint(consumerd_thread_channel)) {
		goto error_testpoint;
	}

	health_code_update();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Channel main loop started");

	while (1) {
		health_code_update();

		/* Only the channel pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}

restart:
		DBG("Channel poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd */
			if (!revents) {
				continue;
			}
			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						ERR("Error reading channel pipe");
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLIN | LPOLLPRI);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						struct lttng_consumer_stream *stream, *stmp;

						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);
						consumer_close_channel_streams(chan);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							/* Delete streams that might have been left in the stream list. */
							cds_list_for_each_entry_safe(stream, stmp, &chan->streams.head,
									send_node) {
								health_code_update();

								cds_list_del(&stream->send_node);
								lttng_ustconsumer_del_stream(stream);
								uatomic_sub(&stream->chan->refcount, 1);
								assert(&chan->refcount);
								free(stream);
							}
							break;
						default:
							ERR("Unknown consumer_data type");
							assert(0);
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						goto restart;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
error_testpoint:
	DBG("Channel poll thread exiting");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
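
/*
 * Usage sketch (illustrative, not taken from this file): other threads drive
 * the loop above through the channel pipe. Assuming a valid ctx and chan,
 * adding a channel and later asking the thread to quit looks roughly like:
 *
 *	notify_channel_pipe(ctx, chan, -1, CONSUMER_CHANNEL_ADD);
 *	...
 *	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);
 *
 * The key argument is only meaningful for CONSUMER_CHANNEL_DEL, where it
 * identifies the channel to tear down.
 */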

static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	assert(ctx);
	assert(sockpoll);

	if (lttng_consumer_poll_socket(sockpoll) < 0) {
		ret = -1;
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}

/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret, err = -1;
	/*
	 * Structure to poll for incoming data on the communication socket; this
	 * avoids blocking reads on the sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);

	if (testpoint(consumerd_thread_sessiond)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret < 0) {
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		health_code_update();

		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			err = 0;
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			err = 0;	/* All is OK */
			goto end;
		}
		DBG("received command on sock");
	}
	/* All is OK */
	err = 0;

end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon which
	 * is dead.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_metadata();

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	consumer_quit = 1;

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set so as to quit gracefully.
	 */
	notify_thread_lttng_pipe(ctx->consumer_data_pipe);

	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);

	notify_health_quit_pipe(health_quit_pipe);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}

ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	pthread_mutex_lock(&stream->lock);
	if (stream->metadata_flag) {
		pthread_mutex_lock(&stream->metadata_rdv_lock);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		ret = -ENOSYS;
		break;
	}

	if (stream->metadata_flag) {
		pthread_cond_broadcast(&stream->metadata_rdv);
		pthread_mutex_unlock(&stream->metadata_rdv_lock);
	}
	pthread_mutex_unlock(&stream->lock);

	return ret;
}
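
/*
 * Editorial note (sketch, assuming the usual pthread condition pattern): a
 * thread that must wait for a metadata read attempt to complete can block on
 * the rendez-vous broadcast by the function above, for example:
 *
 *	pthread_mutex_lock(&stream->metadata_rdv_lock);
 *	while (metadata_not_yet_consumed(stream)) {	// hypothetical predicate
 *		pthread_cond_wait(&stream->metadata_rdv,
 *				&stream->metadata_rdv_lock);
 *	}
 *	pthread_mutex_unlock(&stream->metadata_rdv_lock);
 *
 * The broadcast only signals that a read attempt finished; the wake-up
 * predicate is up to the waiter.
 */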

int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Allocate and set consumer data hash tables.
 */
int lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.channel_ht) {
		goto error;
	}

	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.relayd_ht) {
		goto error;
	}

	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_list_ht) {
		goto error;
	}

	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_per_chan_id_ht) {
		goto error;
	}

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!data_ht) {
		goto error;
	}

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		goto error;
	}

	return 0;

error:
	return -1;
}
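
/*
 * Usage sketch (illustrative): the daemon entry point is expected to call
 * lttng_consumer_init() once, before any consumer thread is started, and to
 * abort on failure since every thread relies on these hash tables:
 *
 *	if (lttng_consumer_init() < 0) {
 *		ERR("Failed to allocate consumer hash tables");
 *		goto error;	// hypothetical error label in the caller
 *	}
 */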

/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
int consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
		uint64_t relayd_session_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		} else {
			relayd->sessiond_session_id = sessiond_id;
			relayd_created = 1;
		}

		/*
		 * This code path MUST continue to the consumer send status message so
		 * we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		ret = -EINTR;
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		ret = -1;
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. In any case, we
		 * don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		fd = -1;	/* For error path */
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		relayd->relayd_session_id = relayd_session_id;

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		fd = -1;	/* for eventual error paths */
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret = -1;
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);

	/* We successfully added the socket. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/*
	 * Add relayd socket pair to consumer data hashtable. If object already
	 * exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return 0;

error:
	if (consumer_send_status_msg(sock, ret_code) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	}

error_nosignal:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}

	return ret;
}

/*
 * Try to lock the stream mutex.
 *
 * On success, 1 is returned else 0 indicating that the mutex is NOT locked.
 */
static int stream_try_lock(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Try to lock the stream mutex. On failure, we know that the stream is
	 * being used elsewhere hence there is data still being extracted.
	 */
	ret = pthread_mutex_trylock(&stream->lock);
	if (ret) {
		/* For both EBUSY and EINVAL error, the mutex is NOT locked. */
		ret = 0;
		goto end;
	}

	ret = 1;

end:
	return ret;
}
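
/*
 * Usage sketch (illustrative): a 0 return means the stream lock is currently
 * held by another thread, which consumer_data_pending() below interprets as
 * "data is still being extracted". On a 1 return the caller owns the lock
 * and must release it:
 *
 *	if (!stream_try_lock(stream)) {
 *		return 1;	// busy: treat as data pending
 *	}
 *	// ... inspect the stream ...
 *	pthread_mutex_unlock(&stream->lock);
 */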

/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * A rcu read side lock MUST be acquired before calling this function and held
 * until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here where the relayd session
		 * id might not be when having multiple relayd.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}

/*
 * Check if, for a given session id, there is still data needed to be extracted
 * from the buffers.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		assert(0);
	}

	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			/* Communication error with the relayd; assume no data is pending. */
			goto data_not_pending;
		}
	}

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		/* If this call fails, the stream is being used hence data pending. */
		ret = stream_try_lock(stream);
		if (!ret) {
			goto data_pending;
		}

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/*
			 * An empty output file is not valid. We need at least one packet
			 * generated per stream, even if it contains no event, so it
			 * contains at least one packet header.
			 */
			if (stream->output_written == 0) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		/* Relayd check */
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}
		pthread_mutex_unlock(&stream->lock);
	}

	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send end command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
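
/*
 * Usage sketch (illustrative): the session daemon side typically polls this
 * result while waiting for a session to quiesce, retrying as long as 1
 * ("pending") is returned. The retry delay below is a made-up value:
 *
 *	while (consumer_data_pending(session_id) == 1) {
 *		usleep(DATA_PENDING_RETRY_US);	// hypothetical back-off constant
 *	}
 *	// 0: buffers flushed, trace data is complete for this session
 */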

/*
 * Send a ret code status message to the session daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
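
/*
 * Usage sketch (illustrative): command handlers reply to the session daemon
 * with one of the lttcomm return codes, for example:
 *
 *	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
 *	if (ret < 0) {
 *		// peer vanished; treat as a fatal communication error
 *	}
 */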

/*
 * Send a channel status message to the session daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	assert(sock >= 0);

	if (!channel) {
		msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
	} else {
		msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
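
/*
 * Worked example (editorial illustration) for consumer_get_consumed_maxsize()
 * below: with consumed_pos = 0, produced_pos = 4096 and max_stream_size =
 * 1024, the backlog (4096 - 0) exceeds the limit, so the function returns
 * 4096 - 1024 = 3072 and only the most recent 1024 bytes are kept. With
 * max_stream_size = 0 (no limit) or a limit of at least 4096, the original
 * consumed_pos of 0 is returned untouched.
 */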

/*
 * Using a maximum stream size with the produced and consumed position of a
 * stream, computes the new consumed position to be as close as possible to the
 * maximum possible stream size.
 *
 * If the maximum stream size is lower than the possible buffer backlog
 * (produced - consumed), the new consumed position is returned; otherwise the
 * given consumed_pos is returned untouched.
 */
unsigned long consumer_get_consumed_maxsize(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t max_stream_size)
{
	if (max_stream_size && max_stream_size < (produced_pos - consumed_pos)) {
		/* Offset from the produced position to get the latest buffers. */
		return produced_pos - max_stream_size;
	}

	return consumed_pos