/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#define _GNU_SOURCE
#include <assert.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <inttypes.h>
#include <signal.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer-timer.h>

#include "consumer.h"
#include "consumer-stream.h"
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};
enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};
struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};
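/*
 * Message convention on consumer_channel_pipe (sketch, inferred from the
 * senders and reader below): an ADD message carries the channel pointer in
 * 'chan', a DEL message carries the channel key in 'key', and QUIT carries
 * no payload. See notify_channel_pipe() and read_channel_pipe().
 */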
/*
 * Flag to inform the polling thread to quit when all fd hung up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds have hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 */
volatile int consumer_quit;
/*
 * Global hash table containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll thread
 * for the metadata and the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed so we just send back the thread in a poll wait
 * call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	assert(pipe);

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
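/*
 * Note: writing a NULL stream pointer is the wakeup convention used towards
 * the polling threads; on the read side, a NULL stream is interpreted as
 * "global state changed, revalidate the poll set" (see
 * consumer_thread_metadata_poll below).
 */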
static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;
	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}
void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		ret = -1;
		goto error;
	}
	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;
error:
	return (int) ret;
}
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	assert(ht);

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	rcu_read_lock();

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	rcu_read_unlock();

	return stream;
}
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	rcu_read_lock();
	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
	rcu_read_unlock();
}
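/*
 * Rationale sketch: the stream must stay reachable for hash table iteration
 * (teardown paths still walk the table), but a later lookup on the old key
 * must miss so that a new stream can safely reuse it. Hence both the logical
 * key and the hash node key are poisoned with -1ULL, which find_stream()
 * treats as a guaranteed lookup failure.
 */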
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
static void free_stream_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	free(stream);
}
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	free(channel);
}
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call_rcu handler since we
	 * don't want the socket fds to be reassigned and thus potentially
	 * create a bad state for the relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	free(relayd);
}
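/*
 * Design note (sketch): these free_*_rcu() callbacks run after an RCU grace
 * period, once no reader can still hold a reference obtained from the hash
 * tables. Closing the relayd sockets here, rather than at removal time,
 * guarantees the fds cannot be reassigned while a concurrent RCU reader is
 * still using them.
 */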
/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream, *stmp;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so
		 * we have a guarantee that this call will succeed.
		 */
		consumer_stream_destroy(stream, NULL);
	}

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	call_rcu(&channel->node.head, free_channel_rcu);
end:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked, which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
	rcu_read_unlock();
}
/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in a RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state have changed.
 */
static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
		struct lttng_consumer_local_data *ctx)
{
	uint64_t netidx;

	assert(relayd);

	DBG("Cleaning up relayd sockets");

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in a RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * have changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	if (ctx) {
		notify_thread_lttng_pipe(ctx->consumer_data_pipe);
		notify_thread_lttng_pipe(ctx->consumer_metadata_pipe);
	}
}
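/*
 * Ordering sketch for the comment above: the status stores done by
 * update_endpoint_status_by_netidx() happen before the pipe write(), and the
 * polling threads read the status only after their read() of the
 * notification returns, so the write/read pair on the pipe orders the two
 * accesses without taking the stream mutex.
 */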
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	assert(relayd);

	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}
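/*
 * Sketch of the destruction protocol: whichever of "last reference dropped"
 * and "flagged for destroy" happens second performs the actual destruction.
 * consumer_del_metadata_stream() implements the mirror-image check
 * (refcount == 0 && destroy_flag) when it drops its reference.
 */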
/*
 * Completely destroy a stream from every visible data structure and the given
 * hash table, if any.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}

/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		uint64_t session_id,
		int cpu,
		int *alloc_ret,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		ret = -ENOMEM;
		goto end;
	}

	rcu_read_lock();

	stream->key = stream_key;
	stream->out_fd = -1;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->state = state;
	stream->uid = uid;
	stream->gid = gid;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_fd = -1;
	pthread_mutex_init(&stream->lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	rcu_read_unlock();
	return stream;

error:
	rcu_read_unlock();
	free(stream);
end:
	if (alloc_ret) {
		*alloc_ret = ret;
	}
	return NULL;
}
/*
 * Add a stream to the global list protected by a mutex.
 */
int consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;
	int ret = 0;

	assert(stream);
	assert(ht);

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}
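/*
 * Locking order used in this file (as taken above and in
 * consumer_add_metadata_stream()):
 *
 *   consumer_data.lock -> channel->lock -> channel->timer_lock -> stream->lock
 *
 * Any new code path should take these mutexes in the same order to stay
 * deadlock free.
 */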
void consumer_del_data_stream(struct lttng_consumer_stream *stream)
{
	consumer_del_stream(stream, data_ht);
}
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	assert(relayd);

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}
/*
 * Allocate and return a consumer relayd socket.
 */
struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->refcount = 0;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}
/*
 * Find a relayd and send the stream.
 *
 * Returns 0 on success, < 0 on error.
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(stream->net_seq_idx != -1ULL);
	assert(path);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size, stream->chan->tracefile_count);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto end;
		}
		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Find a relayd and close the stream.
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Safety net */
	assert(stream);
	assert(relayd);

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that will next_net_seq_num be
		 * incremented. This is why, when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
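/*
 * Worked example for the net_seq_num note above: if three packets have been
 * sent, next_net_seq_num is 3 and the header of the last packet carried
 * sequence number 2; asking the relayd about the last received packet must
 * therefore use next_net_seq_num - 1, which is exactly what
 * consumer_del_metadata_stream() does when issuing relayd_send_close_stream().
 */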
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->uid = uid;
	channel->gid = gid;
	channel->relayd_id = relayd_id;
	channel->output = output;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	lttng_ht_node_init_u64(&channel->node, channel->key);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	return channel;
}
/*
 * Add a channel to the global list protected by a mutex.
 *
 * On success 0 is returned else a negative value.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);
	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht, &channel->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		/* Channel already exists. Ignore the insertion */
		ERR("Consumer add channel key %" PRIu64 " already exists!",
			channel->key);
		ret = -EEXIST;
		goto end;
	}

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);

end:
	rcu_read_unlock();
	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (!ret && channel->wait_fd != -1 &&
			channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}
	return ret;
}
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(ctx);
	assert(ht);
	assert(pollfd);
	assert(local_stream);

	DBG("Updating poll fd array");
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			continue;
		}
		/*
		 * This clobbers the debug output way too much. Uncomment it if you
		 * need it for debugging purposes.
		 *
		 * DBG("Active FD %d", stream->wait_fd);
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;
	return i;
}
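/*
 * Note: the returned count deliberately excludes the consumer_data_pipe slot
 * appended above; callers poll on i + 1 entries but treat index i as the
 * wakeup pipe rather than a stream.
 */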
/*
 * Poll on the should_quit pipe and the command socket. Return -1 on error or
 * if the thread should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		goto exit;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}

/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0; it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds. MUST be called when all
 * instances are destroyed, i.e. when all threads were joined and have ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	consumer_quit = 1;
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
}
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument; this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		PERROR("Error creating thread pipe");
		goto error_thread_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_splice_metadata_pipe);
	if (ret < 0) {
		goto error_splice_pipe;
	}

	return ctx;

error_splice_pipe:
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_thread_pipe);
error_thread_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_thread_pipe);
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	utils_close_pipe(ctx->consumer_should_quit);
	utils_close_pipe(ctx->consumer_splice_metadata_pipe);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd, unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the perror
		 * not to clobber the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Careful review MUST be put if any changes occur!
 *
 * Returns the number of bytes written.
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct lttng_packet_index *index)
{
	unsigned long mmap_offset;
	void *mmap_base;
	ssize_t ret = 0, written = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Flag that the current stream if set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			written = -errno;
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			written = -EPERM;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			written = ret;
			goto end;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;

			/* Write metadata stream id before payload */
			if (stream->metadata_flag) {
				ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
				if (ret < 0) {
					written = ret;
					/* Socket operation failed. We consider the relayd dead */
					if (ret == -EPIPE || ret == -EINVAL) {
						relayd_hang_up = 1;
						goto write_error;
					}
					goto end;
				}
			}
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EPIPE || ret == -EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			/* Else, use the default set before which is the filesystem. */
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = index_create_file(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current);
				if (ret < 0) {
					goto end;
				}
				stream->index_fd = ret;
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		if (index) {
			index->offset = htobe64(stream->out_fd_offset);
		}
	}

	while (len > 0) {
		ret = lttng_write(outfd, mmap_base + mmap_offset, len);
		DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
		if (ret < len) {
			/*
			 * This is possible if the fd is closed on the other side (outfd)
			 * or any write problem. It can be verbose a bit for a normal
			 * execution if for instance the relayd is stopped abruptly. This
			 * can happen so set this to a DBG statement.
			 */
			DBG("Error in file write mmap");
			if (written == 0) {
				written = -errno;
			}
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EPIPE || errno == EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		} else if (ret > len) {
			PERROR("Error in file write (ret %zd > len %lu)", ret, len);
			written += ret;
			goto end;
		} else {
			len -= ret;
			mmap_offset += ret;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret;
		}
		stream->output_written += ret;
		written += ret;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
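/*
 * Contrast with lttng_consumer_on_read_subbuffer_splice() below: both paths
 * share the relayd header and tracefile rotation logic, but this one copies
 * the subbuffer through a userspace mmap'd view with write(), while the
 * splice variant moves pages kernel-side through a pipe and is only usable
 * for the kernel domain.
 */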
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct lttng_packet_index *index)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Flag that the current stream if set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/*
	 * Choose right pipe for splice. Metadata and trace data are handled by
	 * different threads hence the use of two pipes in order not to race or
	 * corrupt the written data.
	 */
	if (stream->metadata_flag) {
		splice_pipe = ctx->consumer_splice_metadata_pipe;
	} else {
		splice_pipe = ctx->consumer_thread_pipe;
	}

	/* Write metadata stream id before payload */
	if (relayd) {
		int total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the function
			 * since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				written = ret;
				/* Socket operation failed. We consider the relayd dead */
				if (ret == -EBADF) {
					WARN("Remote relayd disconnected. Stopping");
					relayd_hang_up = 1;
					goto write_error;
				}
				goto end;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EBADF) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = index_create_file(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current);
				if (ret < 0) {
					goto end;
				}
				stream->index_fd = ret;
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in relay splice");
			if (written == 0) {
				written = ret_splice;
			}
			ret = errno;
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd) {
			if (stream->metadata_flag) {
				size_t metadata_payload_size =
					sizeof(struct lttcomm_relayd_metadata_payload);

				/* Update counter to fit the spliced data */
				ret_splice += metadata_payload_size;
				len += metadata_payload_size;
				/*
				 * We do this so the return value can match the len passed as
				 * argument to this function.
				 */
				written -= metadata_payload_size;
			}
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in file splice");
			if (written == 0) {
				written = ret_splice;
			}
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EBADF || errno == EPIPE) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			ret = errno;
			goto splice_error;
		} else if (ret_splice > len) {
			errno = EINVAL;
			PERROR("Wrote more data than requested %zd (len: %lu)",
					ret_splice, len);
			written += ret_splice;
			ret = errno;
			goto splice_error;
		}
		len -= ret_splice;

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Take a snapshot for a specific fd.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Get the produced position.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * WARNING: *MUST* be used with data streams only.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * XXX: Should not be only for metadata stream or else use another name.
 */
static void destroy_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
void lttng_consumer_close_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	/*
	 * This call should NEVER receive regular stream. It must always be
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	if (ht == NULL) {
		/* Means the stream was allocated but not successfully added */
		goto free_stream_rcu;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap metadata stream");
			}
		}
		if (stream->wait_fd >= 0) {
			ret = close(stream->wait_fd);
			if (ret < 0) {
				PERROR("close kernel metadata wait_fd");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		if (stream->monitor) {
			/* close the write-side in close_metadata */
			ret = close(stream->ust_metadata_poll_pipe[0]);
			if (ret < 0) {
				PERROR("Close UST metadata read-side poll pipe");
			}
		}
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &stream->node.node;
	ret = lttng_ht_del(ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_channel_id.node;
	ret = lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_session_id.node;
	ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Check and cleanup relayd */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);

		/* Closing streams requires to lock the control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_close_stream(&relayd->control_sock,
				stream->relayd_stream_id, stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			DBG("Unable to close stream on the relayd. Continuing");
			/*
			 * Continue here. There is nothing we can do for the relayd.
			 * Chances are that the relayd has closed the socket so we just
			 * continue cleaning up.
			 */
		}

		/* Both conditions are met, we destroy the relayd. */
		if (uatomic_read(&relayd->refcount) == 0 &&
				uatomic_read(&relayd->destroy_flag)) {
			consumer_destroy_relayd(relayd);
		}
	}
	rcu_read_unlock();

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

end:
	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for
	 * a NULL pointer value.
	 */
	stream->chan->metadata_stream = NULL;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

free_stream_rcu:
	call_rcu(&stream->node.head, free_stream_rcu);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
int consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	assert(stream);
	assert(ht);

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	rcu_read_lock();

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_unique_u64(consumer_data.stream_per_chan_id_ht,
		&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	rcu_read_unlock();

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);
	return ret;
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}
/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	assert(pollset);

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}
/*
 * Thread that polls on metadata file descriptors and writes them to disk or to
 * the network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	health_code_update();

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Metadata main loop started");

	while (1) {
		health_code_update();

		/* Only the metadata pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}

restart:
		DBG("Metadata poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < sizeof(stream)) {
						PERROR("read metadata stream");
						/*
						 * Continue here to handle the rest of the streams.
						 */
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI);
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						health_code_update();

						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if we get
						 * a negative len, it means an error occurred thus we
						 * simply remove it from the poll set and free the
						 * stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file descriptors
				 * and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				do {
					health_code_update();

					len = ctx->on_buffer_ready(stream, ctx);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred thus we
					 * simply remove it from the poll set and free the
					 * stream.
					 */
				} while (len > 0);

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			}

			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

error:
end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
	destroy_stream_ht(metadata_ht);
end_ht:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
/*
 * This thread polls the fds in the set to consume the data and write
 * it to tracefile if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i, err = -1;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);

	health_code_update();

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (data_ht == NULL) {
		/* ENOMEM at this point. Better to bail out. */
		goto end;
	}

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		health_code_update();

		high_prio = 0;
		num_hup = 0;

		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/* allocate for all fds + 1 for the consumer_data_pipe */
			pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the consumer_data_pipe */
			local_stream = zmalloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit, consumer_cleanup the thread */
		if (nb_fd == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}
		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + 1);
		health_poll_entry();
		num_rdy = poll(pollfd, nb_fd + 1, -1);
		health_poll_exit();
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < sizeof(new_stream)) {
				PERROR("Consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read a high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}
end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

	destroy_data_stream_ht(data_ht);

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}
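/*
 * Layout of the pollfd array built above, shown as an illustrative sketch
 * since it explains the nb_fd + 1 arithmetic:
 *
 *	pollfd[0] .. pollfd[nb_fd - 1]	stream wait fds, mirrored by
 *					local_stream[0] .. local_stream[nb_fd - 1]
 *	pollfd[nb_fd]			read side of ctx->consumer_data_pipe
 *
 * Hence poll() is called on nb_fd + 1 entries, and pollfd[nb_fd] is tested
 * first so that array updates take priority over low-priority reads.
 */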
/*
 * Close the wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Note: a mutex is taken internally within
			 * liblttng-ust-ctl to protect timer wakeup_fd
			 * use from concurrent close.
			 */
			lttng_ustconsumer_close_stream_wakeup(stream);
			break;
		default:
			ERR("Unknown consumer_data type");
			assert(0);
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
		(void) ret;
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);

	health_code_update();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Channel main loop started");

	while (1) {
		health_code_update();

		/* Only the channel pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}

restart:
		DBG("Channel poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd */
			if (!revents) {
				continue;
			}

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						ERR("Error reading channel pipe");
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLIN | LPOLLPRI);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						struct lttng_consumer_stream *stream, *stmp;

						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);
						assert(ret == 0);
						consumer_close_channel_streams(chan);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							/* Delete streams that might have been left in the stream list. */
							cds_list_for_each_entry_safe(stream, stmp, &chan->streams.head,
									send_node) {
								health_code_update();

								cds_list_del(&stream->send_node);
								lttng_ustconsumer_del_stream(stream);
								uatomic_sub(&stream->chan->refcount, 1);
								assert(&chan->refcount);
								free(stream);
							}
							break;
						default:
							ERR("Unknown consumer_data type");
							assert(0);
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						goto restart;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				assert(ret == 0);
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
	DBG("Channel poll thread exiting");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
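/*
 * Sketch of the refcount convention used above, with hypothetical values:
 * uatomic_sub_return() returns the value after the decrement, so with
 * chan->refcount == 1 the test
 *
 *	if (!uatomic_sub_return(&chan->refcount, 1)) {
 *		consumer_del_channel(chan);
 *	}
 *
 * sees 0 and deletes the channel, meaning the caller held the last
 * reference; any other holder merely decrements and moves on.
 */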
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	assert(ctx);
	assert(sockpoll);

	if (lttng_consumer_poll_socket(sockpoll) < 0) {
		ret = -1;
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret, err = -1;
	/*
	 * Structure used to poll for incoming data on the communication socket;
	 * avoids making the sockets blocking.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);

	health_code_update();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* Prepare the FDs to poll: the client socket and the should_quit pipe. */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret < 0) {
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		health_code_update();

		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			err = 0;
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			err = 0;	/* All is OK */
			goto end;
		}
		DBG("received command on sock");
	}

end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon which
	 * exited.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_metadata();

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	consumer_quit = 1;

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set, so as to quit gracefully.
	 */
	notify_thread_lttng_pipe(ctx->consumer_data_pipe);

	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);

	notify_health_quit_pipe(health_quit_pipe);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	pthread_mutex_lock(&stream->lock);
	if (stream->metadata_flag) {
		pthread_mutex_lock(&stream->metadata_rdv_lock);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		ret = -ENOSYS;
		break;
	}

	if (stream->metadata_flag) {
		pthread_cond_broadcast(&stream->metadata_rdv);
		pthread_mutex_unlock(&stream->metadata_rdv_lock);
	}
	pthread_mutex_unlock(&stream->lock);
	return ret;
}
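/*
 * The broadcast above pairs with waiters sleeping on metadata_rdv. A
 * minimal waiter sketch, assuming some predicate metadata_consumed() that
 * the waiter re-checks (hypothetical helper, named here only for
 * illustration):
 *
 *	pthread_mutex_lock(&stream->metadata_rdv_lock);
 *	while (!metadata_consumed(stream)) {
 *		pthread_cond_wait(&stream->metadata_rdv,
 *				&stream->metadata_rdv_lock);
 *	}
 *	pthread_mutex_unlock(&stream->metadata_rdv_lock);
 *
 * The lock/wait pairing guarantees the waiter never misses a wake-up
 * between testing the predicate and going to sleep.
 */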
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
void lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
}
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
int consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
		uint64_t relayd_session_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttng_error_code ret_code = LTTNG_OK;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(ctx);
	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret = -ENOMEM;
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		} else {
			relayd->sessiond_session_id = sessiond_id;
			relayd_created = 1;
		}

		/*
		 * This code path MUST continue to the consumer send status message so
		 * we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTNG_OK);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		ret = -EINTR;
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		ret = -1;
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. In any case, we
		 * don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		fd = -1;	/* For error path */
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		relayd->relayd_session_id = relayd_session_id;

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		fd = -1;	/* for eventual error paths */
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret = -1;
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);

	/* We successfully added the socket. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/*
	 * Add relayd socket pair to consumer data hashtable. If object already
	 * exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return 0;

error:
	if (consumer_send_status_msg(sock, ret_code) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	}

error_nosignal:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}

	return ret;
}
/*
 * Try to lock the stream mutex.
 *
 * On success, 1 is returned, else 0 indicating that the mutex is NOT locked.
 */
static int stream_try_lock(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Try to lock the stream mutex. On failure, we know that the stream is
	 * being used elsewhere hence there is data still being extracted.
	 */
	ret = pthread_mutex_trylock(&stream->lock);
	if (ret) {
		/* For both EBUSY and EINVAL error, the mutex is NOT locked. */
		ret = 0;
		goto end;
	}

	ret = 1;

end:
	return ret;
}
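/*
 * Usage sketch: the return value is a boolean "locked" flag rather than an
 * errno, so a caller tests and acquires in a single step, e.g.:
 *
 *	if (!stream_try_lock(stream)) {
 *		goto data_pending;	(stream busy: data still being extracted)
 *	}
 *	... inspect the stream ...
 *	pthread_mutex_unlock(&stream->lock);
 */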
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * An RCU read side lock MUST be acquired before calling this function and held
 * until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here where the relayd session
		 * id might not be when having multiple relayd.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}
/*
 * Check if, for a given session id, there is still data to be extracted from
 * the buffers.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		assert(0);
	}

	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			/* Communication error with the relayd; report no data pending. */
			goto data_not_pending;
		}
	}

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		/* If this call fails, the stream is being used hence data pending. */
		ret = stream_try_lock(stream);
		if (!ret) {
			goto data_pending;
		}

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/*
			 * An empty output file is not valid. We need at least one packet
			 * generated per stream, even if it contains no event, so it
			 * contains at least one packet header.
			 */
			if (stream->output_written == 0) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		/* Relayd check */
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}
		pthread_mutex_unlock(&stream->lock);
	}

	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send end command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
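/*
 * Caller-side sketch (hypothetical; the actual polling policy lives in the
 * session daemon): the command is typically reissued until no data is
 * pending, at which point the trace files are complete and readable:
 *
 *	while (consumer_data_pending(session_id)) {
 *		... back off, then ask again ...
 *	}
 */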
/*
 * Send a return code status message to the session daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Send a channel status message to the session daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	assert(sock >= 0);

	if (!channel) {
		msg.ret_code = -LTTNG_ERR_UST_CHAN_FAIL;
	} else {
		msg.ret_code = LTTNG_OK;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Using a maximum stream size with the produced and consumed position of a
 * stream, computes the new consumed position to be as close as possible to the
 * maximum possible stream size.
 *
 * If the maximum stream size is lower than the amount of available data
 * (produced - consumed), the new consumed position is returned; otherwise the
 * consumed_pos given is returned untouched.
 */
unsigned long consumer_get_consumed_maxsize(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t max_stream_size)
{
	if (max_stream_size && max_stream_size < (produced_pos - consumed_pos)) {
		/* Offset from the produced position to get the latest buffers. */
		return produced_pos - max_stream_size;
	}

	return consumed_pos;
}
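/*
 * Worked example with hypothetical values: produced_pos = 1000,
 * consumed_pos = 100, max_stream_size = 256. The available data is
 * 1000 - 100 = 900, which exceeds the maximum, so 1000 - 256 = 744 is
 * returned and only the most recent 256 bytes worth of buffers are kept.
 * With max_stream_size = 0 (unlimited), consumed_pos (100) is returned
 * untouched.
 */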