Relayd data available command support
src/common/consumer.c (lttng-tools.git)
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * 2012 - David Goulet <dgoulet@efficios.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #define _GNU_SOURCE
21 #include <assert.h>
22 #include <poll.h>
23 #include <pthread.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/mman.h>
27 #include <sys/socket.h>
28 #include <sys/types.h>
29 #include <unistd.h>
30 #include <inttypes.h>
31
32 #include <common/common.h>
33 #include <common/utils.h>
34 #include <common/compat/poll.h>
35 #include <common/kernel-ctl/kernel-ctl.h>
36 #include <common/sessiond-comm/relayd.h>
37 #include <common/sessiond-comm/sessiond-comm.h>
38 #include <common/kernel-consumer/kernel-consumer.h>
39 #include <common/relayd/relayd.h>
40 #include <common/ust-consumer/ust-consumer.h>
41
42 #include "consumer.h"
43
44 struct lttng_consumer_global_data consumer_data = {
45 .stream_count = 0,
46 .need_update = 1,
47 .type = LTTNG_CONSUMER_UNKNOWN,
48 };
49
50 /* timeout parameter, to control the polling thread grace period. */
51 int consumer_poll_timeout = -1;
52
53 /*
54 * Flag to inform the polling thread to quit when all fds have hung up. Updated
55 * by consumer_thread_receive_fds when it notices that all fds have hung up.
56 * Also updated by the signal handler (consumer_should_exit()). Read by the
57 * polling threads.
58 */
59 volatile int consumer_quit = 0;
60
61 /*
62 * The following two hash tables are visible to all threads, which are spread
63 * across different source files.
64 *
65 * Global hash table containing respectively metadata and data streams. The
66 * stream element in this ht should only be updated by the metadata poll thread
67 * for the metadata and the data poll thread for the data.
68 */
69 struct lttng_ht *metadata_ht = NULL;
70 struct lttng_ht *data_ht = NULL;
71
72 /*
73 * Find a stream. The consumer_data.lock must be locked during this
74 * call.
75 */
76 static struct lttng_consumer_stream *consumer_find_stream(int key,
77 struct lttng_ht *ht)
78 {
79 struct lttng_ht_iter iter;
80 struct lttng_ht_node_ulong *node;
81 struct lttng_consumer_stream *stream = NULL;
82
83 assert(ht);
84
85 /* Negative keys are lookup failures */
86 if (key < 0) {
87 return NULL;
88 }
89
90 rcu_read_lock();
91
92 lttng_ht_lookup(ht, (void *)((unsigned long) key), &iter);
93 node = lttng_ht_iter_get_node_ulong(&iter);
94 if (node != NULL) {
95 stream = caa_container_of(node, struct lttng_consumer_stream, node);
96 }
97
98 rcu_read_unlock();
99
100 return stream;
101 }
102
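/*
 * Invalidate a stream's key so that subsequent lookups no longer match it,
 * while keeping the stream in the hash table for iteration.
 */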
103 void consumer_steal_stream_key(int key, struct lttng_ht *ht)
104 {
105 struct lttng_consumer_stream *stream;
106
107 rcu_read_lock();
108 stream = consumer_find_stream(key, ht);
109 if (stream) {
110 stream->key = -1;
111 /*
112 * We don't want the lookup to match, but we still need
113 * to iterate on this stream when iterating over the hash table. Just
114 * change the node key.
115 */
116 stream->node.key = -1;
117 }
118 rcu_read_unlock();
119 }
120
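/*
 * Find a channel by key in the global channel hash table. Returns NULL if the
 * key is negative or no matching channel exists.
 */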
121 static struct lttng_consumer_channel *consumer_find_channel(int key)
122 {
123 struct lttng_ht_iter iter;
124 struct lttng_ht_node_ulong *node;
125 struct lttng_consumer_channel *channel = NULL;
126
127 /* Negative keys are lookup failures */
128 if (key < 0) {
129 return NULL;
130 }
131
132 rcu_read_lock();
133
134 lttng_ht_lookup(consumer_data.channel_ht, (void *)((unsigned long) key),
135 &iter);
136 node = lttng_ht_iter_get_node_ulong(&iter);
137 if (node != NULL) {
138 channel = caa_container_of(node, struct lttng_consumer_channel, node);
139 }
140
141 rcu_read_unlock();
142
143 return channel;
144 }
145
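/*
 * Invalidate a channel's key so that subsequent lookups no longer match it,
 * while keeping the channel in the hash table for iteration.
 */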
146 static void consumer_steal_channel_key(int key)
147 {
148 struct lttng_consumer_channel *channel;
149
150 rcu_read_lock();
151 channel = consumer_find_channel(key);
152 if (channel) {
153 channel->key = -1;
154 /*
155 * We don't want the lookup to match, but we still need
156 * to iterate on this channel when iterating over the hash table. Just
157 * change the node key.
158 */
159 channel->node.key = -1;
160 }
161 rcu_read_unlock();
162 }
163
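/*
 * RCU protected stream free.
 */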
164 static
165 void consumer_free_stream(struct rcu_head *head)
166 {
167 struct lttng_ht_node_ulong *node =
168 caa_container_of(head, struct lttng_ht_node_ulong, head);
169 struct lttng_consumer_stream *stream =
170 caa_container_of(node, struct lttng_consumer_stream, node);
171
172 free(stream);
173 }
174
175 /*
176 * RCU protected relayd socket pair free.
177 */
178 static void consumer_rcu_free_relayd(struct rcu_head *head)
179 {
180 struct lttng_ht_node_ulong *node =
181 caa_container_of(head, struct lttng_ht_node_ulong, head);
182 struct consumer_relayd_sock_pair *relayd =
183 caa_container_of(node, struct consumer_relayd_sock_pair, node);
184
185 free(relayd);
186 }
187
188 /*
189 * Destroy and free relayd socket pair object.
190 *
191 * This function MUST be called with the consumer_data lock acquired.
192 */
193 static void destroy_relayd(struct consumer_relayd_sock_pair *relayd)
194 {
195 int ret;
196 struct lttng_ht_iter iter;
197
198 if (relayd == NULL) {
199 return;
200 }
201
202 DBG("Consumer destroy and close relayd socket pair");
203
204 iter.iter.node = &relayd->node.node;
205 ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
206 if (ret != 0) {
207 /* We assume the relayd was already destroyed */
208 return;
209 }
210
211 /* Close all sockets */
212 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
213 (void) relayd_close(&relayd->control_sock);
214 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
215 (void) relayd_close(&relayd->data_sock);
216
217 /* RCU free() call */
218 call_rcu(&relayd->node.head, consumer_rcu_free_relayd);
219 }
220
221 /*
222 * Flag a relayd socket pair for destruction. Destroy it if the refcount
223 * reaches zero.
224 *
225 * RCU read side lock MUST be acquired before calling this function.
226 */
227 void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
228 {
229 assert(relayd);
230
231 /* Set destroy flag for this object */
232 uatomic_set(&relayd->destroy_flag, 1);
233
234 /* Destroy the relayd if refcount is 0 */
235 if (uatomic_read(&relayd->refcount) == 0) {
236 destroy_relayd(relayd);
237 }
238 }
239
240 /*
241 * Remove a stream from the global list protected by a mutex. This
242 * function is also responsible for freeing its data structures.
243 */
244 void consumer_del_stream(struct lttng_consumer_stream *stream,
245 struct lttng_ht *ht)
246 {
247 int ret;
248 struct lttng_ht_iter iter;
249 struct lttng_consumer_channel *free_chan = NULL;
250 struct consumer_relayd_sock_pair *relayd;
251
252 assert(stream);
253
254 if (ht == NULL) {
255 /* Means the stream was allocated but not successfully added */
256 goto free_stream;
257 }
258
259 pthread_mutex_lock(&consumer_data.lock);
260
261 switch (consumer_data.type) {
262 case LTTNG_CONSUMER_KERNEL:
263 if (stream->mmap_base != NULL) {
264 ret = munmap(stream->mmap_base, stream->mmap_len);
265 if (ret != 0) {
266 PERROR("munmap");
267 }
268 }
269 break;
270 case LTTNG_CONSUMER32_UST:
271 case LTTNG_CONSUMER64_UST:
272 lttng_ustconsumer_del_stream(stream);
273 break;
274 default:
275 ERR("Unknown consumer_data type");
276 assert(0);
277 goto end;
278 }
279
280 rcu_read_lock();
281 iter.iter.node = &stream->node.node;
282 ret = lttng_ht_del(ht, &iter);
283 assert(!ret);
284
285 /* Remove node session id from the consumer_data stream ht */
286 iter.iter.node = &stream->node_session_id.node;
287 ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
288 assert(!ret);
289 rcu_read_unlock();
290
291 assert(consumer_data.stream_count > 0);
292 consumer_data.stream_count--;
293
294 if (stream->out_fd >= 0) {
295 ret = close(stream->out_fd);
296 if (ret) {
297 PERROR("close");
298 }
299 }
300 if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
301 ret = close(stream->wait_fd);
302 if (ret) {
303 PERROR("close");
304 }
305 }
306 if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
307 ret = close(stream->shm_fd);
308 if (ret) {
309 PERROR("close");
310 }
311 }
312
313 /* Check and cleanup relayd */
314 rcu_read_lock();
315 relayd = consumer_find_relayd(stream->net_seq_idx);
316 if (relayd != NULL) {
317 uatomic_dec(&relayd->refcount);
318 assert(uatomic_read(&relayd->refcount) >= 0);
319
320 /* Closing streams requires locking the control socket. */
321 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
322 ret = relayd_send_close_stream(&relayd->control_sock,
323 stream->relayd_stream_id,
324 stream->next_net_seq_num - 1);
325 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
326 if (ret < 0) {
327 DBG("Unable to close stream on the relayd. Continuing");
328 /*
329 * Continue here. There is nothing we can do for the relayd.
330 * Chances are that the relayd has closed the socket so we just
331 * continue cleaning up.
332 */
333 }
334
335 /* If both conditions are met, we destroy the relayd. */
336 if (uatomic_read(&relayd->refcount) == 0 &&
337 uatomic_read(&relayd->destroy_flag)) {
338 destroy_relayd(relayd);
339 }
340 }
341 rcu_read_unlock();
342
343 uatomic_dec(&stream->chan->refcount);
344 if (!uatomic_read(&stream->chan->refcount)
345 && !uatomic_read(&stream->chan->nb_init_streams)) {
346 free_chan = stream->chan;
347 }
348
349 end:
350 consumer_data.need_update = 1;
351 pthread_mutex_unlock(&consumer_data.lock);
352
353 if (free_chan) {
354 consumer_del_channel(free_chan);
355 }
356
357 free_stream:
358 call_rcu(&stream->node.head, consumer_free_stream);
359 }
360
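/*
 * Allocate and initialize a consumer stream object and link it to its channel.
 *
 * Returns the new stream, or NULL on error in which case *alloc_ret is set to
 * a negative errno value.
 */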
361 struct lttng_consumer_stream *consumer_allocate_stream(
362 int channel_key, int stream_key,
363 int shm_fd, int wait_fd,
364 enum lttng_consumer_stream_state state,
365 uint64_t mmap_len,
366 enum lttng_event_output output,
367 const char *path_name,
368 uid_t uid,
369 gid_t gid,
370 int net_index,
371 int metadata_flag,
372 uint64_t session_id,
373 int *alloc_ret)
374 {
375 struct lttng_consumer_stream *stream;
376
377 stream = zmalloc(sizeof(*stream));
378 if (stream == NULL) {
379 PERROR("malloc struct lttng_consumer_stream");
380 *alloc_ret = -ENOMEM;
381 goto end;
382 }
383
384 /*
385 * Get stream's channel reference. Needed when adding the stream to the
386 * global hash table.
387 */
388 stream->chan = consumer_find_channel(channel_key);
389 if (!stream->chan) {
390 *alloc_ret = -ENOENT;
391 ERR("Unable to find channel for stream %d", stream_key);
392 goto error;
393 }
394
395 stream->key = stream_key;
396 stream->shm_fd = shm_fd;
397 stream->wait_fd = wait_fd;
398 stream->out_fd = -1;
399 stream->out_fd_offset = 0;
400 stream->state = state;
401 stream->mmap_len = mmap_len;
402 stream->mmap_base = NULL;
403 stream->output = output;
404 stream->uid = uid;
405 stream->gid = gid;
406 stream->net_seq_idx = net_index;
407 stream->metadata_flag = metadata_flag;
408 stream->session_id = session_id;
409 strncpy(stream->path_name, path_name, sizeof(stream->path_name));
410 stream->path_name[sizeof(stream->path_name) - 1] = '\0';
411 pthread_mutex_init(&stream->lock, NULL);
412
413 /*
414 * Index the metadata node differently because the thread is using an
415 * internal hash table to match streams in the metadata_ht to the epoll set
416 * file descriptor.
417 */
418 if (metadata_flag) {
419 lttng_ht_node_init_ulong(&stream->node, stream->wait_fd);
420 } else {
421 lttng_ht_node_init_ulong(&stream->node, stream->key);
422 }
423
424 /* Init session id node with the stream session id */
425 lttng_ht_node_init_ulong(&stream->node_session_id, stream->session_id);
426
427 /*
428 * The cpu number is needed before using any ustctl_* actions. Ignored for
429 * the kernel so the value does not matter.
430 */
431 pthread_mutex_lock(&consumer_data.lock);
432 stream->cpu = stream->chan->cpucount++;
433 pthread_mutex_unlock(&consumer_data.lock);
434
435 DBG3("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu,"
436 " out_fd %d, net_seq_idx %d, session_id %" PRIu64,
437 stream->path_name, stream->key, stream->shm_fd, stream->wait_fd,
438 (unsigned long long) stream->mmap_len, stream->out_fd,
439 stream->net_seq_idx, stream->session_id);
440 return stream;
441
442 error:
443 free(stream);
444 end:
445 return NULL;
446 }
447
448 /*
449 * Add a stream to the global list protected by a mutex.
450 */
451 static int consumer_add_stream(struct lttng_consumer_stream *stream,
452 struct lttng_ht *ht)
453 {
454 int ret = 0;
455 struct consumer_relayd_sock_pair *relayd;
456
457 assert(stream);
458 assert(ht);
459
460 DBG3("Adding consumer stream %d", stream->key);
461
462 pthread_mutex_lock(&consumer_data.lock);
463 rcu_read_lock();
464
465 /* Steal stream identifier to avoid having streams with the same key */
466 consumer_steal_stream_key(stream->key, ht);
467
468 lttng_ht_add_unique_ulong(ht, &stream->node);
469
470 /*
471 * Add stream to the stream_list_ht of the consumer data. No need to steal
472 * the key since the HT does not use it and we allow adding redundant keys
473 * into this table.
474 */
475 lttng_ht_add_ulong(consumer_data.stream_list_ht, &stream->node_session_id);
476
477 /* Find relayd and, if one is found, increment its refcount. */
478 relayd = consumer_find_relayd(stream->net_seq_idx);
479 if (relayd != NULL) {
480 uatomic_inc(&relayd->refcount);
481 }
482
483 /* Update channel refcount once added without error(s). */
484 uatomic_inc(&stream->chan->refcount);
485
486 /*
487 * When nb_init_streams reaches 0, we don't need to trigger any action in
488 * terms of destroying the associated channel, because the action that
489 * causes the count to become 0 also causes a stream to be added. The
490 * channel deletion will thus be triggered by the following removal of this
491 * stream.
492 */
493 if (uatomic_read(&stream->chan->nb_init_streams) > 0) {
494 uatomic_dec(&stream->chan->nb_init_streams);
495 }
496
497 /* Update consumer data once the node is inserted. */
498 consumer_data.stream_count++;
499 consumer_data.need_update = 1;
500
501 rcu_read_unlock();
502 pthread_mutex_unlock(&consumer_data.lock);
503
504 return ret;
505 }
506
507 /*
508 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
509 * be acquired before calling this.
510 */
511 static int add_relayd(struct consumer_relayd_sock_pair *relayd)
512 {
513 int ret = 0;
514 struct lttng_ht_node_ulong *node;
515 struct lttng_ht_iter iter;
516
517 if (relayd == NULL) {
518 ret = -1;
519 goto end;
520 }
521
522 lttng_ht_lookup(consumer_data.relayd_ht,
523 (void *)((unsigned long) relayd->net_seq_idx), &iter);
524 node = lttng_ht_iter_get_node_ulong(&iter);
525 if (node != NULL) {
526 /* Relayd already exists. Ignore the insertion. */
527 goto end;
528 }
529 lttng_ht_add_unique_ulong(consumer_data.relayd_ht, &relayd->node);
530
531 end:
532 return ret;
533 }
534
535 /*
536 * Allocate and return a consumer relayd socket.
537 */
538 struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
539 int net_seq_idx)
540 {
541 struct consumer_relayd_sock_pair *obj = NULL;
542
543 /* Negative net sequence index is a failure */
544 if (net_seq_idx < 0) {
545 goto error;
546 }
547
548 obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
549 if (obj == NULL) {
550 PERROR("zmalloc relayd sock");
551 goto error;
552 }
553
554 obj->net_seq_idx = net_seq_idx;
555 obj->refcount = 0;
556 obj->destroy_flag = 0;
557 lttng_ht_node_init_ulong(&obj->node, obj->net_seq_idx);
558 pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);
559
560 error:
561 return obj;
562 }
563
564 /*
565 * Find a relayd socket pair in the global consumer data.
566 *
567 * Return the object if found else NULL.
568 * RCU read-side lock must be held across this call and while using the
569 * returned object.
570 */
571 struct consumer_relayd_sock_pair *consumer_find_relayd(int key)
572 {
573 struct lttng_ht_iter iter;
574 struct lttng_ht_node_ulong *node;
575 struct consumer_relayd_sock_pair *relayd = NULL;
576
577 /* Negative keys are lookup failures */
578 if (key < 0) {
579 goto error;
580 }
581
582 lttng_ht_lookup(consumer_data.relayd_ht, (void *)((unsigned long) key),
583 &iter);
584 node = lttng_ht_iter_get_node_ulong(&iter);
585 if (node != NULL) {
586 relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
587 }
588
589 error:
590 return relayd;
591 }
592
593 /*
594 * Handle a stream for relayd transmission if the stream is set up for network
595 * streaming, i.e. its net sequence index is set.
596 *
597 * Return destination file descriptor or negative value on error.
598 */
599 static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
600 size_t data_size, unsigned long padding,
601 struct consumer_relayd_sock_pair *relayd)
602 {
603 int outfd = -1, ret;
604 struct lttcomm_relayd_data_hdr data_hdr;
605
606 /* Safety net */
607 assert(stream);
608 assert(relayd);
609
610 /* Reset data header */
611 memset(&data_hdr, 0, sizeof(data_hdr));
612
613 if (stream->metadata_flag) {
614 /* Caller MUST acquire the relayd control socket lock */
615 ret = relayd_send_metadata(&relayd->control_sock, data_size);
616 if (ret < 0) {
617 goto error;
618 }
619
620 /* Metadata are always sent on the control socket. */
621 outfd = relayd->control_sock.fd;
622 } else {
623 /* Set header with stream information */
624 data_hdr.stream_id = htobe64(stream->relayd_stream_id);
625 data_hdr.data_size = htobe32(data_size);
626 data_hdr.padding_size = htobe32(padding);
627 data_hdr.net_seq_num = htobe64(stream->next_net_seq_num++);
628 /* Other fields are zeroed previously */
629
630 ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
631 sizeof(data_hdr));
632 if (ret < 0) {
633 goto error;
634 }
635
636 /* Set to go on data socket */
637 outfd = relayd->data_sock.fd;
638 }
639
640 error:
641 return outfd;
642 }
643
644 static
645 void consumer_free_channel(struct rcu_head *head)
646 {
647 struct lttng_ht_node_ulong *node =
648 caa_container_of(head, struct lttng_ht_node_ulong, head);
649 struct lttng_consumer_channel *channel =
650 caa_container_of(node, struct lttng_consumer_channel, node);
651
652 free(channel);
653 }
654
655 /*
656 * Remove a channel from the global list protected by a mutex. This
657 * function is also responsible for freeing its data structures.
658 */
659 void consumer_del_channel(struct lttng_consumer_channel *channel)
660 {
661 int ret;
662 struct lttng_ht_iter iter;
663
664 pthread_mutex_lock(&consumer_data.lock);
665
666 switch (consumer_data.type) {
667 case LTTNG_CONSUMER_KERNEL:
668 break;
669 case LTTNG_CONSUMER32_UST:
670 case LTTNG_CONSUMER64_UST:
671 lttng_ustconsumer_del_channel(channel);
672 break;
673 default:
674 ERR("Unknown consumer_data type");
675 assert(0);
676 goto end;
677 }
678
679 rcu_read_lock();
680 iter.iter.node = &channel->node.node;
681 ret = lttng_ht_del(consumer_data.channel_ht, &iter);
682 assert(!ret);
683 rcu_read_unlock();
684
685 if (channel->mmap_base != NULL) {
686 ret = munmap(channel->mmap_base, channel->mmap_len);
687 if (ret != 0) {
688 PERROR("munmap");
689 }
690 }
691 if (channel->wait_fd >= 0 && !channel->wait_fd_is_copy) {
692 ret = close(channel->wait_fd);
693 if (ret) {
694 PERROR("close");
695 }
696 }
697 if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd) {
698 ret = close(channel->shm_fd);
699 if (ret) {
700 PERROR("close");
701 }
702 }
703
704 call_rcu(&channel->node.head, consumer_free_channel);
705 end:
706 pthread_mutex_unlock(&consumer_data.lock);
707 }
708
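/*
 * Allocate and initialize a consumer channel object.
 *
 * Returns the new channel or NULL on error.
 */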
709 struct lttng_consumer_channel *consumer_allocate_channel(
710 int channel_key,
711 int shm_fd, int wait_fd,
712 uint64_t mmap_len,
713 uint64_t max_sb_size,
714 unsigned int nb_init_streams)
715 {
716 struct lttng_consumer_channel *channel;
717 int ret;
718
719 channel = zmalloc(sizeof(*channel));
720 if (channel == NULL) {
721 PERROR("malloc struct lttng_consumer_channel");
722 goto end;
723 }
724 channel->key = channel_key;
725 channel->shm_fd = shm_fd;
726 channel->wait_fd = wait_fd;
727 channel->mmap_len = mmap_len;
728 channel->max_sb_size = max_sb_size;
729 channel->refcount = 0;
730 channel->nb_init_streams = nb_init_streams;
731 lttng_ht_node_init_ulong(&channel->node, channel->key);
732
733 switch (consumer_data.type) {
734 case LTTNG_CONSUMER_KERNEL:
735 channel->mmap_base = NULL;
736 channel->mmap_len = 0;
737 break;
738 case LTTNG_CONSUMER32_UST:
739 case LTTNG_CONSUMER64_UST:
740 ret = lttng_ustconsumer_allocate_channel(channel);
741 if (ret) {
742 free(channel);
743 return NULL;
744 }
745 break;
746 default:
747 ERR("Unknown consumer_data type");
748 assert(0);
749 goto end;
750 }
751 DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
752 channel->key, channel->shm_fd, channel->wait_fd,
753 (unsigned long long) channel->mmap_len,
754 (unsigned long long) channel->max_sb_size);
755 end:
756 return channel;
757 }
758
759 /*
760 * Add a channel to the global list protected by a mutex.
761 */
762 int consumer_add_channel(struct lttng_consumer_channel *channel)
763 {
764 struct lttng_ht_node_ulong *node;
765 struct lttng_ht_iter iter;
766
767 pthread_mutex_lock(&consumer_data.lock);
768 /* Steal channel identifier, for UST */
769 consumer_steal_channel_key(channel->key);
770 rcu_read_lock();
771
772 lttng_ht_lookup(consumer_data.channel_ht,
773 (void *)((unsigned long) channel->key), &iter);
774 node = lttng_ht_iter_get_node_ulong(&iter);
775 if (node != NULL) {
776 /* Channel already exists. Ignore the insertion. */
777 goto end;
778 }
779
780 lttng_ht_add_unique_ulong(consumer_data.channel_ht, &channel->node);
781
782 end:
783 rcu_read_unlock();
784 pthread_mutex_unlock(&consumer_data.lock);
785
786 return 0;
787 }
788
789 /*
790 * Allocate the pollfd structure and the local view of the out fds to avoid
791 * doing a lookup in the linked list and to prevent concurrency issues when
792 * writing is needed. Called with consumer_data.lock held.
793 *
794 * Returns the number of fds in the structures.
795 */
796 static int consumer_update_poll_array(
797 struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
798 struct lttng_consumer_stream **local_stream, struct lttng_ht *ht)
799 {
800 int i = 0;
801 struct lttng_ht_iter iter;
802 struct lttng_consumer_stream *stream;
803
804 DBG("Updating poll fd array");
805 rcu_read_lock();
806 cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
807 if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
808 continue;
809 }
810 DBG("Active FD %d", stream->wait_fd);
811 (*pollfd)[i].fd = stream->wait_fd;
812 (*pollfd)[i].events = POLLIN | POLLPRI;
813 local_stream[i] = stream;
814 i++;
815 }
816 rcu_read_unlock();
817
818 /*
819 * Insert the consumer_data_pipe at the end of the array and don't
820 * increment i so nb_fd is the number of real FD.
821 */
822 (*pollfd)[i].fd = ctx->consumer_data_pipe[0];
823 (*pollfd)[i].events = POLLIN | POLLPRI;
824 return i;
825 }
826
827 /*
828 * Poll on the should_quit pipe and the command socket. Return -1 on error (the
829 * caller should exit), or 0 if data is available on the command socket.
830 */
831 int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
832 {
833 int num_rdy;
834
835 restart:
836 num_rdy = poll(consumer_sockpoll, 2, -1);
837 if (num_rdy == -1) {
838 /*
839 * Restart interrupted system call.
840 */
841 if (errno == EINTR) {
842 goto restart;
843 }
844 PERROR("Poll error");
845 goto exit;
846 }
847 if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
848 DBG("consumer_should_quit wake up");
849 goto exit;
850 }
851 return 0;
852
853 exit:
854 return -1;
855 }
856
857 /*
858 * Set the error socket.
859 */
860 void lttng_consumer_set_error_sock(
861 struct lttng_consumer_local_data *ctx, int sock)
862 {
863 ctx->consumer_error_socket = sock;
864 }
865
866 /*
867 * Set the command socket path.
868 */
869 void lttng_consumer_set_command_sock_path(
870 struct lttng_consumer_local_data *ctx, char *sock)
871 {
872 ctx->consumer_command_sock_path = sock;
873 }
874
875 /*
876 * Send return code to the session daemon.
877 * If the socket is not defined, we return 0; it is not a fatal error.
878 */
879 int lttng_consumer_send_error(
880 struct lttng_consumer_local_data *ctx, int cmd)
881 {
882 if (ctx->consumer_error_socket > 0) {
883 return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
884 sizeof(enum lttcomm_sessiond_command));
885 }
886
887 return 0;
888 }
889
890 /*
891 * Close all the tracefiles and stream fds. Should be called when all instances
892 * are destroyed.
893 */
894 void lttng_consumer_cleanup(void)
895 {
896 struct lttng_ht_iter iter;
897 struct lttng_ht_node_ulong *node;
898
899 rcu_read_lock();
900
901 cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, node,
902 node) {
903 struct lttng_consumer_channel *channel =
904 caa_container_of(node, struct lttng_consumer_channel, node);
905 consumer_del_channel(channel);
906 }
907
908 rcu_read_unlock();
909
910 lttng_ht_destroy(consumer_data.channel_ht);
911 }
912
913 /*
914 * Called from signal handler.
915 */
916 void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
917 {
918 int ret;
919 consumer_quit = 1;
920 do {
921 ret = write(ctx->consumer_should_quit[1], "4", 1);
922 } while (ret < 0 && errno == EINTR);
923 if (ret < 0) {
924 PERROR("write consumer quit");
925 }
926 }
927
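/*
 * Ask the kernel to write out and drop the page cache pages of the previous
 * subbuffer so trace files do not fill the page cache. Errors are ignored;
 * these calls are only hints.
 */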
928 void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
929 off_t orig_offset)
930 {
931 int outfd = stream->out_fd;
932
933 /*
934 * This does a blocking write-and-wait on any page that belongs to the
935 * subbuffer prior to the one we just wrote.
936 * Don't care about error values, as these are just hints and ways to
937 * limit the amount of page cache used.
938 */
939 if (orig_offset < stream->chan->max_sb_size) {
940 return;
941 }
942 lttng_sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
943 stream->chan->max_sb_size,
944 SYNC_FILE_RANGE_WAIT_BEFORE
945 | SYNC_FILE_RANGE_WRITE
946 | SYNC_FILE_RANGE_WAIT_AFTER);
947 /*
948 * Give hints to the kernel about how we access the file:
949 * POSIX_FADV_DONTNEED: we won't re-access the data in the near future after
950 * we write it.
951 *
952 * We need to call fadvise again after the file grows because the
953 * kernel does not seem to apply fadvise to non-existing parts of the
954 * file.
955 *
956 * Call fadvise _after_ having waited for the page writeback to
957 * complete because the dirty page writeback semantic is not well
958 * defined. So it can be expected to lead to lower throughput in
959 * streaming.
960 */
961 posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
962 stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
963 }
964
965 /*
966 * Initialise the necessary environment:
967 * - create a new context
968 * - create the poll_pipe
969 * - create the should_quit pipe (for signal handler)
970 * - create the thread pipe (for splice)
971 *
972 * Takes a function pointer as argument; this function is called when data is
973 * available on a buffer. That function is responsible for calling
974 * kernctl_get_next_subbuf, reading the data with mmap or splice depending on
975 * the buffer configuration, and then calling kernctl_put_next_subbuf at the end.
976 *
977 * Returns a pointer to the new context or NULL on error.
978 */
979 struct lttng_consumer_local_data *lttng_consumer_create(
980 enum lttng_consumer_type type,
981 ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
982 struct lttng_consumer_local_data *ctx),
983 int (*recv_channel)(struct lttng_consumer_channel *channel),
984 int (*recv_stream)(struct lttng_consumer_stream *stream),
985 int (*update_stream)(int stream_key, uint32_t state))
986 {
987 int ret, i;
988 struct lttng_consumer_local_data *ctx;
989
990 assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
991 consumer_data.type == type);
992 consumer_data.type = type;
993
994 ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
995 if (ctx == NULL) {
996 PERROR("allocating context");
997 goto error;
998 }
999
1000 ctx->consumer_error_socket = -1;
1001 /* assign the callbacks */
1002 ctx->on_buffer_ready = buffer_ready;
1003 ctx->on_recv_channel = recv_channel;
1004 ctx->on_recv_stream = recv_stream;
1005 ctx->on_update_stream = update_stream;
1006
1007 ret = pipe(ctx->consumer_data_pipe);
1008 if (ret < 0) {
1009 PERROR("Error creating poll pipe");
1010 goto error_poll_pipe;
1011 }
1012
1013 /* set read end of the pipe to non-blocking */
1014 ret = fcntl(ctx->consumer_data_pipe[0], F_SETFL, O_NONBLOCK);
1015 if (ret < 0) {
1016 PERROR("fcntl O_NONBLOCK");
1017 goto error_poll_fcntl;
1018 }
1019
1020 /* set write end of the pipe to non-blocking */
1021 ret = fcntl(ctx->consumer_data_pipe[1], F_SETFL, O_NONBLOCK);
1022 if (ret < 0) {
1023 PERROR("fcntl O_NONBLOCK");
1024 goto error_poll_fcntl;
1025 }
1026
1027 ret = pipe(ctx->consumer_should_quit);
1028 if (ret < 0) {
1029 PERROR("Error creating recv pipe");
1030 goto error_quit_pipe;
1031 }
1032
1033 ret = pipe(ctx->consumer_thread_pipe);
1034 if (ret < 0) {
1035 PERROR("Error creating thread pipe");
1036 goto error_thread_pipe;
1037 }
1038
1039 ret = utils_create_pipe(ctx->consumer_metadata_pipe);
1040 if (ret < 0) {
1041 goto error_metadata_pipe;
1042 }
1043
1044 ret = utils_create_pipe(ctx->consumer_splice_metadata_pipe);
1045 if (ret < 0) {
1046 goto error_splice_pipe;
1047 }
1048
1049 return ctx;
1050
1051 error_splice_pipe:
1052 utils_close_pipe(ctx->consumer_metadata_pipe);
1053 error_metadata_pipe:
1054 utils_close_pipe(ctx->consumer_thread_pipe);
1055 error_thread_pipe:
1056 for (i = 0; i < 2; i++) {
1057 int err;
1058
1059 err = close(ctx->consumer_should_quit[i]);
1060 if (err) {
1061 PERROR("close");
1062 }
1063 }
1064 error_poll_fcntl:
1065 error_quit_pipe:
1066 for (i = 0; i < 2; i++) {
1067 int err;
1068
1069 err = close(ctx->consumer_data_pipe[i]);
1070 if (err) {
1071 PERROR("close");
1072 }
1073 }
1074 error_poll_pipe:
1075 free(ctx);
1076 error:
1077 return NULL;
1078 }
1079
1080 /*
1081 * Close all fds associated with the instance and free the context.
1082 */
1083 void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
1084 {
1085 int ret;
1086
1087 ret = close(ctx->consumer_error_socket);
1088 if (ret) {
1089 PERROR("close");
1090 }
1091 ret = close(ctx->consumer_thread_pipe[0]);
1092 if (ret) {
1093 PERROR("close");
1094 }
1095 ret = close(ctx->consumer_thread_pipe[1]);
1096 if (ret) {
1097 PERROR("close");
1098 }
1099 ret = close(ctx->consumer_data_pipe[0]);
1100 if (ret) {
1101 PERROR("close");
1102 }
1103 ret = close(ctx->consumer_data_pipe[1]);
1104 if (ret) {
1105 PERROR("close");
1106 }
1107 ret = close(ctx->consumer_should_quit[0]);
1108 if (ret) {
1109 PERROR("close");
1110 }
1111 ret = close(ctx->consumer_should_quit[1]);
1112 if (ret) {
1113 PERROR("close");
1114 }
1115 utils_close_pipe(ctx->consumer_splice_metadata_pipe);
1116
1117 unlink(ctx->consumer_command_sock_path);
1118 free(ctx);
1119 }
1120
1121 /*
1122 * Write the metadata stream id on the specified file descriptor.
1123 */
1124 static int write_relayd_metadata_id(int fd,
1125 struct lttng_consumer_stream *stream,
1126 struct consumer_relayd_sock_pair *relayd,
1127 unsigned long padding)
1128 {
1129 int ret;
1130 struct lttcomm_relayd_metadata_payload hdr;
1131
1132 hdr.stream_id = htobe64(stream->relayd_stream_id);
1133 hdr.padding_size = htobe32(padding);
1134 do {
1135 ret = write(fd, (void *) &hdr, sizeof(hdr));
1136 } while (ret < 0 && errno == EINTR);
1137 if (ret < 0) {
1138 PERROR("write metadata stream id");
1139 goto end;
1140 }
1141 DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
1142 stream->relayd_stream_id, padding);
1143
1144 end:
1145 return ret;
1146 }
1147
1148 /*
1149 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
1150 * core function for writing trace buffers to either the local filesystem or
1151 * the network.
1152 *
1153 * Careful review MUST be done if any changes occur!
1154 *
1155 * Returns the number of bytes written
1156 */
1157 ssize_t lttng_consumer_on_read_subbuffer_mmap(
1158 struct lttng_consumer_local_data *ctx,
1159 struct lttng_consumer_stream *stream, unsigned long len,
1160 unsigned long padding)
1161 {
1162 unsigned long mmap_offset;
1163 ssize_t ret = 0, written = 0;
1164 off_t orig_offset = stream->out_fd_offset;
1165 /* Default is on the disk */
1166 int outfd = stream->out_fd;
1167 struct consumer_relayd_sock_pair *relayd = NULL;
1168
1169 /* RCU lock for the relayd pointer */
1170 rcu_read_lock();
1171
1172 pthread_mutex_lock(&stream->lock);
1173
1174 /* Check if the current stream is set for network streaming. */
1175 if (stream->net_seq_idx != -1) {
1176 relayd = consumer_find_relayd(stream->net_seq_idx);
1177 if (relayd == NULL) {
1178 goto end;
1179 }
1180 }
1181
1182 /* get the offset inside the fd to mmap */
1183 switch (consumer_data.type) {
1184 case LTTNG_CONSUMER_KERNEL:
1185 ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
1186 break;
1187 case LTTNG_CONSUMER32_UST:
1188 case LTTNG_CONSUMER64_UST:
1189 ret = lttng_ustctl_get_mmap_read_offset(stream->chan->handle,
1190 stream->buf, &mmap_offset);
1191 break;
1192 default:
1193 ERR("Unknown consumer_data type");
1194 assert(0);
1195 }
1196 if (ret != 0) {
1197 errno = -ret;
1198 PERROR("tracer ctl get_mmap_read_offset");
1199 written = ret;
1200 goto end;
1201 }
1202
1203 /* Handle stream on the relayd if the output is on the network */
1204 if (relayd) {
1205 unsigned long netlen = len;
1206
1207 /*
1208 * Lock the control socket for the complete duration of the function
1209 * since from this point on we will use the socket.
1210 */
1211 if (stream->metadata_flag) {
1212 /* Metadata requires the control socket. */
1213 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
1214 netlen += sizeof(struct lttcomm_relayd_metadata_payload);
1215 }
1216
1217 ret = write_relayd_stream_header(stream, netlen, padding, relayd);
1218 if (ret >= 0) {
1219 /* Use the returned socket. */
1220 outfd = ret;
1221
1222 /* Write metadata stream id before payload */
1223 if (stream->metadata_flag) {
1224 ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
1225 if (ret < 0) {
1226 written = ret;
1227 goto end;
1228 }
1229 }
1230 }
1231 /* Else, use the default output set earlier, which is the filesystem. */
1232 } else {
1233 /* No streaming, so the full padding must be written out; add it to len. */
1234 len += padding;
1235 }
1236
1237 while (len > 0) {
1238 do {
1239 ret = write(outfd, stream->mmap_base + mmap_offset, len);
1240 } while (ret < 0 && errno == EINTR);
1241 DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
1242 if (ret < 0) {
1243 PERROR("Error in file write");
1244 if (written == 0) {
1245 written = ret;
1246 }
1247 goto end;
1248 } else if (ret > len) {
1249 PERROR("Error in file write (ret %zd > len %lu)", ret, len);
1250 written += ret;
1251 goto end;
1252 } else {
1253 len -= ret;
1254 mmap_offset += ret;
1255 }
1256
1257 /* This call is useless on a socket so better save a syscall. */
1258 if (!relayd) {
1259 /* This won't block, but will start writeout asynchronously */
1260 lttng_sync_file_range(outfd, stream->out_fd_offset, ret,
1261 SYNC_FILE_RANGE_WRITE);
1262 stream->out_fd_offset += ret;
1263 }
1264 written += ret;
1265 }
1266 lttng_consumer_sync_trace_file(stream, orig_offset);
1267
1268 end:
1269 pthread_mutex_unlock(&stream->lock);
1270 /* Unlock only if ctrl socket used */
1271 if (relayd && stream->metadata_flag) {
1272 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
1273 }
1274
1275 rcu_read_unlock();
1276 return written;
1277 }
1278
1279 /*
1280 * Splice the data from the ring buffer to the tracefile.
1281 *
1282 * Returns the number of bytes spliced.
1283 */
1284 ssize_t lttng_consumer_on_read_subbuffer_splice(
1285 struct lttng_consumer_local_data *ctx,
1286 struct lttng_consumer_stream *stream, unsigned long len,
1287 unsigned long padding)
1288 {
1289 ssize_t ret = 0, written = 0, ret_splice = 0;
1290 loff_t offset = 0;
1291 off_t orig_offset = stream->out_fd_offset;
1292 int fd = stream->wait_fd;
1293 /* Default is on the disk */
1294 int outfd = stream->out_fd;
1295 struct consumer_relayd_sock_pair *relayd = NULL;
1296 int *splice_pipe;
1297
1298 switch (consumer_data.type) {
1299 case LTTNG_CONSUMER_KERNEL:
1300 break;
1301 case LTTNG_CONSUMER32_UST:
1302 case LTTNG_CONSUMER64_UST:
1303 /* Not supported for user space tracing */
1304 return -ENOSYS;
1305 default:
1306 ERR("Unknown consumer_data type");
1307 assert(0);
1308 }
1309
1310 /* RCU lock for the relayd pointer */
1311 rcu_read_lock();
1312
1313 pthread_mutex_lock(&stream->lock);
1314
1315 /* Check if the current stream is set for network streaming. */
1316 if (stream->net_seq_idx != -1) {
1317 relayd = consumer_find_relayd(stream->net_seq_idx);
1318 if (relayd == NULL) {
1319 goto end;
1320 }
1321 }
1322
1323 /*
1324 * Choose the right pipe for splice. Metadata and trace data are handled by
1325 * different threads, hence the use of two pipes in order not to race on or
1326 * corrupt the written data.
1327 */
1328 if (stream->metadata_flag) {
1329 splice_pipe = ctx->consumer_splice_metadata_pipe;
1330 } else {
1331 splice_pipe = ctx->consumer_thread_pipe;
1332 }
1333
1334 /* Write metadata stream id before payload */
1335 if (relayd) {
1336 int total_len = len;
1337
1338 if (stream->metadata_flag) {
1339 /*
1340 * Lock the control socket for the complete duration of the function
1341 * since from this point on we will use the socket.
1342 */
1343 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
1344
1345 ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
1346 padding);
1347 if (ret < 0) {
1348 written = ret;
1349 goto end;
1350 }
1351
1352 total_len += sizeof(struct lttcomm_relayd_metadata_payload);
1353 }
1354
1355 ret = write_relayd_stream_header(stream, total_len, padding, relayd);
1356 if (ret >= 0) {
1357 /* Use the returned socket. */
1358 outfd = ret;
1359 } else {
1360 ERR("Remote relayd disconnected. Stopping");
1361 goto end;
1362 }
1363 } else {
1364 /* No streaming, so the full padding must be written out; add it to len. */
1365 len += padding;
1366 }
1367
1368 while (len > 0) {
1369 DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
1370 (unsigned long)offset, len, fd, splice_pipe[1]);
1371 ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
1372 SPLICE_F_MOVE | SPLICE_F_MORE);
1373 DBG("splice chan to pipe, ret %zd", ret_splice);
1374 if (ret_splice < 0) {
1375 PERROR("Error in relay splice");
1376 if (written == 0) {
1377 written = ret_splice;
1378 }
1379 ret = errno;
1380 goto splice_error;
1381 }
1382
1383 /* Handle stream on the relayd if the output is on the network */
1384 if (relayd) {
1385 if (stream->metadata_flag) {
1386 size_t metadata_payload_size =
1387 sizeof(struct lttcomm_relayd_metadata_payload);
1388
1389 /* Update counter to fit the spliced data */
1390 ret_splice += metadata_payload_size;
1391 len += metadata_payload_size;
1392 /*
1393 * We do this so the return value can match the len passed as
1394 * argument to this function.
1395 */
1396 written -= metadata_payload_size;
1397 }
1398 }
1399
1400 /* Splice data out */
1401 ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
1402 ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
1403 DBG("Consumer splice pipe to file, ret %zd", ret_splice);
1404 if (ret_splice < 0) {
1405 PERROR("Error in file splice");
1406 if (written == 0) {
1407 written = ret_splice;
1408 }
1409 ret = errno;
1410 goto splice_error;
1411 } else if (ret_splice > len) {
1412 errno = EINVAL;
1413 PERROR("Wrote more data than requested %zd (len: %lu)",
1414 ret_splice, len);
1415 written += ret_splice;
1416 ret = errno;
1417 goto splice_error;
1418 }
1419 len -= ret_splice;
1420
1421 /* This call is useless on a socket so better save a syscall. */
1422 if (!relayd) {
1423 /* This won't block, but will start writeout asynchronously */
1424 lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
1425 SYNC_FILE_RANGE_WRITE);
1426 stream->out_fd_offset += ret_splice;
1427 }
1428 written += ret_splice;
1429 }
1430 lttng_consumer_sync_trace_file(stream, orig_offset);
1431
1432 ret = ret_splice;
1433
1434 goto end;
1435
1436 splice_error:
1437 /* send the appropriate error description to sessiond */
1438 switch (ret) {
1439 case EBADF:
1440 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EBADF);
1441 break;
1442 case EINVAL:
1443 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
1444 break;
1445 case ENOMEM:
1446 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
1447 break;
1448 case ESPIPE:
1449 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
1450 break;
1451 }
1452
1453 end:
1454 pthread_mutex_unlock(&stream->lock);
1455 if (relayd && stream->metadata_flag) {
1456 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
1457 }
1458
1459 rcu_read_unlock();
1460 return written;
1461 }
1462
1463 /*
1464 * Take a snapshot for a specific fd
1465 *
1466 * Returns 0 on success, < 0 on error
1467 */
1468 int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
1469 struct lttng_consumer_stream *stream)
1470 {
1471 switch (consumer_data.type) {
1472 case LTTNG_CONSUMER_KERNEL:
1473 return lttng_kconsumer_take_snapshot(ctx, stream);
1474 case LTTNG_CONSUMER32_UST:
1475 case LTTNG_CONSUMER64_UST:
1476 return lttng_ustconsumer_take_snapshot(ctx, stream);
1477 default:
1478 ERR("Unknown consumer_data type");
1479 assert(0);
1480 return -ENOSYS;
1481 }
1482
1483 }
1484
1485 /*
1486 * Get the produced position
1487 *
1488 * Returns 0 on success, < 0 on error
1489 */
1490 int lttng_consumer_get_produced_snapshot(
1491 struct lttng_consumer_local_data *ctx,
1492 struct lttng_consumer_stream *stream,
1493 unsigned long *pos)
1494 {
1495 switch (consumer_data.type) {
1496 case LTTNG_CONSUMER_KERNEL:
1497 return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
1498 case LTTNG_CONSUMER32_UST:
1499 case LTTNG_CONSUMER64_UST:
1500 return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
1501 default:
1502 ERR("Unknown consumer_data type");
1503 assert(0);
1504 return -ENOSYS;
1505 }
1506 }
1507
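/*
 * Receive a command from the session daemon on the given socket, dispatching
 * to the kernel or UST consumer implementation depending on the consumer type.
 */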
1508 int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
1509 int sock, struct pollfd *consumer_sockpoll)
1510 {
1511 switch (consumer_data.type) {
1512 case LTTNG_CONSUMER_KERNEL:
1513 return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
1514 case LTTNG_CONSUMER32_UST:
1515 case LTTNG_CONSUMER64_UST:
1516 return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
1517 default:
1518 ERR("Unknown consumer_data type");
1519 assert(0);
1520 return -ENOSYS;
1521 }
1522 }
1523
1524 /*
1525 * Iterate over all streams of the hashtable and free them properly.
1526 *
1527 * WARNING: *MUST* be used with data streams only.
1528 */
1529 static void destroy_data_stream_ht(struct lttng_ht *ht)
1530 {
1531 int ret;
1532 struct lttng_ht_iter iter;
1533 struct lttng_consumer_stream *stream;
1534
1535 if (ht == NULL) {
1536 return;
1537 }
1538
1539 rcu_read_lock();
1540 cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
1541 ret = lttng_ht_del(ht, &iter);
1542 assert(!ret);
1543
1544 call_rcu(&stream->node.head, consumer_free_stream);
1545 }
1546 rcu_read_unlock();
1547
1548 lttng_ht_destroy(ht);
1549 }
1550
1551 /*
1552 * Iterate over all streams of the hashtable and free them properly.
1553 *
1554 * XXX: Should not be only for metadata streams, or else use another name.
1555 */
1556 static void destroy_stream_ht(struct lttng_ht *ht)
1557 {
1558 int ret;
1559 struct lttng_ht_iter iter;
1560 struct lttng_consumer_stream *stream;
1561
1562 if (ht == NULL) {
1563 return;
1564 }
1565
1566 rcu_read_lock();
1567 cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
1568 ret = lttng_ht_del(ht, &iter);
1569 assert(!ret);
1570
1571 call_rcu(&stream->node.head, consumer_free_stream);
1572 }
1573 rcu_read_unlock();
1574
1575 lttng_ht_destroy(ht);
1576 }
1577
1578 /*
1579 * Clean up a metadata stream and free its memory.
1580 */
1581 void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
1582 struct lttng_ht *ht)
1583 {
1584 int ret;
1585 struct lttng_ht_iter iter;
1586 struct lttng_consumer_channel *free_chan = NULL;
1587 struct consumer_relayd_sock_pair *relayd;
1588
1589 assert(stream);
1590 /*
1591 * This call should NEVER receive a regular stream. It must always be a
1592 * metadata stream; this is crucial for data structure synchronization.
1593 */
1594 assert(stream->metadata_flag);
1595
1596 DBG3("Consumer delete metadata stream %d", stream->wait_fd);
1597
1598 if (ht == NULL) {
1599 /* Means the stream was allocated but not successfully added */
1600 goto free_stream;
1601 }
1602
1603 pthread_mutex_lock(&consumer_data.lock);
1604 switch (consumer_data.type) {
1605 case LTTNG_CONSUMER_KERNEL:
1606 if (stream->mmap_base != NULL) {
1607 ret = munmap(stream->mmap_base, stream->mmap_len);
1608 if (ret != 0) {
1609 PERROR("munmap metadata stream");
1610 }
1611 }
1612 break;
1613 case LTTNG_CONSUMER32_UST:
1614 case LTTNG_CONSUMER64_UST:
1615 lttng_ustconsumer_del_stream(stream);
1616 break;
1617 default:
1618 ERR("Unknown consumer_data type");
1619 assert(0);
1620 goto end;
1621 }
1622
1623 rcu_read_lock();
1624 iter.iter.node = &stream->node.node;
1625 ret = lttng_ht_del(ht, &iter);
1626 assert(!ret);
1627
1628 /* Remove node session id from the consumer_data stream ht */
1629 iter.iter.node = &stream->node_session_id.node;
1630 ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
1631 assert(!ret);
1632 rcu_read_unlock();
1633
1634 if (stream->out_fd >= 0) {
1635 ret = close(stream->out_fd);
1636 if (ret) {
1637 PERROR("close");
1638 }
1639 }
1640
1641 if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
1642 ret = close(stream->wait_fd);
1643 if (ret) {
1644 PERROR("close");
1645 }
1646 }
1647
1648 if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
1649 ret = close(stream->shm_fd);
1650 if (ret) {
1651 PERROR("close");
1652 }
1653 }
1654
1655 /* Check and cleanup relayd */
1656 rcu_read_lock();
1657 relayd = consumer_find_relayd(stream->net_seq_idx);
1658 if (relayd != NULL) {
1659 uatomic_dec(&relayd->refcount);
1660 assert(uatomic_read(&relayd->refcount) >= 0);
1661
1662 /* Closing streams requires locking the control socket. */
1663 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
1664 ret = relayd_send_close_stream(&relayd->control_sock,
1665 stream->relayd_stream_id, stream->next_net_seq_num - 1);
1666 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
1667 if (ret < 0) {
1668 DBG("Unable to close stream on the relayd. Continuing");
1669 /*
1670 * Continue here. There is nothing we can do for the relayd.
1671 * Chances are that the relayd has closed the socket so we just
1672 * continue cleaning up.
1673 */
1674 }
1675
1676 /* If both conditions are met, we destroy the relayd. */
1677 if (uatomic_read(&relayd->refcount) == 0 &&
1678 uatomic_read(&relayd->destroy_flag)) {
1679 destroy_relayd(relayd);
1680 }
1681 }
1682 rcu_read_unlock();
1683
1684 /* Atomically decrement channel refcount since other threads can use it. */
1685 uatomic_dec(&stream->chan->refcount);
1686 if (!uatomic_read(&stream->chan->refcount)
1687 && !uatomic_read(&stream->chan->nb_init_streams)) {
1688 /* Go for channel deletion! */
1689 free_chan = stream->chan;
1690 }
1691
1692 end:
1693 pthread_mutex_unlock(&consumer_data.lock);
1694
1695 if (free_chan) {
1696 consumer_del_channel(free_chan);
1697 }
1698
1699 free_stream:
1700 call_rcu(&stream->node.head, consumer_free_stream);
1701 }
1702
1703 /*
1704 * Actions performed on a metadata stream when adding it to the consumer's
1705 * internal data structures.
1706 */
1707 static int consumer_add_metadata_stream(struct lttng_consumer_stream *stream,
1708 struct lttng_ht *ht)
1709 {
1710 int ret = 0;
1711 struct consumer_relayd_sock_pair *relayd;
1712
1713 assert(stream);
1714 assert(ht);
1715
1716 DBG3("Adding metadata stream %d to hash table", stream->wait_fd);
1717
1718 pthread_mutex_lock(&consumer_data.lock);
1719
1720 /*
1721 * From here, refcounts are updated so be _careful_ when returning an error
1722 * after this point.
1723 */
1724
1725 rcu_read_lock();
1726 /* Find relayd and, if one is found, increment refcount. */
1727 relayd = consumer_find_relayd(stream->net_seq_idx);
1728 if (relayd != NULL) {
1729 uatomic_inc(&relayd->refcount);
1730 }
1731
1732 /* Update channel refcount once added without error(s). */
1733 uatomic_inc(&stream->chan->refcount);
1734
1735 /*
1736 * When nb_init_streams reaches 0, we don't need to trigger any action in
1737 * terms of destroying the associated channel, because the action that
1738 * causes the count to become 0 also causes a stream to be added. The
1739 * channel deletion will thus be triggered by the following removal of this
1740 * stream.
1741 */
1742 if (uatomic_read(&stream->chan->nb_init_streams) > 0) {
1743 uatomic_dec(&stream->chan->nb_init_streams);
1744 }
1745
1746 /* Steal stream identifier to avoid having streams with the same key */
1747 consumer_steal_stream_key(stream->key, ht);
1748
1749 lttng_ht_add_unique_ulong(ht, &stream->node);
1750
1751 /*
1752 * Add stream to the stream_list_ht of the consumer data. No need to steal
1753 * the key since the HT does not use it and we allow adding redundant keys
1754 * into this table.
1755 */
1756 lttng_ht_add_ulong(consumer_data.stream_list_ht, &stream->node_session_id);
1757
1758 rcu_read_unlock();
1759
1760 pthread_mutex_unlock(&consumer_data.lock);
1761 return ret;
1762 }
1763
1764 /*
1765 * Thread that polls on metadata file descriptors and writes the data to disk
1766 * or to the network.
1767 */
1768 void *consumer_thread_metadata_poll(void *data)
1769 {
1770 int ret, i, pollfd;
1771 uint32_t revents, nb_fd;
1772 struct lttng_consumer_stream *stream = NULL;
1773 struct lttng_ht_iter iter;
1774 struct lttng_ht_node_ulong *node;
1775 struct lttng_poll_event events;
1776 struct lttng_consumer_local_data *ctx = data;
1777 ssize_t len;
1778
1779 rcu_register_thread();
1780
1781 DBG("Thread metadata poll started");
1782
1783 /* Size is set to 1 for the consumer_metadata pipe */
1784 ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
1785 if (ret < 0) {
1786 ERR("Poll set creation failed");
1787 goto end;
1788 }
1789
1790 ret = lttng_poll_add(&events, ctx->consumer_metadata_pipe[0], LPOLLIN);
1791 if (ret < 0) {
1792 goto end;
1793 }
1794
1795 /* Main loop */
1796 DBG("Metadata main loop started");
1797
1798 while (1) {
1799 lttng_poll_reset(&events);
1800
1801 nb_fd = LTTNG_POLL_GETNB(&events);
1802
1803 /* Only the metadata pipe is set */
1804 if (nb_fd == 0 && consumer_quit == 1) {
1805 goto end;
1806 }
1807
1808 restart:
1809 DBG("Metadata poll wait with %d fd(s)", nb_fd);
1810 ret = lttng_poll_wait(&events, -1);
1811 DBG("Metadata event catched in thread");
1812 if (ret < 0) {
1813 if (errno == EINTR) {
1814 ERR("Poll EINTR catched");
1815 goto restart;
1816 }
1817 goto error;
1818 }
1819
1820 /* From here, the event is a metadata wait fd */
1821 for (i = 0; i < nb_fd; i++) {
1822 revents = LTTNG_POLL_GETEV(&events, i);
1823 pollfd = LTTNG_POLL_GETFD(&events, i);
1824
1825 /* Just don't waste time if no returned events for the fd */
1826 if (!revents) {
1827 continue;
1828 }
1829
1830 if (pollfd == ctx->consumer_metadata_pipe[0]) {
1831 if (revents & (LPOLLERR | LPOLLHUP )) {
1832 DBG("Metadata thread pipe hung up");
1833 /*
1834 * Remove the pipe from the poll set and continue the loop
1835 * since there might be data to consume.
1836 */
1837 lttng_poll_del(&events, ctx->consumer_metadata_pipe[0]);
1838 close(ctx->consumer_metadata_pipe[0]);
1839 continue;
1840 } else if (revents & LPOLLIN) {
1841 do {
1842 /* Get the stream pointer received */
1843 ret = read(pollfd, &stream, sizeof(stream));
1844 } while (ret < 0 && errno == EINTR);
1845 if (ret < 0 ||
1846 ret < sizeof(struct lttng_consumer_stream *)) {
1847 PERROR("read metadata stream");
1848 /*
1849 * Let's continue here and hope we can still work
1850 * without stopping the consumer. XXX: Should we?
1851 */
1852 continue;
1853 }
1854
1855 DBG("Adding metadata stream %d to poll set",
1856 stream->wait_fd);
1857
1858 ret = consumer_add_metadata_stream(stream, metadata_ht);
1859 if (ret) {
1860 ERR("Unable to add metadata stream");
1861 /* Stream was not setup properly. Continuing. */
1862 consumer_del_metadata_stream(stream, NULL);
1863 continue;
1864 }
1865
1866 /* Add metadata stream to the global poll events list */
1867 lttng_poll_add(&events, stream->wait_fd,
1868 LPOLLIN | LPOLLPRI);
1869 }
1870
1871 /* Handle other stream */
1872 continue;
1873 }
1874
1875 rcu_read_lock();
1876 lttng_ht_lookup(metadata_ht, (void *)((unsigned long) pollfd),
1877 &iter);
1878 node = lttng_ht_iter_get_node_ulong(&iter);
1879 assert(node);
1880
1881 stream = caa_container_of(node, struct lttng_consumer_stream,
1882 node);
1883
1884 /* Check for error event */
1885 if (revents & (LPOLLERR | LPOLLHUP)) {
1886 DBG("Metadata fd %d is hup|err.", pollfd);
1887 if (!stream->hangup_flush_done
1888 && (consumer_data.type == LTTNG_CONSUMER32_UST
1889 || consumer_data.type == LTTNG_CONSUMER64_UST)) {
1890 DBG("Attempting to flush and consume the UST buffers");
1891 lttng_ustconsumer_on_stream_hangup(stream);
1892
1893 /* We just flushed the stream now read it. */
1894 do {
1895 len = ctx->on_buffer_ready(stream, ctx);
1896 /*
1897 * We don't check the return value here since if we get
1898 * a negative len, it means an error occurred, thus we
1899 * simply remove it from the poll set and free the
1900 * stream.
1901 */
1902 } while (len > 0);
1903 }
1904
1905 lttng_poll_del(&events, stream->wait_fd);
1906 /*
1907 * This call updates the channel states, closes file descriptors
1908 * and securely frees the stream.
1909 */
1910 consumer_del_metadata_stream(stream, metadata_ht);
1911 } else if (revents & (LPOLLIN | LPOLLPRI)) {
1912 /* Get the data out of the metadata file descriptor */
1913 DBG("Metadata available on fd %d", pollfd);
1914 assert(stream->wait_fd == pollfd);
1915
1916 len = ctx->on_buffer_ready(stream, ctx);
1917 /* It's ok to have an unavailable sub-buffer */
1918 if (len < 0 && len != -EAGAIN && len != -ENODATA) {
1919 rcu_read_unlock();
1920 goto end;
1921 } else if (len > 0) {
1922 stream->data_read = 1;
1923 }
1924 }
1925
1926 /* Release RCU lock for the stream looked up */
1927 rcu_read_unlock();
1928 }
1929 }
1930
1931 error:
1932 end:
1933 DBG("Metadata poll thread exiting");
1934 lttng_poll_clean(&events);
1935
1936 if (metadata_ht) {
1937 destroy_stream_ht(metadata_ht);
1938 }
1939
1940 rcu_unregister_thread();
1941 return NULL;
1942 }
1943
1944 /*
1945 * This thread polls the fds in the set to consume the data and write
1946 * it to tracefile if necessary.
1947 */
1948 void *consumer_thread_data_poll(void *data)
1949 {
1950 int num_rdy, num_hup, high_prio, ret, i;
1951 struct pollfd *pollfd = NULL;
1952 /* local view of the streams */
1953 struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
1954 /* local view of consumer_data.fds_count */
1955 int nb_fd = 0;
1956 struct lttng_consumer_local_data *ctx = data;
1957 ssize_t len;
1958
1959 rcu_register_thread();
1960
1961 data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1962 if (data_ht == NULL) {
1963 goto end;
1964 }
1965
1966 local_stream = zmalloc(sizeof(struct lttng_consumer_stream));
1967
1968 while (1) {
1969 high_prio = 0;
1970 num_hup = 0;
1971
1972 /*
1973 * the fds set has been updated, we need to update our
1974 * local array as well
1975 */
1976 pthread_mutex_lock(&consumer_data.lock);
1977 if (consumer_data.need_update) {
1978 if (pollfd != NULL) {
1979 free(pollfd);
1980 pollfd = NULL;
1981 }
1982 if (local_stream != NULL) {
1983 free(local_stream);
1984 local_stream = NULL;
1985 }
1986
1987 /* allocate for all fds + 1 for the consumer_data_pipe */
1988 pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
1989 if (pollfd == NULL) {
1990 PERROR("pollfd malloc");
1991 pthread_mutex_unlock(&consumer_data.lock);
1992 goto end;
1993 }
1994
1995 /* allocate for all fds + 1 for the consumer_data_pipe */
1996 local_stream = zmalloc((consumer_data.stream_count + 1) *
1997 sizeof(struct lttng_consumer_stream));
1998 if (local_stream == NULL) {
1999 PERROR("local_stream malloc");
2000 pthread_mutex_unlock(&consumer_data.lock);
2001 goto end;
2002 }
2003 ret = consumer_update_poll_array(ctx, &pollfd, local_stream,
2004 data_ht);
2005 if (ret < 0) {
2006 ERR("Error in allocating pollfd or local_stream");
2007 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
2008 pthread_mutex_unlock(&consumer_data.lock);
2009 goto end;
2010 }
2011 nb_fd = ret;
2012 consumer_data.need_update = 0;
2013 }
2014 pthread_mutex_unlock(&consumer_data.lock);
2015
2016 /* No FDs left and consumer_quit is set: clean up and exit the thread. */
2017 if (nb_fd == 0 && consumer_quit == 1) {
2018 goto end;
2019 }
2020 /* Poll on the array of fds; the extra slot at index nb_fd is the consumer_data_pipe. */
2021 restart:
2022 DBG("polling on %d fd", nb_fd + 1);
2023 num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
2024 DBG("poll num_rdy : %d", num_rdy);
2025 if (num_rdy == -1) {
2026 /*
2027 * Restart interrupted system call.
2028 */
2029 if (errno == EINTR) {
2030 goto restart;
2031 }
2032 PERROR("Poll error");
2033 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
2034 goto end;
2035 } else if (num_rdy == 0) {
2036 DBG("Polling thread timed out");
2037 goto end;
2038 }
2039
2040 /*
2041 * If the consumer_data_pipe triggered the poll, go directly to the
2042 * beginning of the loop to update the array. We want to prioritize
2043 * array update over low-priority reads.
2044 */
2045 if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
2046 ssize_t pipe_readlen;
2047
2048 DBG("consumer_data_pipe wake up");
2049 /* Consume one stream pointer written to the pipe */
2050 do {
2051 pipe_readlen = read(ctx->consumer_data_pipe[0], &new_stream,
2052 sizeof(new_stream));
2053 } while (pipe_readlen == -1 && errno == EINTR);
2054
2055 /*
2056 * If the stream is NULL, just ignore it. It's also possible that
2057 * the sessiond poll thread changed the consumer_quit state and is
2058 * waking us up to test it.
2059 */
2060 if (new_stream == NULL) {
2061 continue;
2062 }
2063
2064 ret = consumer_add_stream(new_stream, data_ht);
2065 if (ret) {
2066 ERR("Consumer add stream %d failed. Continuing",
2067 new_stream->key);
2068 /*
2069 * At this point, if the add_stream fails, it is not in the
2070 * hash table, thus we pass NULL here.
2071 */
2072 consumer_del_stream(new_stream, NULL);
2073 }
2074
2075 /* Continue to update the local streams and handle prio ones */
2076 continue;
2077 }
2078
2079 /* Take care of high priority channels first. */
2080 for (i = 0; i < nb_fd; i++) {
2081 if (pollfd[i].revents & POLLPRI) {
2082 DBG("Urgent read on fd %d", pollfd[i].fd);
2083 high_prio = 1;
2084 len = ctx->on_buffer_ready(local_stream[i], ctx);
2085 /* it's ok to have an unavailable sub-buffer */
2086 if (len < 0 && len != -EAGAIN && len != -ENODATA) {
2087 goto end;
2088 } else if (len > 0) {
2089 local_stream[i]->data_read = 1;
2090 }
2091 }
2092 }
2093
2094 /*
2095 * If we read a high prio channel in this loop, try again
2096 * for more high prio data.
2097 */
2098 if (high_prio) {
2099 continue;
2100 }
2101
2102 /* Take care of low priority channels. */
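/*
 * Streams that were flushed on hangup are read even without POLLIN so any
 * remaining buffered data gets drained.
 */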
2103 for (i = 0; i < nb_fd; i++) {
2104 if ((pollfd[i].revents & POLLIN) ||
2105 local_stream[i]->hangup_flush_done) {
2106 DBG("Normal read on fd %d", pollfd[i].fd);
2107 len = ctx->on_buffer_ready(local_stream[i], ctx);
2108 /* it's ok to have an unavailable sub-buffer */
2109 if (len < 0 && len != -EAGAIN && len != -ENODATA) {
2110 goto end;
2111 } else if (len > 0) {
2112 local_stream[i]->data_read = 1;
2113 }
2114 }
2115 }
2116
2117 /* Handle hangup and errors */
2118 for (i = 0; i < nb_fd; i++) {
2119 if (!local_stream[i]->hangup_flush_done
2120 && (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
2121 && (consumer_data.type == LTTNG_CONSUMER32_UST
2122 || consumer_data.type == LTTNG_CONSUMER64_UST)) {
2123 DBG("fd %d is hup|err|nval. Attempting flush and read.",
2124 pollfd[i].fd);
2125 lttng_ustconsumer_on_stream_hangup(local_stream[i]);
2126 /* Attempt read again, for the data we just flushed. */
2127 local_stream[i]->data_read = 1;
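/*
 * Marking data_read prevents the deletion below; the flushed data will be
 * drained by the low priority pass on the next iterations since
 * hangup_flush_done forces a read.
 */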
2128 }
2129 /*
2130 * If the poll flag is HUP/ERR/NVAL and we have
2131 * read no data in this pass, we can remove the
2132 * stream from its hash table.
2133 */
2134 if ((pollfd[i].revents & POLLHUP)) {
2135 DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
2136 if (!local_stream[i]->data_read) {
2137 consumer_del_stream(local_stream[i], data_ht);
2138 num_hup++;
2139 }
2140 } else if (pollfd[i].revents & POLLERR) {
2141 ERR("Error returned in polling fd %d.", pollfd[i].fd);
2142 if (!local_stream[i]->data_read) {
2143 consumer_del_stream(local_stream[i], data_ht);
2144 num_hup++;
2145 }
2146 } else if (pollfd[i].revents & POLLNVAL) {
2147 ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
2148 if (!local_stream[i]->data_read) {
2149 consumer_del_stream(local_stream[i], data_ht);
2150 num_hup++;
2151 }
2152 }
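/* Reset the flag for the next poll iteration. */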
2153 local_stream[i]->data_read = 0;
2154 }
2155 }
2156 end:
2157 DBG("polling thread exiting");
2158 if (pollfd != NULL) {
2159 free(pollfd);
2160 pollfd = NULL;
2161 }
2162 if (local_stream != NULL) {
2163 free(local_stream);
2164 local_stream = NULL;
2165 }
2166
2167 /*
2168 * Close the write side of the pipe so epoll_wait() in
2169 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
2170 * read side of the pipe. If we close them both, epoll_wait strangely does
2171 * not return and could create an endless wait period if the pipe is the
2172 * only tracked fd in the poll set. The thread will take care of closing
2173 * the read side.
2174 */
2175 close(ctx->consumer_metadata_pipe[1]);
2176
2177 if (data_ht) {
2178 destroy_data_stream_ht(data_ht);
2179 }
2180
2181 rcu_unregister_thread();
2182 return NULL;
2183 }
2184
2185 /*
2186 * This thread listens on the consumerd socket and receives the file
2187 * descriptors from the session daemon.
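* It creates and listens on the consumer command socket, accepts a single
* connection from the session daemon and processes commands until a STOP
* command is received or the connection is closed.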
2188 */
2189 void *consumer_thread_sessiond_poll(void *data)
2190 {
2191 int sock, client_socket, ret;
2192 /*
2193 * Structure used to poll for incoming data on the communication socket;
2194 * this avoids blocking on the socket.
2195 */
2196 struct pollfd consumer_sockpoll[2];
2197 struct lttng_consumer_local_data *ctx = data;
2198
2199 rcu_register_thread();
2200
2201 DBG("Creating command socket %s", ctx->consumer_command_sock_path);
2202 unlink(ctx->consumer_command_sock_path);
2203 client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
2204 if (client_socket < 0) {
2205 ERR("Cannot create command socket");
2206 goto end;
2207 }
2208
2209 ret = lttcomm_listen_unix_sock(client_socket);
2210 if (ret < 0) {
2211 goto end;
2212 }
2213
2214 DBG("Sending ready command to lttng-sessiond");
2215 ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
2216 /* return < 0 on error, but == 0 is not fatal */
2217 if (ret < 0) {
2218 ERR("Error sending ready command to lttng-sessiond");
2219 goto end;
2220 }
2221
2222 ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
2223 if (ret < 0) {
2224 PERROR("fcntl O_NONBLOCK");
2225 goto end;
2226 }
2227
2228 /* Prepare the FDs to poll: the client socket and the should_quit pipe. */
2229 consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
2230 consumer_sockpoll[0].events = POLLIN | POLLPRI;
2231 consumer_sockpoll[1].fd = client_socket;
2232 consumer_sockpoll[1].events = POLLIN | POLLPRI;
2233
2234 if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
2235 goto end;
2236 }
2237 DBG("Connection on client_socket");
2238
2239 /* Blocking call, waiting for transmission */
2240 sock = lttcomm_accept_unix_sock(client_socket);
2241 if (sock <= 0) {
2242 WARN("On accept");
2243 goto end;
2244 }
2245 ret = fcntl(sock, F_SETFL, O_NONBLOCK);
2246 if (ret < 0) {
2247 PERROR("fcntl O_NONBLOCK");
2248 goto end;
2249 }
2250
2251 /* update the polling structure to poll on the established socket */
2252 consumer_sockpoll[1].fd = sock;
2253 consumer_sockpoll[1].events = POLLIN | POLLPRI;
2254
2255 while (1) {
2256 if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
2257 goto end;
2258 }
2259 DBG("Incoming command on sock");
2260 ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
2261 if (ret == -ENOENT) {
2262 DBG("Received STOP command");
2263 goto end;
2264 }
2265 if (ret <= 0) {
2266 /*
2267 * This could simply be a session daemon quitting. Don't output
2268 * ERR() here.
2269 */
2270 DBG("Communication interrupted on command socket");
2271 goto end;
2272 }
2273 if (consumer_quit) {
2274 DBG("consumer_thread_sessiond_poll received quit from signal");
2275 goto end;
2276 }
2277 DBG("received fds on sock");
2278 }
2279 end:
2280 DBG("consumer_thread_sessiond_poll exiting");
2281
2282 /*
2283 * when all fds have hung up, the polling thread
2284 * can exit cleanly
2285 */
2286 consumer_quit = 1;
2287
2288 /*
2289 * 2s of grace period, if no polling events occur during
2290 * this period, the polling thread will exit even if there
2291 * are still open FDs (should not happen, but safety mechanism).
2292 */
2293 consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;
2294
2295 /*
2296 * Notify the data poll thread to poll back again and test the
2297 * consumer_quit state to quit gracefully.
2298 */
2299 do {
2300 struct lttng_consumer_stream *null_stream = NULL;
2301
2302 ret = write(ctx->consumer_data_pipe[1], &null_stream,
2303 sizeof(null_stream));
2304 } while (ret < 0 && errno == EINTR);
2305
2306 rcu_unregister_thread();
2307 return NULL;
2308 }
2309
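/*
 * Read a ready sub-buffer of the given stream, dispatching to the kernel or
 * UST consumer implementation depending on the consumer type.
 */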
2310 ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
2311 struct lttng_consumer_local_data *ctx)
2312 {
2313 switch (consumer_data.type) {
2314 case LTTNG_CONSUMER_KERNEL:
2315 return lttng_kconsumer_read_subbuffer(stream, ctx);
2316 case LTTNG_CONSUMER32_UST:
2317 case LTTNG_CONSUMER64_UST:
2318 return lttng_ustconsumer_read_subbuffer(stream, ctx);
2319 default:
2320 ERR("Unknown consumer_data type");
2321 assert(0);
2322 return -ENOSYS;
2323 }
2324 }
2325
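/*
 * Dispatch the on-receive-stream hook for a newly received stream to the
 * kernel or UST consumer implementation.
 */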
2326 int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
2327 {
2328 switch (consumer_data.type) {
2329 case LTTNG_CONSUMER_KERNEL:
2330 return lttng_kconsumer_on_recv_stream(stream);
2331 case LTTNG_CONSUMER32_UST:
2332 case LTTNG_CONSUMER64_UST:
2333 return lttng_ustconsumer_on_recv_stream(stream);
2334 default:
2335 ERR("Unknown consumer_data type");
2336 assert(0);
2337 return -ENOSYS;
2338 }
2339 }
2340
2341 /*
2342 * Allocate and set consumer data hash tables.
2343 */
2344 void lttng_consumer_init(void)
2345 {
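/*
 * channel_ht holds the consumer channels, relayd_ht the relayd socket pairs
 * indexed by net sequence index, and stream_list_ht the streams indexed by
 * session id (used by consumer_data_available()).
 */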
2346 consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2347 consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2348 consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2349
2350 metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2351 assert(metadata_ht);
2352 data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2353 assert(data_ht);
2354 }
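/*
 * Illustrative sketch only (not taken from this file): a consumer daemon is
 * expected to call lttng_consumer_init() once before spawning the threads
 * defined above, roughly as follows, with error handling omitted:
 *
 *	lttng_consumer_init();
 *	ctx = lttng_consumer_create(...);	/" type + read/recv callbacks "/
 *	pthread_create(&metadata_thread, NULL, consumer_thread_metadata_poll, ctx);
 *	pthread_create(&data_thread, NULL, consumer_thread_data_poll, ctx);
 *	pthread_create(&sessiond_thread, NULL, consumer_thread_sessiond_poll, ctx);
 */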
2355
2356 /*
2357 * Process the ADD_RELAYD command received by a consumer.
2358 *
2359 * This will create a relayd socket pair and add it to the relayd hash table.
2360 * The caller MUST acquire a RCU read side lock before calling it.
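* The relayd socket file descriptor itself is received from the session
* daemon on the given command socket through Unix socket fd passing.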
2361 */
2362 int consumer_add_relayd_socket(int net_seq_idx, int sock_type,
2363 struct lttng_consumer_local_data *ctx, int sock,
2364 struct pollfd *consumer_sockpoll, struct lttcomm_sock *relayd_sock)
2365 {
2366 int fd, ret = -1;
2367 struct consumer_relayd_sock_pair *relayd;
2368
2369 DBG("Consumer adding relayd socket (idx: %d)", net_seq_idx);
2370
2371 /* Get relayd reference if exists. */
2372 relayd = consumer_find_relayd(net_seq_idx);
2373 if (relayd == NULL) {
2374 /* Not found. Allocate one. */
2375 relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
2376 if (relayd == NULL) {
2377 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
2378 goto error;
2379 }
2380 }
2381
2382 /* Poll on consumer socket. */
2383 if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
2384 ret = -EINTR;
2385 goto error;
2386 }
2387
2388 /* Get relayd socket from session daemon */
2389 ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
2390 if (ret != sizeof(fd)) {
2391 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
2392 ret = -1;
2393 goto error;
2394 }
2395
2396 /* Copy socket information and received FD */
2397 switch (sock_type) {
2398 case LTTNG_STREAM_CONTROL:
2399 /* Copy received lttcomm socket */
2400 lttcomm_copy_sock(&relayd->control_sock, relayd_sock);
2401 ret = lttcomm_create_sock(&relayd->control_sock);
2402 if (ret < 0) {
2403 goto error;
2404 }
2405
2406 /* Close the locally created socket fd; the fd received from the session daemon is used instead. */
2407 close(relayd->control_sock.fd);
2408
2409 /* Assign new file descriptor */
2410 relayd->control_sock.fd = fd;
2411 break;
2412 case LTTNG_STREAM_DATA:
2413 /* Copy received lttcomm socket */
2414 lttcomm_copy_sock(&relayd->data_sock, relayd_sock);
2415 ret = lttcomm_create_sock(&relayd->data_sock);
2416 if (ret < 0) {
2417 goto error;
2418 }
2419
2420 /* Close the locally created socket fd; the fd received from the session daemon is used instead. */
2421 close(relayd->data_sock.fd);
2422
2423 /* Assign new file descriptor */
2424 relayd->data_sock.fd = fd;
2425 break;
2426 default:
2427 ERR("Unknown relayd socket type (%d)", sock_type);
2428 goto error;
2429 }
2430
2431 DBG("Consumer %s socket created successfully with net idx %d (fd: %d)",
2432 sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
2433 relayd->net_seq_idx, fd);
2434
2435 /*
2436 * Add the relayd socket pair to the consumer data hash table. If the object
2437 * already exists or on error, the function returns gracefully.
2438 */
2439 add_relayd(relayd);
2440
2441 /* All good! */
2442 ret = 0;
2443
2444 error:
2445 return ret;
2446 }
2447
2448 /*
2449 * Check whether, for a given session id, there is still data to be extracted
2450 * from the buffers.
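* For streams sent to a relayd, the relayd is also queried to make sure the
* data has effectively been received on its side (or, for metadata, that the
* relayd control side is quiescent).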
2451 *
2452 * Return 1 if data is in fact available to be read or else 0.
2453 */
2454 int consumer_data_available(uint64_t id)
2455 {
2456 int ret;
2457 struct lttng_ht_iter iter;
2458 struct lttng_ht *ht;
2459 struct lttng_consumer_stream *stream;
2460 struct consumer_relayd_sock_pair *relayd;
2461 int (*data_available)(struct lttng_consumer_stream *);
2462
2463 DBG("Consumer data available command on session id %" PRIu64, id);
2464
2465 pthread_mutex_lock(&consumer_data.lock);
2466
2467 switch (consumer_data.type) {
2468 case LTTNG_CONSUMER_KERNEL:
2469 data_available = lttng_kconsumer_data_available;
2470 break;
2471 case LTTNG_CONSUMER32_UST:
2472 case LTTNG_CONSUMER64_UST:
2473 data_available = lttng_ustconsumer_data_available;
2474 break;
2475 default:
2476 ERR("Unknown consumer data type");
2477 assert(0);
2478 }
2479
2480 rcu_read_lock();
2481
2482 /* Ease our life a bit */
2483 ht = consumer_data.stream_list_ht;
2484
2485 cds_lfht_for_each_entry_duplicate(ht->ht,
2486 ht->hash_fct((void *)((unsigned long) id), 0x42UL),
2487 ht->match_fct, (void *)((unsigned long) id),
2488 &iter.iter, stream, node_session_id.node) {
2489 /* Check the stream for data. */
2490 ret = data_available(stream);
2491 if (ret == 0) {
2492 goto data_not_available;
2493 }
2494
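/*
 * The stream goes through a relayd: also ask the relayd whether it has
 * received all the data sent for this stream (or, for metadata, whether
 * the control side is quiescent).
 */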
2495 if (stream->net_seq_idx != -1) {
2496 relayd = consumer_find_relayd(stream->net_seq_idx);
2497 assert(relayd);
2498
2499 pthread_mutex_lock(&stream->lock);
2500 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
2501 if (stream->metadata_flag) {
2502 ret = relayd_quiescent_control(&relayd->control_sock);
2503 } else {
2504 ret = relayd_data_available(&relayd->control_sock,
2505 stream->relayd_stream_id, stream->next_net_seq_num);
2506 }
2507 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
2508 pthread_mutex_unlock(&stream->lock);
2509 if (ret == 0) {
2510 goto data_not_available;
2511 }
2512 }
2513 }
2514
2515 /*
2516 * Finding _no_ node in the hash table means that the stream(s) have been
2517 * removed, thus the data is guaranteed to be available for analysis from the
2518 * trace files. This is *only* true for a local consumer and not for network
2519 * streaming.
2520 */
2521
2522 /* Data is available to be read by a viewer. */
2523 pthread_mutex_unlock(&consumer_data.lock);
2524 rcu_read_unlock();
2525 return 1;
2526
2527 data_not_available:
2528 /* Data is still being extracted from buffers. */
2529 pthread_mutex_unlock(&consumer_data.lock);
2530 rcu_read_unlock();
2531 return 0;
2532 }