Fix: handle new streams in live mode in relayd
lttng-tools.git: src/bin/lttng-relayd/live.c
/*
 * Copyright (C) 2013 - Julien Desfossez <jdesfossez@efficios.com>
 *                      David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#include <getopt.h>
#include <grp.h>
#include <limits.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <inttypes.h>
#include <urcu/futex.h>
#include <urcu/uatomic.h>
#include <unistd.h>
#include <fcntl.h>
#include <config.h>

#include <lttng/lttng.h>
#include <common/common.h>
#include <common/compat/poll.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/futex.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/sessiond-comm/inet.h>
#include <common/sessiond-comm/relayd.h>
#include <common/uri.h>
#include <common/utils.h>

#include "cmd.h"
#include "live.h"
#include "lttng-relayd.h"
#include "lttng-viewer.h"
#include "utils.h"
#include "health-relayd.h"
#include "testpoint.h"

static struct lttng_uri *live_uri;

/*
 * This pipe is used to inform the worker thread that a command is queued and
 * ready to be processed.
 */
static int live_relay_cmd_pipe[2] = { -1, -1 };

/* Shared between threads */
static int live_dispatch_thread_exit;

static pthread_t live_listener_thread;
static pthread_t live_dispatcher_thread;
static pthread_t live_worker_thread;

/*
 * Relay command queue.
 *
 * The live_thread_listener and live_thread_dispatcher communicate with this
 * queue.
 */
static struct relay_cmd_queue viewer_cmd_queue;

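/*
 * Counter used to assign a unique id to each viewer session. Incremented for
 * every new viewer command connection.
 */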
static uint64_t last_relay_viewer_session_id;

/*
 * Cleanup the daemon
 */
static
void cleanup(void)
{
	DBG("Cleaning up");

	free(live_uri);
}

/*
 * Write to writable pipe used to notify a thread.
 */
static
int notify_thread_pipe(int wpipe)
{
	ssize_t ret;

	ret = lttng_write(wpipe, "!", 1);
	if (ret < 1) {
		PERROR("write poll pipe");
	}

	return (int) ret;
}

/*
 * Stop all threads by closing the thread quit pipe.
 */
static
void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all live threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(live_dispatch_thread_exit, 1);
	futex_nto1_wake(&viewer_cmd_queue.futex);
}

/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
static
int create_thread_poll_set(struct lttng_poll_event *events, int size)
{
	int ret;

	if (events == NULL || size == 0) {
		ret = -1;
		goto error;
	}

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}

/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered, else 0.
 */
static
int check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}

/*
 * Create and init socket from uri.
 */
static
struct lttcomm_sock *init_socket(struct lttng_uri *uri)
{
	int ret;
	struct lttcomm_sock *sock = NULL;

	sock = lttcomm_alloc_sock_from_uri(uri);
	if (sock == NULL) {
		ERR("Allocating socket");
		goto error;
	}

	ret = lttcomm_create_sock(sock);
	if (ret < 0) {
		goto error;
	}
	DBG("Listening on sock %d for live", sock->fd);

	ret = sock->ops->bind(sock);
	if (ret < 0) {
		goto error;
	}

	ret = sock->ops->listen(sock, -1);
	if (ret < 0) {
		goto error;
	}

	return sock;

error:
	if (sock) {
		lttcomm_destroy_sock(sock);
	}
	return NULL;
}

/*
 * This thread manages the listening for new viewer connections on the
 * network.
 */
static
void *thread_listener(void *data)
{
	int i, ret, pollfd, err = -1;
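	/* Used to enable SO_REUSEADDR on each accepted viewer socket. */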
	int val = 1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_sock *live_control_sock;

	DBG("[thread] Relay live listener started");

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_LISTENER);

	health_code_update();

	live_control_sock = init_socket(live_uri);
	if (!live_control_sock) {
		goto error_sock_control;
	}

	/* Pass 2 as size here for the thread quit pipe and control sockets. */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the control socket */
	ret = lttng_poll_add(&events, live_control_sock->fd, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	lttng_relay_notify_ready();

	if (testpoint(relayd_thread_live_listener)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		DBG("Listener accepting live viewer connections");

restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}
		nb_fd = ret;

		DBG("Relay new viewer connection received");
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("socket poll error");
				goto error;
			} else if (revents & LPOLLIN) {
				/*
				 * Allocated in this thread, enqueued to a global queue,
				 * dequeued and freed in the worker thread.
				 */
				struct relay_command *relay_cmd;
				struct lttcomm_sock *newsock;

				relay_cmd = zmalloc(sizeof(*relay_cmd));
				if (!relay_cmd) {
					PERROR("relay command zmalloc");
					goto error;
				}

				assert(pollfd == live_control_sock->fd);
				newsock = live_control_sock->ops->accept(live_control_sock);
				if (!newsock) {
					PERROR("accepting control sock");
					free(relay_cmd);
					goto error;
				}
				DBG("Relay viewer connection accepted socket %d", newsock->fd);
				ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
						sizeof(int));
				if (ret < 0) {
					PERROR("setsockopt inet");
					lttcomm_destroy_sock(newsock);
					free(relay_cmd);
					goto error;
				}
				relay_cmd->sock = newsock;

				/*
				 * Lock free enqueue the request.
				 */
				cds_wfq_enqueue(&viewer_cmd_queue.queue, &relay_cmd->node);

				/*
				 * Wake the dispatch queue futex. Implicit memory
				 * barrier with the exchange in cds_wfq_enqueue.
				 */
				futex_nto1_wake(&viewer_cmd_queue.futex);
			}
		}
	}

exit:
error:
error_poll_add:
error_testpoint:
	lttng_poll_clean(&events);
error_create_poll:
	if (live_control_sock->fd >= 0) {
		ret = live_control_sock->ops->close(live_control_sock);
		if (ret) {
			PERROR("close");
		}
	}
	lttcomm_destroy_sock(live_control_sock);
error_sock_control:
	if (err) {
		health_error();
		DBG("Live viewer listener thread exited with error");
	}
	health_unregister(health_relayd);
	DBG("Live viewer listener thread cleanup complete");
	stop_threads();
	return NULL;
}

/*
 * This thread manages the dispatching of the requests to worker threads
 */
static
void *thread_dispatcher(void *data)
{
	int err = -1;
	ssize_t ret;
	struct cds_wfq_node *node;
	struct relay_command *relay_cmd = NULL;

	DBG("[thread] Live viewer relay dispatcher started");

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_DISPATCHER);

	if (testpoint(relayd_thread_live_dispatcher)) {
		goto error_testpoint;
	}

	health_code_update();

	while (!CMM_LOAD_SHARED(live_dispatch_thread_exit)) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&viewer_cmd_queue.futex);

		do {
			health_code_update();

			/* Dequeue commands */
			node = cds_wfq_dequeue_blocking(&viewer_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the live-viewer "
						"relay command queue");
				/* Continue thread execution */
				break;
			}

			relay_cmd = caa_container_of(node, struct relay_command, node);
			DBG("Dispatching viewer request waiting on sock %d",
					relay_cmd->sock->fd);

			/*
			 * Inform worker thread of the new request. This call is blocking
			 * so we can be assured that the data will be read at some point in
			 * time or wait to the end of the world :)
			 */
			ret = lttng_write(live_relay_cmd_pipe[1], relay_cmd,
					sizeof(*relay_cmd));
			free(relay_cmd);
			if (ret < (ssize_t) sizeof(struct relay_command)) {
				PERROR("write cmd pipe");
				goto error;
			}
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		health_poll_entry();
		futex_nto1_wait(&viewer_cmd_queue.futex);
		health_poll_exit();
	}

	/* Normal exit, no error */
	err = 0;

error:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_relayd);
	DBG("Live viewer dispatch thread dying");
	stop_threads();
	return NULL;
}

/*
 * Establish connection with the viewer and check the versions.
 *
 * Return 0 on success or else negative value.
 */
static
int viewer_connect(struct relay_command *cmd)
{
	int ret;
	struct lttng_viewer_connect reply, msg;

	assert(cmd);

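	/* The version check is considered done as soon as it is attempted. */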
	cmd->version_check_done = 1;

	health_code_update();

	/* Get version from the other side. */
	ret = cmd->sock->ops->recvmsg(cmd->sock, &msg, sizeof(msg), 0);
	if (ret < 0 || ret != sizeof(msg)) {
		if (ret == 0) {
			/* Orderly shutdown. Not necessary to print an error. */
			DBG("Socket %d did an orderly shutdown", cmd->sock->fd);
		} else {
			ERR("Relay failed to receive the version values.");
		}
		ret = -1;
		goto end;
	}

	health_code_update();

	reply.major = RELAYD_VERSION_COMM_MAJOR;
	reply.minor = RELAYD_VERSION_COMM_MINOR;

	/* Major versions must be the same */
	if (reply.major != be32toh(msg.major)) {
		DBG("Incompatible major versions (%u vs %u)", reply.major,
				be32toh(msg.major));
		ret = -1;
		goto end;
	}

	cmd->major = reply.major;
	/* We adapt to the lowest compatible version */
	if (reply.minor <= be32toh(msg.minor)) {
		cmd->minor = reply.minor;
	} else {
		cmd->minor = be32toh(msg.minor);
	}

	if (be32toh(msg.type) == VIEWER_CLIENT_COMMAND) {
		cmd->type = RELAY_VIEWER_COMMAND;
	} else if (be32toh(msg.type) == VIEWER_CLIENT_NOTIFICATION) {
		cmd->type = RELAY_VIEWER_NOTIFICATION;
	} else {
		ERR("Unknown connection type: %u", be32toh(msg.type));
		ret = -1;
		goto end;
	}

	reply.major = htobe32(reply.major);
	reply.minor = htobe32(reply.minor);
	if (cmd->type == RELAY_VIEWER_COMMAND) {
		reply.viewer_session_id = htobe64(++last_relay_viewer_session_id);
	}

	health_code_update();

	ret = cmd->sock->ops->sendmsg(cmd->sock, &reply,
			sizeof(struct lttng_viewer_connect), 0);
	if (ret < 0) {
		ERR("Relay sending version");
	}

	health_code_update();

	DBG("Version check done using protocol %u.%u", cmd->major, cmd->minor);
	ret = 0;

end:
	return ret;
}

/*
 * Send the viewer the list of current sessions.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_list_sessions(struct relay_command *cmd,
		struct lttng_ht *sessions_ht)
{
	int ret;
	struct lttng_viewer_list_sessions session_list;
	unsigned long count;
	long approx_before, approx_after;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct lttng_viewer_session send_session;
	struct relay_session *session;

	DBG("List sessions received");

	if (cmd->version_check_done == 0) {
		ERR("Trying to list sessions before version check");
		ret = -1;
		goto end_no_session;
	}

	rcu_read_lock();
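	/*
	 * The count is only a snapshot: sessions may be added or removed
	 * concurrently, which is what approx_before/approx_after reflect.
	 */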
	cds_lfht_count_nodes(sessions_ht->ht, &approx_before, &count, &approx_after);
	session_list.sessions_count = htobe32(count);

	health_code_update();

	ret = cmd->sock->ops->sendmsg(cmd->sock, &session_list,
			sizeof(session_list), 0);
	if (ret < 0) {
		ERR("Relay sending sessions list");
		goto end_unlock;
	}

	health_code_update();

	cds_lfht_for_each_entry(sessions_ht->ht, &iter.iter, node, node) {
		health_code_update();

		node = lttng_ht_iter_get_node_ulong(&iter);
		if (!node) {
			goto end_unlock;
		}
		session = caa_container_of(node, struct relay_session, session_n);

		strncpy(send_session.session_name, session->session_name,
				sizeof(send_session.session_name));
		strncpy(send_session.hostname, session->hostname,
				sizeof(send_session.hostname));
		send_session.id = htobe64(session->id);
		send_session.live_timer = htobe32(session->live_timer);
		send_session.clients = htobe32(session->viewer_attached);
		send_session.streams = htobe32(session->stream_count);

		health_code_update();

		ret = cmd->sock->ops->sendmsg(cmd->sock, &send_session,
				sizeof(send_session), 0);
		if (ret < 0) {
			ERR("Relay sending session info");
			goto end_unlock;
		}
	}
	health_code_update();

	rcu_read_unlock();
	ret = 0;
	goto end;

end_unlock:
	rcu_read_unlock();

end:
end_no_session:
	return ret;
}

/*
 * Open index file using a given viewer stream.
 *
 * Return 0 on success or else a negative value.
 */
static int open_index(struct relay_viewer_stream *stream)
{
	int ret;
	char fullpath[PATH_MAX];
	struct ctf_packet_index_file_hdr hdr;

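	/*
	 * With tracefile rotation enabled, each index file is suffixed with the
	 * id of the tracefile it describes.
	 */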
	if (stream->tracefile_count > 0) {
		ret = snprintf(fullpath, sizeof(fullpath), "%s/" DEFAULT_INDEX_DIR "/%s_%"
				PRIu64 DEFAULT_INDEX_FILE_SUFFIX, stream->path_name,
				stream->channel_name, stream->tracefile_count_current);
	} else {
		ret = snprintf(fullpath, sizeof(fullpath), "%s/" DEFAULT_INDEX_DIR "/%s"
				DEFAULT_INDEX_FILE_SUFFIX, stream->path_name,
				stream->channel_name);
	}
	if (ret < 0) {
		PERROR("snprintf index path");
		goto error;
	}

	DBG("Opening index file %s in read only", fullpath);
	ret = open(fullpath, O_RDONLY);
	if (ret < 0) {
		if (errno == ENOENT) {
			ret = -ENOENT;
			goto error;
		} else {
			PERROR("opening index in read-only");
		}
		goto error;
	}
	stream->index_read_fd = ret;
	DBG("Opening index file %s in read only, (fd: %d)", fullpath, ret);

	ret = lttng_read(stream->index_read_fd, &hdr, sizeof(hdr));
	if (ret < (ssize_t) sizeof(hdr)) {
		PERROR("Reading index header");
		goto error;
	}
	if (be32toh(hdr.magic) != CTF_INDEX_MAGIC) {
		ERR("Invalid header magic");
		ret = -1;
		goto error;
	}
	if (be32toh(hdr.index_major) != CTF_INDEX_MAJOR ||
			be32toh(hdr.index_minor) != CTF_INDEX_MINOR) {
		ERR("Invalid header version");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}

/*
 * Allocate and init a new viewer_stream.
 *
 * Copies the values from the stream passed in parameter and inserts the new
 * stream in the viewer_streams_ht.
 *
 * MUST be called with rcu_read_lock held.
 *
 * Returns 0 on success or a negative value on error.
 */
static
int init_viewer_stream(struct relay_stream *stream, int seek_last)
{
	int ret;
	struct relay_viewer_stream *viewer_stream;

	assert(stream);

	viewer_stream = zmalloc(sizeof(*viewer_stream));
	if (!viewer_stream) {
		PERROR("relay viewer stream zmalloc");
		ret = -1;
		goto error;
	}
	viewer_stream->session_id = stream->session->id;
	viewer_stream->stream_handle = stream->stream_handle;
	viewer_stream->path_name = strndup(stream->path_name,
			LTTNG_VIEWER_PATH_MAX);
	viewer_stream->channel_name = strndup(stream->channel_name,
			LTTNG_VIEWER_NAME_MAX);
	viewer_stream->tracefile_count = stream->tracefile_count;
	viewer_stream->metadata_flag = stream->metadata_flag;
	viewer_stream->tracefile_count_last = -1ULL;
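	/*
	 * Start the viewer either at the tracefile currently being written
	 * (seek last) or at the oldest tracefile still on disk.
	 */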
	if (seek_last) {
		viewer_stream->tracefile_count_current =
				stream->tracefile_count_current;
	} else {
		viewer_stream->tracefile_count_current =
				stream->oldest_tracefile_id;
	}

	viewer_stream->ctf_trace = stream->ctf_trace;
	if (viewer_stream->metadata_flag) {
		viewer_stream->ctf_trace->viewer_metadata_stream =
				viewer_stream;
	}
	uatomic_inc(&viewer_stream->ctf_trace->refcount);

	lttng_ht_node_init_u64(&viewer_stream->stream_n, stream->stream_handle);
	lttng_ht_add_unique_u64(viewer_streams_ht, &viewer_stream->stream_n);

	viewer_stream->index_read_fd = -1;
	viewer_stream->read_fd = -1;

	/*
	 * This is to avoid a race between the initialization of this object and
	 * the close of the given stream. If the stream is unable to find this
	 * viewer stream when closing, this copy will at least take the latest
	 * value.
	 * We also need that for the seek_last.
	 */
	viewer_stream->total_index_received = stream->total_index_received;

	/*
	 * If we never received an index for the current stream, delay
	 * the opening of the index, otherwise open it right now.
	 */
	if (viewer_stream->tracefile_count_current ==
			stream->tracefile_count_current &&
			viewer_stream->total_index_received == 0) {
		viewer_stream->index_read_fd = -1;
	} else {
		ret = open_index(viewer_stream);
		if (ret < 0) {
			goto error;
		}
	}

	if (seek_last && viewer_stream->index_read_fd > 0) {
		ret = lseek(viewer_stream->index_read_fd,
				viewer_stream->total_index_received *
					sizeof(struct ctf_packet_index),
				SEEK_CUR);
		if (ret < 0) {
			goto error;
		}
		viewer_stream->last_sent_index =
				viewer_stream->total_index_received;
	}

	ret = 0;

error:
	return ret;
}

/*
 * Rotate a stream to the next tracefile.
 *
 * Returns 0 on success, 1 on EOF, a negative value on error.
 */
static
int rotate_viewer_stream(struct relay_viewer_stream *viewer_stream,
		struct relay_stream *stream)
{
	int ret;
	uint64_t tracefile_id;

	assert(viewer_stream);

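	/* Tracefiles are used as a ring: wrap around at tracefile_count. */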
	tracefile_id = (viewer_stream->tracefile_count_current + 1) %
			viewer_stream->tracefile_count;
	/*
	 * Detect the last tracefile to open.
	 */
	if (viewer_stream->tracefile_count_last != -1ULL &&
			viewer_stream->tracefile_count_last ==
			viewer_stream->tracefile_count_current) {
		ret = 1;
		goto end;
	}

	if (stream) {
		pthread_mutex_lock(&stream->viewer_stream_rotation_lock);
	}
	/*
	 * The writer and the reader are not working in the same
	 * tracefile, we can read up to EOF, we don't care about the
	 * total_index_received.
	 */
	if (!stream || (stream->tracefile_count_current != tracefile_id)) {
		viewer_stream->close_write_flag = 1;
	} else {
		/*
		 * We are opening a file that is still open in write, make
		 * sure we limit our reading to the number of indexes
		 * received.
		 */
		viewer_stream->close_write_flag = 0;
		if (stream) {
			viewer_stream->total_index_received =
					stream->total_index_received;
		}
	}
	viewer_stream->tracefile_count_current = tracefile_id;

	ret = close(viewer_stream->index_read_fd);
	if (ret < 0) {
		PERROR("close index file %d",
				viewer_stream->index_read_fd);
	}
	viewer_stream->index_read_fd = -1;
	ret = close(viewer_stream->read_fd);
	if (ret < 0) {
		PERROR("close tracefile %d",
				viewer_stream->read_fd);
	}
	viewer_stream->read_fd = -1;

	pthread_mutex_lock(&viewer_stream->overwrite_lock);
	viewer_stream->abort_flag = 0;
	pthread_mutex_unlock(&viewer_stream->overwrite_lock);

	if (stream) {
		pthread_mutex_unlock(&stream->viewer_stream_rotation_lock);
	}
	ret = open_index(viewer_stream);
	if (ret < 0) {
		goto error;
	}

	ret = 0;

end:
error:
	return ret;
}

/*
 * Send the viewer the list of streams that appeared in the session since it
 * last asked, i.e. the streams that have not yet been sent to it.
 */
static
int viewer_get_new_streams(struct relay_command *cmd,
		struct lttng_ht *sessions_ht)
{
	int ret, send_streams = 0;
	uint32_t nb_streams = 0;
	struct lttng_viewer_new_streams_request request;
	struct lttng_viewer_new_streams_response response;
	struct lttng_viewer_stream send_stream;
	struct relay_stream *stream;
	struct relay_viewer_stream *viewer_stream;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct relay_session *session;

	assert(cmd);
	assert(sessions_ht);

	DBG("Get new streams received");

	if (cmd->version_check_done == 0) {
		ERR("Trying to get streams before version check");
		ret = -1;
		goto end_no_session;
	}

	health_code_update();

	ret = cmd->sock->ops->recvmsg(cmd->sock, &request, sizeof(request), 0);
	if (ret < 0 || ret != sizeof(request)) {
		if (ret == 0) {
			/* Orderly shutdown. Not necessary to print an error. */
			DBG("Socket %d did an orderly shutdown", cmd->sock->fd);
		} else {
			ERR("Relay failed to receive the command parameters.");
		}
		ret = -1;
		goto error;
	}

	health_code_update();

	rcu_read_lock();
	lttng_ht_lookup(sessions_ht,
			(void *)((unsigned long) be64toh(request.session_id)), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG("Relay session %" PRIu64 " not found",
				be64toh(request.session_id));
		response.status = htobe32(VIEWER_NEW_STREAMS_ERR);
		goto send_reply;
	}

	session = caa_container_of(node, struct relay_session, session_n);
	if (cmd->session_id == session->id) {
		/* We confirmed the viewer is asking for the same session. */
		send_streams = 1;
		response.status = htobe32(VIEWER_NEW_STREAMS_OK);
	} else {
		send_streams = 0;
		response.status = htobe32(VIEWER_NEW_STREAMS_ERR);
		goto send_reply;
	}

	/*
	 * Fill the viewer_streams_ht to count the number of streams ready to be
	 * sent and avoid concurrency issues on the relay_streams_ht and don't rely
	 * on a total session stream count.
	 */
	pthread_mutex_lock(&session->viewer_ready_lock);
	cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
			stream_n.node) {
		struct relay_viewer_stream *vstream;

		health_code_update();

		/*
		 * Don't send stream if no ctf_trace, wrong session or if the stream is
		 * not ready for the viewer.
		 */
		if (stream->session != cmd->session ||
				!stream->ctf_trace || !stream->viewer_ready) {
			continue;
		}

		vstream = live_find_viewer_stream_by_id(stream->stream_handle);
		if (!vstream) {
			ret = init_viewer_stream(stream, 0);
			if (ret < 0) {
				pthread_mutex_unlock(&session->viewer_ready_lock);
				goto end_unlock;
			}
			nb_streams++;
		} else if (!vstream->sent_flag) {
			nb_streams++;
		}
	}
	pthread_mutex_unlock(&session->viewer_ready_lock);

	response.streams_count = htobe32(nb_streams);

send_reply:
	health_code_update();
	ret = cmd->sock->ops->sendmsg(cmd->sock, &response, sizeof(response), 0);
	if (ret < 0) {
		ERR("Relay sending viewer attach response");
		goto end_unlock;
	}
	health_code_update();

	/*
	 * Unknown or empty session, just return gracefully, the viewer knows what
	 * is happening.
	 */
	if (!send_streams || !nb_streams) {
		ret = 0;
		goto end_unlock;
	}

	/* We should only be there if we have a session to attach to. */
	cds_lfht_for_each_entry(viewer_streams_ht->ht, &iter.iter, viewer_stream,
			stream_n.node) {
		health_code_update();

		/* Don't send back if session does not match or already sent. */
		if (viewer_stream->session_id != cmd->session->id ||
				viewer_stream->sent_flag) {
			continue;
		}

		send_stream.id = htobe64(viewer_stream->stream_handle);
		send_stream.ctf_trace_id = htobe64(viewer_stream->ctf_trace->id);
		send_stream.metadata_flag = htobe32(viewer_stream->metadata_flag);
		strncpy(send_stream.path_name, viewer_stream->path_name,
				sizeof(send_stream.path_name));
		strncpy(send_stream.channel_name, viewer_stream->channel_name,
				sizeof(send_stream.channel_name));

		ret = cmd->sock->ops->sendmsg(cmd->sock, &send_stream,
				sizeof(send_stream), 0);
		if (ret < 0) {
			ERR("Relay sending stream %" PRIu64, viewer_stream->stream_handle);
			goto end_unlock;
		}
		DBG("Sent stream %" PRIu64 " to viewer", viewer_stream->stream_handle);
		viewer_stream->sent_flag = 1;
	}

	ret = 0;

end_unlock:
	rcu_read_unlock();
end_no_session:
error:
	return ret;
}

/*
 * Attach the viewer to a session and send it the list of that session's
 * streams.
 */
static
int viewer_attach_session(struct relay_command *cmd,
		struct lttng_ht *sessions_ht)
{
	int ret, send_streams = 0;
	uint32_t nb_streams = 0;
	struct lttng_viewer_attach_session_request request;
	struct lttng_viewer_attach_session_response response;
	struct lttng_viewer_stream send_stream;
	struct relay_stream *stream;
	struct relay_viewer_stream *viewer_stream;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_node_u64 *node64;
	struct lttng_ht_iter iter;
	struct relay_session *session;
	int seek_last = 0;

	assert(cmd);
	assert(sessions_ht);

	DBG("Attach session received");

	if (cmd->version_check_done == 0) {
		ERR("Trying to attach session before version check");
		ret = -1;
		goto end_no_session;
	}

	health_code_update();

	ret = cmd->sock->ops->recvmsg(cmd->sock, &request, sizeof(request), 0);
	if (ret < 0 || ret != sizeof(request)) {
		if (ret == 0) {
			/* Orderly shutdown. Not necessary to print an error. */
			DBG("Socket %d did an orderly shutdown", cmd->sock->fd);
		} else {
			ERR("Relay failed to receive the attach parameters.");
		}
		ret = -1;
		goto error;
	}

	health_code_update();

	rcu_read_lock();
	lttng_ht_lookup(sessions_ht,
			(void *)((unsigned long) be64toh(request.session_id)), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG("Relay session %" PRIu64 " not found",
				be64toh(request.session_id));
		response.status = htobe32(VIEWER_ATTACH_UNK);
		goto send_reply;
	}

	session = caa_container_of(node, struct relay_session, session_n);
	if (cmd->session_id == session->id) {
		/* Same viewer already attached, just send the stream list. */
		send_streams = 1;
		response.status = htobe32(VIEWER_ATTACH_OK);
	} else if (session->viewer_attached != 0) {
		DBG("Already a viewer attached");
		response.status = htobe32(VIEWER_ATTACH_ALREADY);
		goto send_reply;
	} else if (session->live_timer == 0) {
		DBG("Not live session");
		response.status = htobe32(VIEWER_ATTACH_NOT_LIVE);
		goto send_reply;
	} else {
		session->viewer_attached++;
		send_streams = 1;
		response.status = htobe32(VIEWER_ATTACH_OK);
		cmd->session_id = session->id;
		cmd->session = session;
	}

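	/* Decode the position from which the viewer wants to start reading. */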
	switch (be32toh(request.seek)) {
	case VIEWER_SEEK_BEGINNING:
		/* Default behaviour. */
		break;
	case VIEWER_SEEK_LAST:
		seek_last = 1;
		break;
	default:
		ERR("Wrong seek parameter");
		response.status = htobe32(VIEWER_ATTACH_SEEK_ERR);
		send_streams = 0;
		goto send_reply;
	}

	if (send_streams) {
		/* We should only be there if we have a session to attach to. */
		assert(session);

		/*
		 * Fill the viewer_streams_ht to count the number of streams
		 * ready to be sent and avoid concurrency issues on the
		 * relay_streams_ht and don't rely on a total session stream count.
		 */
		pthread_mutex_lock(&session->viewer_ready_lock);
		cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, node, node) {
			struct relay_viewer_stream *vstream;

			health_code_update();

			node = lttng_ht_iter_get_node_ulong(&iter);
			if (!node) {
				continue;
			}
			stream = caa_container_of(node, struct relay_stream, stream_n);
			if (stream->session != cmd->session) {
				continue;
			}

			/*
			 * Don't send streams with no ctf_trace, they are not
			 * ready to be read.
			 */
			if (!stream->ctf_trace || !stream->viewer_ready) {
				continue;
			}

			vstream = live_find_viewer_stream_by_id(stream->stream_handle);
			if (!vstream) {
				ret = init_viewer_stream(stream, seek_last);
				if (ret < 0) {
					pthread_mutex_unlock(&session->viewer_ready_lock);
					goto end_unlock;
				}
			}
			nb_streams++;
		}
		pthread_mutex_unlock(&session->viewer_ready_lock);

		response.streams_count = htobe32(nb_streams);
	}

send_reply:
	health_code_update();
	ret = cmd->sock->ops->sendmsg(cmd->sock, &response, sizeof(response), 0);
	if (ret < 0) {
		ERR("Relay sending viewer attach response");
		goto end_unlock;
	}
	health_code_update();

	/*
	 * Unknown or empty session, just return gracefully, the viewer knows what
	 * is happening.
	 */
	if (!send_streams || !nb_streams) {
		ret = 0;
		goto end_unlock;
	}

	/* We should only be there if we have a session to attach to. */
	assert(session);
	cds_lfht_for_each_entry(viewer_streams_ht->ht, &iter.iter, node, node) {
		health_code_update();

		node64 = lttng_ht_iter_get_node_u64(&iter);
		if (!node64) {
			continue;
		}
		viewer_stream = caa_container_of(node64, struct relay_viewer_stream,
				stream_n);
		if (viewer_stream->session_id != cmd->session->id) {
			continue;
		}

		send_stream.id = htobe64(viewer_stream->stream_handle);
		send_stream.ctf_trace_id = htobe64(viewer_stream->ctf_trace->id);
		send_stream.metadata_flag = htobe32(viewer_stream->metadata_flag);
		strncpy(send_stream.path_name, viewer_stream->path_name,
				sizeof(send_stream.path_name));
		strncpy(send_stream.channel_name, viewer_stream->channel_name,
				sizeof(send_stream.channel_name));

		ret = cmd->sock->ops->sendmsg(cmd->sock, &send_stream,
				sizeof(send_stream), 0);
		if (ret < 0) {
			ERR("Relay sending stream %" PRIu64, viewer_stream->stream_handle);
			goto end_unlock;
		}
		DBG("Sent stream %" PRIu64 " to viewer", viewer_stream->stream_handle);
		viewer_stream->sent_flag = 1;
	}
	ret = 0;

end_unlock:
	rcu_read_unlock();
end_no_session:
error:
	return ret;
}

/*
 * Get viewer stream from stream id.
 *
 * RCU read side lock MUST be acquired.
 */
struct relay_viewer_stream *live_find_viewer_stream_by_id(uint64_t stream_id)
{
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;
	struct relay_viewer_stream *stream = NULL;

	lttng_ht_lookup(viewer_streams_ht, &stream_id, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		DBG("Relay viewer stream %" PRIu64 " not found", stream_id);
		goto end;
	}
	stream = caa_container_of(node, struct relay_viewer_stream, stream_n);

end:
	return stream;
}

static
void deferred_free_viewer_stream(struct rcu_head *head)
{
	struct relay_viewer_stream *stream =
		caa_container_of(head, struct relay_viewer_stream, rcu_node);

	free(stream->path_name);
	free(stream->channel_name);
	free(stream);
}

static
void delete_viewer_stream(struct relay_viewer_stream *vstream)
{
	int delret;
	struct lttng_ht_iter iter;

	iter.iter.node = &vstream->stream_n.node;
	delret = lttng_ht_del(viewer_streams_ht, &iter);
	assert(!delret);
}

static
void destroy_viewer_stream(struct relay_viewer_stream *vstream)
{
	long ret_ref;
	int ret;

	assert(vstream);
	ret_ref = uatomic_add_return(&vstream->ctf_trace->refcount, -1);
	assert(ret_ref >= 0);

	if (vstream->read_fd >= 0) {
		ret = close(vstream->read_fd);
		if (ret < 0) {
			PERROR("close read_fd");
		}
	}
	if (vstream->index_read_fd >= 0) {
		ret = close(vstream->index_read_fd);
		if (ret < 0) {
			PERROR("close index_read_fd");
		}
	}

	/*
	 * If the only stream left in the HT is the metadata stream,
	 * we need to remove it because we won't detect a EOF for this
	 * stream.
	 */
	if (ret_ref == 1 && vstream->ctf_trace->viewer_metadata_stream) {
		delete_viewer_stream(vstream->ctf_trace->viewer_metadata_stream);
		destroy_viewer_stream(vstream->ctf_trace->viewer_metadata_stream);
		vstream->ctf_trace->metadata_stream = NULL;
		DBG("Freeing ctf_trace %" PRIu64, vstream->ctf_trace->id);
		/*
		 * The streaming-side is already closed and we can't receive a new
		 * stream concurrently at this point (since the session is being
		 * destroyed), so when we detect the refcount equals 0, we are the
		 * only owners of the ctf_trace and we can free it ourself.
		 */
		free(vstream->ctf_trace);
	}

	call_rcu(&vstream->rcu_node, deferred_free_viewer_stream);
}

/*
 * Atomically check if new streams got added in the session since the last
 * check and reset the flag to 0.
 *
 * Returns 1 if new streams got added, 0 if nothing changed, a negative value
 * on error.
 */
static
int check_new_streams(uint64_t session_id, struct lttng_ht *sessions_ht)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct relay_session *session;
	unsigned long current_val;
	int ret;

	lttng_ht_lookup(sessions_ht,
			(void *)((unsigned long) session_id), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG("Relay session %" PRIu64 " not found", session_id);
		ret = -1;
		goto error;
	}

	session = caa_container_of(node, struct relay_session, session_n);

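	/* Atomically fetch and clear the new streams flag. */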
	current_val = uatomic_cmpxchg(&session->new_streams, 1, 0);
	ret = current_val;

error:
	return ret;
}

/*
 * Send the next index for a stream.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_get_next_index(struct relay_command *cmd,
		struct lttng_ht *sessions_ht)
{
	int ret;
	struct lttng_viewer_get_next_index request_index;
	struct lttng_viewer_index viewer_index;
	struct ctf_packet_index packet_index;
	struct relay_viewer_stream *vstream;
	struct relay_stream *rstream;

	assert(cmd);
	assert(sessions_ht);

	DBG("Viewer get next index");

	if (cmd->version_check_done == 0) {
		ERR("Trying to request index before version check");
		ret = -1;
		goto end_no_session;
	}

	health_code_update();
	ret = cmd->sock->ops->recvmsg(cmd->sock, &request_index,
			sizeof(request_index), 0);
	if (ret < 0 || ret != sizeof(request_index)) {
		ret = -1;
		ERR("Relay didn't receive the whole packet");
		goto end;
	}
	health_code_update();

	rcu_read_lock();
	vstream = live_find_viewer_stream_by_id(be64toh(request_index.stream_id));
	if (!vstream) {
		ret = -1;
		goto end_unlock;
	}

	memset(&viewer_index, 0, sizeof(viewer_index));

	/*
	 * The viewer should not ask for index on metadata stream.
	 */
	if (vstream->metadata_flag) {
		viewer_index.status = htobe32(VIEWER_INDEX_HUP);
		goto send_reply;
	}

	/* First time, we open the index file */
	if (vstream->index_read_fd < 0) {
		ret = open_index(vstream);
		if (ret == -ENOENT) {
			/*
			 * The index is created only when the first data packet arrives, it
			 * might not be ready at the beginning of the session
			 */
			viewer_index.status = htobe32(VIEWER_INDEX_RETRY);
			goto send_reply;
		} else if (ret < 0) {
			viewer_index.status = htobe32(VIEWER_INDEX_ERR);
			goto send_reply;
		}
	}

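	/*
	 * The relay stream may already be gone if the streaming side closed
	 * it; both the found and not-found cases are handled below.
	 */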
	rstream = relay_stream_find_by_id(vstream->stream_handle);
	if (rstream) {
		if (vstream->abort_flag) {
			/* Rotate on abort (overwrite). */
			DBG("Viewer rotate because of overwrite");
			ret = rotate_viewer_stream(vstream, rstream);
			if (ret < 0) {
				goto end_unlock;
			} else if (ret == 1) {
				viewer_index.status = htobe32(VIEWER_INDEX_HUP);
				delete_viewer_stream(vstream);
				destroy_viewer_stream(vstream);
				goto send_reply;
			}
		}
		pthread_mutex_lock(&rstream->viewer_stream_rotation_lock);
		if (rstream->tracefile_count_current == vstream->tracefile_count_current) {
			if (rstream->beacon_ts_end != -1ULL &&
					vstream->last_sent_index == rstream->total_index_received) {
				viewer_index.status = htobe32(VIEWER_INDEX_INACTIVE);
				viewer_index.timestamp_end = htobe64(rstream->beacon_ts_end);
				pthread_mutex_unlock(&rstream->viewer_stream_rotation_lock);
				goto send_reply;
			/*
			 * Reader and writer are working in the same tracefile, so we care
			 * about the number of index received and sent. Otherwise, we read
			 * up to EOF.
			 */
			} else if (rstream->total_index_received <= vstream->last_sent_index
					&& !vstream->close_write_flag) {
				pthread_mutex_unlock(&rstream->viewer_stream_rotation_lock);
				/* No new index to send, retry later. */
				viewer_index.status = htobe32(VIEWER_INDEX_RETRY);
				goto send_reply;
			}
		}
		pthread_mutex_unlock(&rstream->viewer_stream_rotation_lock);
	} else if (!rstream && vstream->close_write_flag &&
			vstream->total_index_received == vstream->last_sent_index) {
		/* Last index sent and current tracefile closed in write */
		viewer_index.status = htobe32(VIEWER_INDEX_HUP);
		delete_viewer_stream(vstream);
		destroy_viewer_stream(vstream);
		goto send_reply;
	} else {
		vstream->close_write_flag = 1;
	}

	if (!vstream->ctf_trace->metadata_received ||
			vstream->ctf_trace->metadata_received >
			vstream->ctf_trace->metadata_sent) {
		viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_METADATA;
	}

	ret = check_new_streams(vstream->session_id, sessions_ht);
	if (ret < 0) {
		goto end_unlock;
	} else if (ret == 1) {
		viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_STREAM;
	}

	pthread_mutex_lock(&vstream->overwrite_lock);
	if (vstream->abort_flag) {
		/*
		 * The file is being overwritten by the writer, we cannot
		 * use it.
		 */
		viewer_index.status = htobe32(VIEWER_INDEX_RETRY);
		pthread_mutex_unlock(&vstream->overwrite_lock);
		ret = rotate_viewer_stream(vstream, rstream);
		if (ret < 0) {
			goto end_unlock;
		} else if (ret == 1) {
			viewer_index.status = htobe32(VIEWER_INDEX_HUP);
			delete_viewer_stream(vstream);
			destroy_viewer_stream(vstream);
			goto send_reply;
		}
		goto send_reply;
	}
	ret = lttng_read(vstream->index_read_fd, &packet_index,
			sizeof(packet_index));
	pthread_mutex_unlock(&vstream->overwrite_lock);
	if (ret < (ssize_t) sizeof(packet_index)) {
		/*
		 * The tracefile is closed in write, so we read up to EOF.
		 */
		if (vstream->close_write_flag == 1) {
			viewer_index.status = htobe32(VIEWER_INDEX_RETRY);
			/* Rotate on normal EOF */
			ret = rotate_viewer_stream(vstream, rstream);
			if (ret < 0) {
				goto end_unlock;
			} else if (ret == 1) {
				viewer_index.status = htobe32(VIEWER_INDEX_HUP);
				delete_viewer_stream(vstream);
				destroy_viewer_stream(vstream);
				goto send_reply;
			}
		} else {
			PERROR("Relay reading index file %d",
					vstream->index_read_fd);
			viewer_index.status = htobe32(VIEWER_INDEX_ERR);
		}
		goto send_reply;
	} else {
		viewer_index.status = htobe32(VIEWER_INDEX_OK);
		vstream->last_sent_index++;
	}

	/*
	 * Indexes are stored in big endian, no need to switch before sending.
	 */
	viewer_index.offset = packet_index.offset;
	viewer_index.packet_size = packet_index.packet_size;
	viewer_index.content_size = packet_index.content_size;
	viewer_index.timestamp_begin = packet_index.timestamp_begin;
	viewer_index.timestamp_end = packet_index.timestamp_end;
	viewer_index.events_discarded = packet_index.events_discarded;
	viewer_index.stream_id = packet_index.stream_id;

send_reply:
	viewer_index.flags = htobe32(viewer_index.flags);
	health_code_update();
	ret = cmd->sock->ops->sendmsg(cmd->sock, &viewer_index,
			sizeof(viewer_index), 0);
	if (ret < 0) {
		ERR("Relay index to viewer");
		goto end_unlock;
	}
	health_code_update();

	DBG("Index %" PRIu64 " for stream %" PRIu64 " sent",
			vstream->last_sent_index, vstream->stream_handle);

end_unlock:
	rcu_read_unlock();

end_no_session:
end:
	return ret;
}

/*
 * Send the requested trace packet to the viewer.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_get_packet(struct relay_command *cmd,
		struct lttng_ht *sessions_ht)
{
	int ret, send_data = 0;
	char *data = NULL;
	uint32_t len = 0;
	ssize_t read_len;
	struct lttng_viewer_get_packet get_packet_info;
	struct lttng_viewer_trace_packet reply;
	struct relay_viewer_stream *stream;

	assert(cmd);

	DBG2("Relay get data packet");

	if (cmd->version_check_done == 0) {
		ERR("Trying to get packet before version check");
		ret = -1;
		goto end;
	}

	health_code_update();
	ret = cmd->sock->ops->recvmsg(cmd->sock, &get_packet_info,
			sizeof(get_packet_info), 0);
	if (ret < 0 || ret != sizeof(get_packet_info)) {
		ret = -1;
		ERR("Relay didn't receive the whole packet");
		goto end;
	}
	health_code_update();

	/* From this point on, the error label can be reached. */
	memset(&reply, 0, sizeof(reply));

	rcu_read_lock();
	stream = live_find_viewer_stream_by_id(be64toh(get_packet_info.stream_id));
	if (!stream) {
		goto error;
	}
	assert(stream->ctf_trace);

	/*
	 * First time we read this stream, we need to open the tracefile. We
	 * should only arrive here if an index has already been sent to the
	 * viewer, so the tracefile must exist; if it does not, it is a fatal
	 * error.
	 */
	if (stream->read_fd < 0) {
		char fullpath[PATH_MAX];

		if (stream->tracefile_count > 0) {
			ret = snprintf(fullpath, PATH_MAX, "%s/%s_%" PRIu64, stream->path_name,
					stream->channel_name,
					stream->tracefile_count_current);
		} else {
			ret = snprintf(fullpath, PATH_MAX, "%s/%s", stream->path_name,
					stream->channel_name);
		}
		if (ret < 0) {
			goto error;
		}
		ret = open(fullpath, O_RDONLY);
		if (ret < 0) {
			PERROR("Relay opening trace file");
			goto error;
		}
		stream->read_fd = ret;
	}

	if (!stream->ctf_trace->metadata_received ||
			stream->ctf_trace->metadata_received >
			stream->ctf_trace->metadata_sent) {
		reply.status = htobe32(VIEWER_GET_PACKET_ERR);
		reply.flags |= LTTNG_VIEWER_FLAG_NEW_METADATA;
		goto send_reply;
	}

	ret = check_new_streams(stream->session_id, sessions_ht);
	if (ret < 0) {
		goto end_unlock;
	} else if (ret == 1) {
		reply.status = htobe32(VIEWER_GET_PACKET_ERR);
		reply.flags |= LTTNG_VIEWER_FLAG_NEW_STREAM;
		goto send_reply;
	}

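	/*
	 * The viewer requests a specific length at a specific offset,
	 * typically taken from an index it previously received.
	 */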
	len = be32toh(get_packet_info.len);
	data = zmalloc(len);
	if (!data) {
		PERROR("relay data zmalloc");
		goto error;
	}

	ret = lseek(stream->read_fd, be64toh(get_packet_info.offset), SEEK_SET);
	if (ret < 0) {
		/*
		 * If the read fd was closed by the streaming side, the
		 * abort_flag will be set to 1, otherwise it is an error.
		 */
		if (stream->abort_flag == 0) {
			PERROR("lseek");
			goto error;
		}
		reply.status = htobe32(VIEWER_GET_PACKET_EOF);
		goto send_reply;
	}
	read_len = lttng_read(stream->read_fd, data, len);
	if (read_len < (ssize_t) len) {
		/*
		 * If the read fd was closed by the streaming side, the
		 * abort_flag will be set to 1, otherwise it is an error.
		 */
		if (stream->abort_flag == 0) {
			PERROR("Relay reading trace file, fd: %d, offset: %" PRIu64,
					stream->read_fd,
					be64toh(get_packet_info.offset));
			goto error;
		} else {
			reply.status = htobe32(VIEWER_GET_PACKET_EOF);
			goto send_reply;
		}
	}
	reply.status = htobe32(VIEWER_GET_PACKET_OK);
	reply.len = htobe32(len);
	send_data = 1;
	goto send_reply;

error:
	reply.status = htobe32(VIEWER_GET_PACKET_ERR);

send_reply:
	reply.flags = htobe32(reply.flags);

	health_code_update();
	ret = cmd->sock->ops->sendmsg(cmd->sock, &reply, sizeof(reply), 0);
	if (ret < 0) {
		ERR("Relay data header to viewer");
		goto end_unlock;
	}
	health_code_update();

	if (send_data) {
		health_code_update();
		ret = cmd->sock->ops->sendmsg(cmd->sock, data, len, 0);
		if (ret < 0) {
			ERR("Relay send data to viewer");
			goto end_unlock;
		}
		health_code_update();
	}

	DBG("Sent %u bytes for stream %" PRIu64, len,
			be64toh(get_packet_info.stream_id));

end_unlock:
	free(data);
	rcu_read_unlock();

end:
	return ret;
}

/*
 * Send the session's metadata
 *
 * Return 0 on success else a negative value.
 */
static
int viewer_get_metadata(struct relay_command *cmd)
{
	int ret = 0;
	ssize_t read_len;
	uint64_t len = 0;
	char *data = NULL;
	struct lttng_viewer_get_metadata request;
	struct lttng_viewer_metadata_packet reply;
	struct relay_viewer_stream *stream;

	assert(cmd);

	DBG("Relay get metadata");

	if (cmd->version_check_done == 0) {
		ERR("Trying to get metadata before version check");
		ret = -1;
		goto end;
	}

	health_code_update();
	ret = cmd->sock->ops->recvmsg(cmd->sock, &request,
			sizeof(request), 0);
	if (ret < 0 || ret != sizeof(request)) {
		ret = -1;
		ERR("Relay didn't receive the whole packet");
		goto end;
	}
	health_code_update();

	rcu_read_lock();
	stream = live_find_viewer_stream_by_id(be64toh(request.stream_id));
	if (!stream || !stream->metadata_flag) {
		ERR("Invalid metadata stream");
		goto error;
	}
	assert(stream->ctf_trace);
	assert(stream->ctf_trace->metadata_sent <=
			stream->ctf_trace->metadata_received);

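	/* Only send the metadata that was not already sent to this viewer. */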
	len = stream->ctf_trace->metadata_received -
			stream->ctf_trace->metadata_sent;
	if (len == 0) {
		reply.status = htobe32(VIEWER_NO_NEW_METADATA);
		goto send_reply;
	}

	/* first time, we open the metadata file */
	if (stream->read_fd < 0) {
		char fullpath[PATH_MAX];

		ret = snprintf(fullpath, PATH_MAX, "%s/%s", stream->path_name,
				stream->channel_name);
		if (ret < 0) {
			goto error;
		}
		ret = open(fullpath, O_RDONLY);
		if (ret < 0) {
			PERROR("Relay opening metadata file");
			goto error;
		}
		stream->read_fd = ret;
	}

	reply.len = htobe64(len);
	data = zmalloc(len);
	if (!data) {
		PERROR("viewer metadata zmalloc");
		goto error;
	}

	read_len = lttng_read(stream->read_fd, data, len);
	if (read_len < (ssize_t) len) {
		PERROR("Relay reading metadata file");
		goto error;
	}
	stream->ctf_trace->metadata_sent += read_len;
	reply.status = htobe32(VIEWER_METADATA_OK);
	goto send_reply;

error:
	reply.status = htobe32(VIEWER_METADATA_ERR);

send_reply:
	health_code_update();
	ret = cmd->sock->ops->sendmsg(cmd->sock, &reply, sizeof(reply), 0);
	if (ret < 0) {
		ERR("Relay data header to viewer");
		goto end_unlock;
	}
	health_code_update();

	if (len > 0) {
		ret = cmd->sock->ops->sendmsg(cmd->sock, data, len, 0);
		if (ret < 0) {
			ERR("Relay send data to viewer");
			goto end_unlock;
		}
	}

	DBG("Sent %" PRIu64 " bytes of metadata for stream %" PRIu64, len,
			be64toh(request.stream_id));

end_unlock:
	free(data);
	rcu_read_unlock();
end:
	return ret;
}

/*
 * live_relay_unknown_command: send an error reply when an unknown command is
 * received.
 */
static
void live_relay_unknown_command(struct relay_command *cmd)
{
	struct lttcomm_relayd_generic_reply reply;
	int ret;

	reply.ret_code = htobe32(LTTNG_ERR_UNK);
	ret = cmd->sock->ops->sendmsg(cmd->sock, &reply,
			sizeof(struct lttcomm_relayd_generic_reply), 0);
	if (ret < 0) {
		ERR("Relay sending unknown command");
	}
}

/*
 * Process the commands received on the control socket
 */
static
int process_control(struct lttng_viewer_cmd *recv_hdr,
		struct relay_command *cmd, struct lttng_ht *sessions_ht)
{
	int ret = 0;

	switch (be32toh(recv_hdr->cmd)) {
	case VIEWER_CONNECT:
		ret = viewer_connect(cmd);
		break;
	case VIEWER_LIST_SESSIONS:
		ret = viewer_list_sessions(cmd, sessions_ht);
		break;
	case VIEWER_ATTACH_SESSION:
		ret = viewer_attach_session(cmd, sessions_ht);
		break;
	case VIEWER_GET_NEXT_INDEX:
		ret = viewer_get_next_index(cmd, sessions_ht);
		break;
	case VIEWER_GET_PACKET:
		ret = viewer_get_packet(cmd, sessions_ht);
		break;
	case VIEWER_GET_METADATA:
		ret = viewer_get_metadata(cmd);
		break;
	case VIEWER_GET_NEW_STREAMS:
		ret = viewer_get_new_streams(cmd, sessions_ht);
		break;
	default:
		ERR("Received unknown viewer command (%u)", be32toh(recv_hdr->cmd));
		live_relay_unknown_command(cmd);
		ret = -1;
		goto end;
	}

end:
	return ret;
}

static
void cleanup_poll_connection(struct lttng_poll_event *events, int pollfd)
{
	int ret;

	assert(events);

	lttng_poll_del(events, pollfd);

	ret = close(pollfd);
	if (ret < 0) {
		ERR("Closing pollfd %d", pollfd);
	}
}

/*
 * Create and add connection to the given hash table.
 *
 * Return poll add value or else -1 on error.
 */
static
int add_connection(int fd, struct lttng_poll_event *events,
		struct lttng_ht *relay_connections_ht)
{
	int ret;
	struct relay_command *relay_connection;

	assert(events);
	assert(relay_connections_ht);

	relay_connection = zmalloc(sizeof(struct relay_command));
	if (relay_connection == NULL) {
		PERROR("Relay command zmalloc");
		goto error;
	}

	ret = lttng_read(fd, relay_connection, sizeof(*relay_connection));
	if (ret < (ssize_t) sizeof(*relay_connection)) {
		PERROR("read relay cmd pipe");
		goto error_read;
	}

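	/* Connections are indexed by the fd of their control socket. */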
	lttng_ht_node_init_ulong(&relay_connection->sock_n,
			(unsigned long) relay_connection->sock->fd);
	rcu_read_lock();
	lttng_ht_add_unique_ulong(relay_connections_ht,
			&relay_connection->sock_n);
	rcu_read_unlock();

	return lttng_poll_add(events, relay_connection->sock->fd,
			LPOLLIN | LPOLLRDHUP);

error_read:
	free(relay_connection);
error:
	return -1;
}

static
void deferred_free_connection(struct rcu_head *head)
{
	struct relay_command *relay_connection =
		caa_container_of(head, struct relay_command, rcu_node);

	if (relay_connection->session &&
			relay_connection->session->viewer_attached > 0) {
		relay_connection->session->viewer_attached--;
	}
	lttcomm_destroy_sock(relay_connection->sock);
	free(relay_connection);
}

/*
 * Delete all streams for a specific session ID.
 */
static
void viewer_del_streams(uint64_t session_id)
{
	struct relay_viewer_stream *stream;
	struct lttng_ht_iter iter;

	rcu_read_lock();
	cds_lfht_for_each_entry(viewer_streams_ht->ht, &iter.iter, stream,
			stream_n.node) {
		health_code_update();

		if (stream->session_id != session_id) {
			continue;
		}

		delete_viewer_stream(stream);
		assert(stream->ctf_trace);

		if (stream->metadata_flag) {
			/*
			 * The metadata viewer stream is destroyed once the refcount on the
			 * ctf trace goes to 0 in the destroy stream function thus there is
			 * no explicit call to that function here.
			 */
			stream->ctf_trace->metadata_sent = 0;
			stream->ctf_trace->viewer_metadata_stream = NULL;
		} else {
			destroy_viewer_stream(stream);
		}
	}
	rcu_read_unlock();
}

/*
 * Delete and free a connection.
 *
 * RCU read side lock MUST be acquired.
 */
static
void del_connection(struct lttng_ht *relay_connections_ht,
		struct lttng_ht_iter *iter, struct relay_command *relay_connection)
{
	int ret;

	assert(relay_connections_ht);
	assert(iter);
	assert(relay_connection);

	DBG("Cleaning connection of session ID %" PRIu64,
			relay_connection->session_id);

	ret = lttng_ht_del(relay_connections_ht, iter);
	assert(!ret);

	viewer_del_streams(relay_connection->session_id);

	call_rcu(&relay_connection->rcu_node, deferred_free_connection);
}
2028
2029 /*
2030 * This thread does the actual work
2031 */
static
void *thread_worker(void *data)
{
	int ret, err = -1;
	uint32_t nb_fd;
	struct relay_command *relay_connection;
	struct lttng_poll_event events;
	struct lttng_ht *relay_connections_ht;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct lttng_viewer_cmd recv_hdr;
	struct relay_local_data *relay_ctx = (struct relay_local_data *) data;
	struct lttng_ht *sessions_ht = relay_ctx->sessions_ht;

	DBG("[thread] Live viewer relay worker started");

	rcu_register_thread();

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_WORKER);

	if (testpoint(relayd_thread_live_worker)) {
		goto error_testpoint;
	}

	/* Table of connections indexed by socket fd. */
	relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!relay_connections_ht) {
		goto relay_connections_ht_error;
	}

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, live_relay_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

restart:
	while (1) {
		int i;

		health_code_update();

		/* Infinite blocking call, waiting for transmission. */
		DBG3("Relayd live viewer worker thread polling...");
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/* Restart interrupted system call. */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		/*
		 * Process each ready fd. The thread quit pipe and the
		 * command pipe are checked first so that shutdown requests
		 * and new viewer connections are handled before traffic on
		 * existing viewer sockets.
		 */
		for (i = 0; i < nb_fd; i++) {
			/* Fetch the poll event data once. */
			uint32_t revents = LTTNG_POLL_GETEV(&events, i);
			int pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the relay command pipe for a new connection. */
			if (pollfd == live_relay_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Relay live pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					DBG("Relay live viewer command received");
					ret = add_connection(live_relay_cmd_pipe[0],
							&events, relay_connections_ht);
					if (ret < 0) {
						goto error;
					}
				}
			} else if (revents) {
				rcu_read_lock();
				lttng_ht_lookup(relay_connections_ht,
						(void *)((unsigned long) pollfd), &iter);
				node = lttng_ht_iter_get_node_ulong(&iter);
				if (node == NULL) {
					DBG2("Relay viewer sock %d not found", pollfd);
					rcu_read_unlock();
					goto error;
				}
				relay_connection = caa_container_of(node, struct relay_command,
						sock_n);

				if (revents & (LPOLLERR)) {
					cleanup_poll_connection(&events, pollfd);
					del_connection(relay_connections_ht, &iter,
							relay_connection);
				} else if (revents & (LPOLLHUP | LPOLLRDHUP)) {
					DBG("Viewer socket %d hung up", pollfd);
					cleanup_poll_connection(&events, pollfd);
					del_connection(relay_connections_ht, &iter,
							relay_connection);
				} else if (revents & LPOLLIN) {
					ret = relay_connection->sock->ops->recvmsg(
							relay_connection->sock, &recv_hdr,
							sizeof(struct lttng_viewer_cmd),
							0);
					/* Connection closed or read error. */
					if (ret <= 0) {
						cleanup_poll_connection(&events, pollfd);
						del_connection(relay_connections_ht, &iter,
								relay_connection);
						DBG("Viewer control connection closed with %d",
								pollfd);
					} else {
						if (relay_connection->session) {
							DBG2("Relay viewer worker receiving data for "
									"session: %" PRIu64,
									relay_connection->session->id);
						}
						ret = process_control(&recv_hdr, relay_connection,
								sessions_ht);
						if (ret < 0) {
							/* Clear the session on error. */
							cleanup_poll_connection(&events, pollfd);
							del_connection(relay_connections_ht, &iter,
									relay_connection);
							DBG("Viewer connection closed with %d", pollfd);
						}
					}
				}
				rcu_read_unlock();
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);

	/* Empty the hash table and free the remaining connections. */
	rcu_read_lock();
	cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter, node, node) {
		health_code_update();

		node = lttng_ht_iter_get_node_ulong(&iter);
		if (!node) {
			continue;
		}

		relay_connection = caa_container_of(node, struct relay_command,
				sock_n);
		del_connection(relay_connections_ht, &iter, relay_connection);
	}
	rcu_read_unlock();
error_poll_create:
	lttng_ht_destroy(relay_connections_ht);
relay_connections_ht_error:
	/* Close the relay command pipe. */
	utils_close_pipe(live_relay_cmd_pipe);
	if (err) {
		DBG("Viewer worker thread exited with error");
	}
	DBG("Viewer worker thread cleanup complete");
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_relayd);
	stop_threads();
	rcu_unregister_thread();
	return NULL;
}

/*
 * Create the relay command pipe used to wake up the live worker thread.
 * Closed at the end of thread_worker(), not in cleanup().
 */
static int create_relay_cmd_pipe(void)
{
	int ret;

	ret = utils_create_pipe_cloexec(live_relay_cmd_pipe);

	return ret;
}

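/*
 * Signal all live threads to stop, join them, then release the live
 * resources.
 */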
void live_stop_threads(void)
{
	int ret;
	void *status;

	stop_threads();

	ret = pthread_join(live_listener_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join live listener");
		goto error; /* join error, exit without cleanup */
	}

	ret = pthread_join(live_worker_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join live worker");
		goto error; /* join error, exit without cleanup */
	}

	ret = pthread_join(live_dispatcher_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join live dispatcher");
		goto error; /* join error, exit without cleanup */
	}

	cleanup();

error:
	return;
}

/*
 * Start the live viewer threads: dispatcher, worker and listener.
 *
 * Returns 0 on success, a non-zero value on error.
 */
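/*
 * Hypothetical caller sketch (names assumed for illustration, not taken
 * from this file):
 *
 *	if (live_start_threads(live_uri, relay_ctx) != 0) {
 *		ERR("Cannot start live viewer threads");
 *	}
 *	...
 *	live_stop_threads();
 */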
int live_start_threads(struct lttng_uri *uri,
		struct relay_local_data *relay_ctx)
{
	int ret = 0;
	void *status;
	int is_root;

	assert(uri);
	live_uri = uri;

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

	if (!is_root) {
		if (live_uri->port < 1024) {
			ERR("Need to be root to use ports < 1024");
			ret = -1;
			goto exit;
		}
	}

	/* Setup the live viewer command pipe. */
	if ((ret = create_relay_cmd_pipe()) < 0) {
		goto exit;
	}

	/* Init relay command queue. */
	cds_wfq_init(&viewer_cmd_queue.queue);

	/* Set up max poll set size */
	lttng_poll_set_max_size();

	/* Setup the dispatcher thread */
	ret = pthread_create(&live_dispatcher_thread, NULL,
			thread_dispatcher, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create viewer dispatcher");
		goto exit;
	}

	/* Setup the worker thread */
	ret = pthread_create(&live_worker_thread, NULL,
			thread_worker, relay_ctx);
	if (ret != 0) {
		PERROR("pthread_create viewer worker");
		goto exit_dispatcher;
	}

	/* Setup the listener thread */
	ret = pthread_create(&live_listener_thread, NULL,
			thread_listener, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create viewer listener");
		goto exit_worker;
	}

	ret = 0;
	goto end;

	/*
	 * A thread failed to launch: signal the threads that did start to
	 * stop, then join them. Each failure path jumps to the label that
	 * joins only threads that were actually created, so a pthread_t
	 * that was never initialized is never joined. stop_threads() is
	 * idempotent, so the fall-through below is harmless, and the
	 * original pthread_create() error code is preserved in ret.
	 */
exit_worker:
	stop_threads();
	if (pthread_join(live_worker_thread, &status) != 0) {
		PERROR("pthread_join live worker");
		goto error; /* join error, exit without cleanup */
	}

exit_dispatcher:
	stop_threads();
	if (pthread_join(live_dispatcher_thread, &status) != 0) {
		PERROR("pthread_join live dispatcher");
		goto error; /* join error, exit without cleanup */
	}

exit:
	cleanup();

end:
error:
	return ret;
}