Commit | Line | Data |
---|---|---|
b8aa1682 JD |
1 | /* |
2 | * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com> | |
3 | * David Goulet <dgoulet@efficios.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License, version 2 only, | |
7 | * as published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License along | |
15 | * with this program; if not, write to the Free Software Foundation, Inc., | |
16 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | |
17 | */ | |
18 | ||
19 | #define _GNU_SOURCE | |
20 | #include <getopt.h> | |
21 | #include <grp.h> | |
22 | #include <limits.h> | |
23 | #include <pthread.h> | |
24 | #include <signal.h> | |
25 | #include <stdio.h> | |
26 | #include <stdlib.h> | |
27 | #include <string.h> | |
28 | #include <sys/mman.h> | |
29 | #include <sys/mount.h> | |
30 | #include <sys/resource.h> | |
31 | #include <sys/socket.h> | |
32 | #include <sys/stat.h> | |
33 | #include <sys/types.h> | |
34 | #include <sys/wait.h> | |
173af62f | 35 | #include <inttypes.h> |
b8aa1682 JD |
36 | #include <urcu/futex.h> |
37 | #include <urcu/uatomic.h> | |
38 | #include <unistd.h> | |
39 | #include <fcntl.h> | |
40 | #include <config.h> | |
41 | ||
42 | #include <lttng/lttng.h> | |
43 | #include <common/common.h> | |
44 | #include <common/compat/poll.h> | |
45 | #include <common/compat/socket.h> | |
46 | #include <common/defaults.h> | |
02a6bb53 | 47 | #include <common/daemonize.h> |
b8aa1682 JD |
48 | #include <common/futex.h> |
49 | #include <common/sessiond-comm/sessiond-comm.h> | |
50 | #include <common/sessiond-comm/inet.h> | |
b8aa1682 JD |
51 | #include <common/sessiond-comm/relayd.h> |
52 | #include <common/uri.h> | |
a02de639 | 53 | #include <common/utils.h> |
b8aa1682 | 54 | |
0f907de1 | 55 | #include "cmd.h" |
d3e2ba59 | 56 | #include "ctf-trace.h" |
1c20f0e2 | 57 | #include "index.h" |
0f907de1 | 58 | #include "utils.h" |
b8aa1682 | 59 | #include "lttng-relayd.h" |
d3e2ba59 | 60 | #include "live.h" |
55706a7d | 61 | #include "health-relayd.h" |
4e5b8b82 | 62 | #include "testpoint.h" |
991adae2 | 63 | #include "viewer-stream.h" |
eb702af5 DG |
64 | #include "session.h" |
65 | #include "stream.h" | |
e85cdca9 | 66 | #include "connection.h" |
b8aa1682 JD |
67 | |
68 | /* command line options */ | |
0f907de1 | 69 | char *opt_output_path; |
d1a3048a | 70 | static int opt_daemon, opt_background; |
02a6bb53 MD |
71 | |
72 | /* | |
73 | * We need to wait for the listener and live listener threads, as well as |
74 | * the health check thread, before signaling readiness. |
75 | */ | |
76 | #define NR_LTTNG_RELAY_READY 3 | |
77 | static int lttng_relay_ready = NR_LTTNG_RELAY_READY; | |
78 | static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */ | |
79 | static pid_t child_ppid; /* Internal parent PID used with daemonize. */ |
80 | ||
095a4ae5 MD |
81 | static struct lttng_uri *control_uri; |
82 | static struct lttng_uri *data_uri; | |
d3e2ba59 | 83 | static struct lttng_uri *live_uri; |
b8aa1682 JD |
84 | |
85 | const char *progname; | |
b8aa1682 | 86 | |
65931c8b MD |
87 | const char *tracing_group_name = DEFAULT_TRACING_GROUP; |
88 | ||
b8aa1682 JD |
89 | /* |
90 | * Quit pipe for all threads. This permits a single cancellation point | |
91 | * for all threads when receiving an event on the pipe. | |
92 | */ | |
3557d456 | 93 | int thread_quit_pipe[2] = { -1, -1 }; |
b8aa1682 JD |
94 | |
95 | /* | |
96 | * This pipe is used to inform the worker thread that a command is queued and | |
97 | * ready to be processed. | |
98 | */ | |
e85cdca9 | 99 | static int relay_conn_pipe[2] = { -1, -1 }; |
b8aa1682 | 100 | |
26c9d55e | 101 | /* Shared between threads */ |
b8aa1682 JD |
102 | static int dispatch_thread_exit; |
103 | ||
104 | static pthread_t listener_thread; | |
105 | static pthread_t dispatcher_thread; | |
106 | static pthread_t worker_thread; | |
65931c8b | 107 | static pthread_t health_thread; |
b8aa1682 | 108 | |
095a4ae5 | 109 | static uint64_t last_relay_stream_id; |
b8aa1682 JD |
110 | |
111 | /* | |
112 | * Relay command queue. | |
113 | * | |
114 | * The relay_thread_listener and relay_thread_dispatcher communicate with this | |
115 | * queue. | |
116 | */ | |
e85cdca9 | 117 | static struct relay_conn_queue relay_conn_queue; |
b8aa1682 JD |
118 | |
119 | /* buffer allocated at startup, used to store the trace data */ | |
095a4ae5 MD |
120 | static char *data_buffer; |
121 | static unsigned int data_buffer_size; | |
b8aa1682 | 122 | |
1c20f0e2 JD |
123 | /* We need those values for the file/dir creation. */ |
124 | static uid_t relayd_uid; | |
125 | static gid_t relayd_gid; | |
126 | ||
d3e2ba59 JD |
127 | /* Global relay stream hash table. */ |
128 | struct lttng_ht *relay_streams_ht; | |
129 | ||
92c6ca54 DG |
130 | /* Global relay viewer stream hash table. */ |
131 | struct lttng_ht *viewer_streams_ht; | |
132 | ||
0a6518b0 DG |
133 | /* Global hash table that stores relay index object. */ |
134 | struct lttng_ht *indexes_ht; | |
135 | ||
55706a7d | 136 | /* Relayd health monitoring */ |
eea7556c | 137 | struct health_app *health_relayd; |
55706a7d | 138 | |
b8aa1682 JD |
139 | /* |
140 | * usage function on stderr | |
141 | */ | |
142 | static | |
143 | void usage(void) | |
144 | { | |
145 | fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname); | |
994fa64f DG |
146 | fprintf(stderr, " -h, --help Display this usage.\n"); |
147 | fprintf(stderr, " -d, --daemonize Start as a daemon.\n"); | |
d1a3048a | 148 | fprintf(stderr, " -b, --background Start as a daemon, keeping console open.\n"); |
994fa64f DG |
149 | fprintf(stderr, " -C, --control-port URL Control port listening.\n"); |
150 | fprintf(stderr, " -D, --data-port URL Data port listening.\n"); | |
a6a062a4 | 151 | fprintf(stderr, " -L, --live-port URL Live view port listening.\n"); |
994fa64f DG |
152 | fprintf(stderr, " -o, --output PATH Output path for traces. Must use an absolute path.\n"); |
153 | fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n"); | |
65931c8b | 154 | fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n"); |
b8aa1682 JD |
155 | } |
156 | ||
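/*
 * Example invocation (illustrative sketch; the port numbers and paths below
 * are assumptions, not taken from this source):
 *     lttng-relayd -C tcp://0.0.0.0:5342 -D tcp://0.0.0.0:5343 \
 *             -L tcp://localhost:5344 -o /var/lib/lttng-traces
 */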
157 | static | |
158 | int parse_args(int argc, char **argv) | |
159 | { | |
160 | int c; | |
161 | int ret = 0; | |
162 | char *default_address; | |
163 | ||
164 | static struct option long_options[] = { | |
e3678fd8 MD |
165 | { "control-port", 1, 0, 'C', }, |
166 | { "data-port", 1, 0, 'D', }, | |
167 | { "daemonize", 0, 0, 'd', }, | |
65931c8b | 168 | { "group", 1, 0, 'g', }, |
e3678fd8 MD |
169 | { "help", 0, 0, 'h', }, |
170 | { "output", 1, 0, 'o', }, | |
171 | { "verbose", 0, 0, 'v', }, | |
06b55dbb | 172 | { "background", 0, 0, 'b' }, |
095a4ae5 | 173 | { NULL, 0, 0, 0, }, |
b8aa1682 JD |
174 | }; |
175 | ||
176 | while (1) { | |
177 | int option_index = 0; | |
06b55dbb | 178 | c = getopt_long(argc, argv, "dhv" "C:D:L:o:g:b", |
b8aa1682 JD |
179 | long_options, &option_index); |
180 | if (c == -1) { | |
181 | break; | |
182 | } | |
183 | ||
184 | switch (c) { | |
185 | case 0: | |
186 | fprintf(stderr, "option %s", long_options[option_index].name); | |
187 | if (optarg) { | |
188 | fprintf(stderr, " with arg %s\n", optarg); | |
189 | } | |
190 | break; | |
191 | case 'C': | |
192 | ret = uri_parse(optarg, &control_uri); | |
193 | if (ret < 0) { | |
194 | ERR("Invalid control URI specified"); | |
195 | goto exit; | |
196 | } | |
197 | if (control_uri->port == 0) { | |
198 | control_uri->port = DEFAULT_NETWORK_CONTROL_PORT; | |
199 | } | |
200 | break; | |
201 | case 'D': | |
202 | ret = uri_parse(optarg, &data_uri); | |
203 | if (ret < 0) { | |
204 | ERR("Invalid data URI specified"); | |
205 | goto exit; | |
206 | } | |
207 | if (data_uri->port == 0) { | |
208 | data_uri->port = DEFAULT_NETWORK_DATA_PORT; | |
209 | } | |
210 | break; | |
a6a062a4 AM |
211 | case 'L': |
212 | ret = uri_parse(optarg, &live_uri); | |
213 | if (ret < 0) { | |
214 | ERR("Invalid live URI specified"); | |
215 | goto exit; | |
216 | } | |
217 | if (live_uri->port == 0) { | |
218 | live_uri->port = DEFAULT_NETWORK_VIEWER_PORT; | |
219 | } | |
220 | break; | |
b8aa1682 JD |
221 | case 'd': |
222 | opt_daemon = 1; | |
223 | break; | |
06b55dbb DG |
224 | case 'b': |
225 | opt_background = 1; | |
226 | break; | |
65931c8b MD |
227 | case 'g': |
228 | tracing_group_name = optarg; | |
229 | break; | |
b8aa1682 JD |
230 | case 'h': |
231 | usage(); | |
232 | exit(EXIT_FAILURE); | |
233 | case 'o': | |
234 | ret = asprintf(&opt_output_path, "%s", optarg); | |
235 | if (ret < 0) { | |
236 | PERROR("asprintf opt_output_path"); | |
237 | goto exit; | |
238 | } | |
239 | break; | |
240 | case 'v': | |
241 | /* Verbose level can increase using multiple -v */ | |
242 | lttng_opt_verbose += 1; | |
243 | break; | |
244 | default: | |
245 | /* Unknown option or other error. | |
246 | * Error is printed by getopt, just return */ | |
247 | ret = -1; | |
248 | goto exit; | |
249 | } | |
250 | } | |
251 | ||
252 | /* assign default values */ | |
253 | if (control_uri == NULL) { | |
d359cebc MD |
254 | ret = asprintf(&default_address, |
255 | "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d", | |
256 | DEFAULT_NETWORK_CONTROL_PORT); | |
b8aa1682 JD |
257 | if (ret < 0) { |
258 | PERROR("asprintf default data address"); | |
259 | goto exit; | |
260 | } | |
261 | ||
262 | ret = uri_parse(default_address, &control_uri); | |
263 | free(default_address); | |
264 | if (ret < 0) { | |
265 | ERR("Invalid control URI specified"); | |
266 | goto exit; | |
267 | } | |
268 | } | |
269 | if (data_uri == NULL) { | |
d359cebc MD |
270 | ret = asprintf(&default_address, |
271 | "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d", | |
272 | DEFAULT_NETWORK_DATA_PORT); | |
b8aa1682 JD |
273 | if (ret < 0) { |
274 | PERROR("asprintf default data address"); | |
275 | goto exit; | |
276 | } | |
277 | ||
278 | ret = uri_parse(default_address, &data_uri); | |
279 | free(default_address); | |
280 | if (ret < 0) { | |
281 | ERR("Invalid data URI specified"); | |
282 | goto exit; | |
283 | } | |
284 | } | |
d3e2ba59 | 285 | if (live_uri == NULL) { |
d359cebc MD |
286 | ret = asprintf(&default_address, |
287 | "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d", | |
288 | DEFAULT_NETWORK_VIEWER_PORT); | |
d3e2ba59 JD |
289 | if (ret < 0) { |
290 | PERROR("asprintf default viewer control address"); | |
291 | goto exit; | |
292 | } | |
293 | ||
294 | ret = uri_parse(default_address, &live_uri); | |
295 | free(default_address); | |
296 | if (ret < 0) { | |
297 | ERR("Invalid viewer control URI specified"); | |
298 | goto exit; | |
299 | } | |
300 | } | |
b8aa1682 JD |
301 | |
302 | exit: | |
303 | return ret; | |
304 | } | |
305 | ||
306 | /* | |
307 | * Cleanup the daemon | |
308 | */ | |
309 | static | |
310 | void cleanup(void) | |
311 | { | |
b8aa1682 JD |
312 | DBG("Cleaning up"); |
313 | ||
095a4ae5 MD |
314 | /* free the dynamically allocated opt_output_path */ |
315 | free(opt_output_path); | |
316 | ||
a02de639 CB |
317 | /* Close thread quit pipes */ |
318 | utils_close_pipe(thread_quit_pipe); | |
319 | ||
710c1f73 DG |
320 | uri_free(control_uri); |
321 | uri_free(data_uri); | |
a6a062a4 | 322 | /* Live URI is freed in the live thread. */ |
b8aa1682 JD |
323 | } |
324 | ||
325 | /* | |
326 | * Write to writable pipe used to notify a thread. | |
327 | */ | |
328 | static | |
329 | int notify_thread_pipe(int wpipe) | |
330 | { | |
6cd525e8 | 331 | ssize_t ret; |
b8aa1682 | 332 | |
6cd525e8 MD |
333 | ret = lttng_write(wpipe, "!", 1); |
334 | if (ret < 1) { | |
b8aa1682 JD |
335 | PERROR("write poll pipe"); |
336 | } | |
337 | ||
338 | return ret; | |
339 | } | |
340 | ||
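/* Write to the health quit pipe so the health thread exits its poll loop. */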
65931c8b MD |
341 | static void notify_health_quit_pipe(int *pipe) |
342 | { | |
6cd525e8 | 343 | ssize_t ret; |
65931c8b | 344 | |
6cd525e8 MD |
345 | ret = lttng_write(pipe[1], "4", 1); |
346 | if (ret < 1) { | |
65931c8b MD |
347 | PERROR("write relay health quit"); |
348 | } | |
349 | } | |
350 | ||
b8aa1682 JD |
351 | /* |
352 | * Stop all threads by closing the thread quit pipe. | |
353 | */ | |
354 | static | |
355 | void stop_threads(void) | |
356 | { | |
357 | int ret; | |
358 | ||
359 | /* Stopping all threads */ | |
360 | DBG("Terminating all threads"); | |
361 | ret = notify_thread_pipe(thread_quit_pipe[1]); | |
362 | if (ret < 0) { | |
363 | ERR("write error on thread quit pipe"); | |
364 | } | |
365 | ||
65931c8b MD |
366 | notify_health_quit_pipe(health_quit_pipe); |
367 | ||
b8aa1682 | 368 | /* Dispatch thread */ |
26c9d55e | 369 | CMM_STORE_SHARED(dispatch_thread_exit, 1); |
e85cdca9 | 370 | futex_nto1_wake(&relay_conn_queue.futex); |
b8aa1682 JD |
371 | } |
372 | ||
373 | /* | |
374 | * Signal handler for the daemon | |
375 | * | |
376 | * Simply stop all worker threads, letting main() return gracefully after |
377 | * joining all threads and calling cleanup(). | |
378 | */ | |
379 | static | |
380 | void sighandler(int sig) | |
381 | { | |
382 | switch (sig) { | |
383 | case SIGPIPE: | |
384 | DBG("SIGPIPE caught"); | |
385 | return; | |
386 | case SIGINT: | |
387 | DBG("SIGINT caught"); | |
388 | stop_threads(); | |
389 | break; | |
390 | case SIGTERM: | |
391 | DBG("SIGTERM caught"); | |
392 | stop_threads(); | |
393 | break; | |
02a6bb53 MD |
394 | case SIGUSR1: |
395 | CMM_STORE_SHARED(recv_child_signal, 1); | |
396 | break; | |
b8aa1682 JD |
397 | default: |
398 | break; | |
399 | } | |
400 | } | |
401 | ||
402 | /* | |
403 | * Setup signal handler for: |
404 | * SIGINT, SIGTERM, SIGPIPE, SIGUSR1 |
405 | */ | |
406 | static | |
407 | int set_signal_handler(void) | |
408 | { | |
409 | int ret = 0; | |
410 | struct sigaction sa; | |
411 | sigset_t sigset; | |
412 | ||
413 | if ((ret = sigemptyset(&sigset)) < 0) { | |
414 | PERROR("sigemptyset"); | |
415 | return ret; | |
416 | } | |
417 | ||
418 | sa.sa_handler = sighandler; | |
419 | sa.sa_mask = sigset; | |
420 | sa.sa_flags = 0; | |
421 | if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) { | |
422 | PERROR("sigaction"); | |
423 | return ret; | |
424 | } | |
425 | ||
426 | if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) { | |
427 | PERROR("sigaction"); | |
428 | return ret; | |
429 | } | |
430 | ||
431 | if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) { | |
432 | PERROR("sigaction"); | |
433 | return ret; | |
434 | } | |
435 | ||
02a6bb53 MD |
436 | if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) { |
437 | PERROR("sigaction"); | |
438 | return ret; | |
439 | } | |
440 | ||
441 | DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT"); | |
b8aa1682 JD |
442 | |
443 | return ret; | |
444 | } | |
445 | ||
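/*
 * Called by each of the NR_LTTNG_RELAY_READY threads once it is operational;
 * the last one to decrement the counter signals the parent of the fork().
 */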
02a6bb53 MD |
446 | void lttng_relay_notify_ready(void) |
447 | { | |
448 | /* Notify the parent of the fork() process that we are ready. */ | |
449 | if (opt_daemon || opt_background) { | |
450 | if (uatomic_sub_return(<tng_relay_ready, 1) == 0) { | |
451 | kill(child_ppid, SIGUSR1); | |
452 | } | |
453 | } | |
454 | } | |
455 | ||
b8aa1682 JD |
456 | /* |
457 | * Init thread quit pipe. | |
458 | * | |
459 | * Return -1 on error or 0 if all pipes are created. | |
460 | */ | |
461 | static | |
462 | int init_thread_quit_pipe(void) | |
463 | { | |
a02de639 | 464 | int ret; |
b8aa1682 | 465 | |
a02de639 | 466 | ret = utils_create_pipe_cloexec(thread_quit_pipe); |
b8aa1682 | 467 | |
b8aa1682 JD |
468 | return ret; |
469 | } | |
470 | ||
471 | /* | |
472 | * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set. | |
473 | */ | |
474 | static | |
475 | int create_thread_poll_set(struct lttng_poll_event *events, int size) | |
476 | { | |
477 | int ret; | |
478 | ||
479 | if (events == NULL || size == 0) { | |
480 | ret = -1; | |
481 | goto error; | |
482 | } | |
483 | ||
484 | ret = lttng_poll_create(events, size, LTTNG_CLOEXEC); | |
485 | if (ret < 0) { | |
486 | goto error; | |
487 | } | |
488 | ||
489 | /* Add quit pipe */ | |
532ed517 | 490 | ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR); |
b8aa1682 JD |
491 | if (ret < 0) { |
492 | goto error; | |
493 | } | |
494 | ||
495 | return 0; | |
496 | ||
497 | error: | |
498 | return ret; | |
499 | } | |
500 | ||
501 | /* | |
502 | * Check if the thread quit pipe was triggered. | |
503 | * | |
504 | * Return 1 if it was triggered, else 0. |
505 | */ | |
506 | static | |
507 | int check_thread_quit_pipe(int fd, uint32_t events) | |
508 | { | |
509 | if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) { | |
510 | return 1; | |
511 | } | |
512 | ||
513 | return 0; | |
514 | } | |
515 | ||
516 | /* | |
517 | * Create and init socket from uri. | |
518 | */ | |
519 | static | |
520 | struct lttcomm_sock *relay_init_sock(struct lttng_uri *uri) | |
521 | { | |
522 | int ret; | |
523 | struct lttcomm_sock *sock = NULL; | |
524 | ||
525 | sock = lttcomm_alloc_sock_from_uri(uri); | |
526 | if (sock == NULL) { | |
527 | ERR("Allocating socket"); | |
528 | goto error; | |
529 | } | |
530 | ||
531 | ret = lttcomm_create_sock(sock); | |
532 | if (ret < 0) { | |
533 | goto error; | |
534 | } | |
535 | DBG("Listening on sock %d", sock->fd); | |
536 | ||
537 | ret = sock->ops->bind(sock); | |
538 | if (ret < 0) { | |
539 | goto error; | |
540 | } | |
541 | ||
542 | ret = sock->ops->listen(sock, -1); | |
543 | if (ret < 0) { | |
544 | goto error; | |
545 | ||
546 | } | |
547 | ||
548 | return sock; | |
549 | ||
550 | error: | |
551 | if (sock) { | |
552 | lttcomm_destroy_sock(sock); | |
553 | } | |
554 | return NULL; | |
555 | } | |
556 | ||
173af62f DG |
557 | /* |
558 | * Return nonzero if stream needs to be closed. | |
559 | */ | |
560 | static | |
561 | int close_stream_check(struct relay_stream *stream) | |
562 | { | |
173af62f | 563 | if (stream->close_flag && stream->prev_seq == stream->last_net_seq_num) { |
f7079f67 DG |
564 | /* |
565 | * We are about to close the stream, so set the data pending flag to 1, |
566 | * which makes the end data pending command skip this stream now that it |
567 | * is closed. Note that once the file is closed, the written file is |
568 | * ready for reading. |
569 | */ | |
570 | stream->data_pending_check_done = 1; | |
173af62f DG |
571 | return 1; |
572 | } | |
573 | return 0; | |
574 | } | |
575 | ||
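/*
 * Close the stream if it is ready to be closed, mark its ctf trace as invalid
 * for viewers and try to destroy the trace once it is no longer used.
 */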
eb702af5 DG |
576 | static void try_close_stream(struct relay_session *session, |
577 | struct relay_stream *stream) | |
578 | { | |
579 | int ret; | |
580 | struct ctf_trace *ctf_trace; | |
581 | ||
582 | assert(session); | |
583 | assert(stream); | |
584 | ||
585 | if (!close_stream_check(stream)) { | |
586 | /* Can't close it, not ready for that. */ | |
587 | goto end; | |
588 | } | |
589 | ||
590 | ctf_trace = ctf_trace_find_by_path(session->ctf_traces_ht, | |
591 | stream->path_name); | |
592 | assert(ctf_trace); | |
593 | ||
594 | pthread_mutex_lock(&session->viewer_ready_lock); | |
595 | ctf_trace->invalid_flag = 1; | |
596 | pthread_mutex_unlock(&session->viewer_ready_lock); | |
597 | ||
598 | ret = stream_close(session, stream); | |
50979bcd | 599 | if (ret || session->snapshot) { |
eb702af5 DG |
600 | /* Already closed, thus the ctf trace is being or has been destroyed. */ |
601 | goto end; | |
602 | } | |
603 | ||
604 | ctf_trace_try_destroy(session, ctf_trace); | |
605 | ||
606 | end: | |
607 | return; | |
608 | } | |
609 | ||
b8aa1682 JD |
610 | /* |
611 | * This thread listens for new connections on the network. |
612 | */ | |
613 | static | |
614 | void *relay_thread_listener(void *data) | |
615 | { | |
095a4ae5 | 616 | int i, ret, pollfd, err = -1; |
b8aa1682 JD |
617 | uint32_t revents, nb_fd; |
618 | struct lttng_poll_event events; | |
619 | struct lttcomm_sock *control_sock, *data_sock; | |
620 | ||
b8aa1682 JD |
621 | DBG("[thread] Relay listener started"); |
622 | ||
55706a7d MD |
623 | health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER); |
624 | ||
f385ae0a MD |
625 | health_code_update(); |
626 | ||
b8aa1682 JD |
627 | control_sock = relay_init_sock(control_uri); |
628 | if (!control_sock) { | |
095a4ae5 | 629 | goto error_sock_control; |
b8aa1682 JD |
630 | } |
631 | ||
632 | data_sock = relay_init_sock(data_uri); | |
633 | if (!data_sock) { | |
095a4ae5 | 634 | goto error_sock_relay; |
b8aa1682 JD |
635 | } |
636 | ||
637 | /* | |
638 | * Pass 3 as size here for the thread quit pipe, control and data socket. | |
639 | */ | |
640 | ret = create_thread_poll_set(&events, 3); | |
641 | if (ret < 0) { | |
642 | goto error_create_poll; | |
643 | } | |
644 | ||
645 | /* Add the control socket */ | |
646 | ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP); | |
647 | if (ret < 0) { | |
648 | goto error_poll_add; | |
649 | } | |
650 | ||
651 | /* Add the data socket */ | |
652 | ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP); | |
653 | if (ret < 0) { | |
654 | goto error_poll_add; | |
655 | } | |
656 | ||
02a6bb53 MD |
657 | lttng_relay_notify_ready(); |
658 | ||
4e5b8b82 MD |
659 | if (testpoint(relayd_thread_listener)) { |
660 | goto error_testpoint; | |
661 | } | |
662 | ||
b8aa1682 | 663 | while (1) { |
f385ae0a MD |
664 | health_code_update(); |
665 | ||
b8aa1682 JD |
666 | DBG("Listener accepting connections"); |
667 | ||
b8aa1682 | 668 | restart: |
f385ae0a | 669 | health_poll_entry(); |
b8aa1682 | 670 | ret = lttng_poll_wait(&events, -1); |
f385ae0a | 671 | health_poll_exit(); |
b8aa1682 JD |
672 | if (ret < 0) { |
673 | /* | |
674 | * Restart interrupted system call. | |
675 | */ | |
676 | if (errno == EINTR) { | |
677 | goto restart; | |
678 | } | |
679 | goto error; | |
680 | } | |
681 | ||
0d9c5d77 DG |
682 | nb_fd = ret; |
683 | ||
b8aa1682 JD |
684 | DBG("Relay new connection received"); |
685 | for (i = 0; i < nb_fd; i++) { | |
f385ae0a MD |
686 | health_code_update(); |
687 | ||
b8aa1682 JD |
688 | /* Fetch once the poll data */ |
689 | revents = LTTNG_POLL_GETEV(&events, i); | |
690 | pollfd = LTTNG_POLL_GETFD(&events, i); | |
691 | ||
f0567343 MD |
692 | if (!revents) { |
693 | /* No activity for this FD (poll implementation). */ | |
694 | continue; | |
695 | } | |
696 | ||
b8aa1682 JD |
697 | /* Thread quit pipe has been closed. Killing thread. */ |
698 | ret = check_thread_quit_pipe(pollfd, revents); | |
699 | if (ret) { | |
095a4ae5 MD |
700 | err = 0; |
701 | goto exit; | |
b8aa1682 JD |
702 | } |
703 | ||
704 | if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) { | |
705 | ERR("socket poll error"); | |
706 | goto error; | |
707 | } else if (revents & LPOLLIN) { | |
4b7f17b2 | 708 | /* |
e85cdca9 DG |
709 | * Get allocated in this thread, enqueued to a global queue, |
710 | * dequeued and freed in the worker thread. | |
4b7f17b2 | 711 | */ |
e85cdca9 DG |
712 | int val = 1; |
713 | struct relay_connection *new_conn; | |
4b7f17b2 | 714 | struct lttcomm_sock *newsock; |
b8aa1682 | 715 | |
e85cdca9 DG |
716 | new_conn = connection_create(); |
717 | if (!new_conn) { | |
b8aa1682 JD |
718 | goto error; |
719 | } | |
720 | ||
721 | if (pollfd == data_sock->fd) { | |
e85cdca9 | 722 | new_conn->type = RELAY_DATA; |
b8aa1682 | 723 | newsock = data_sock->ops->accept(data_sock); |
e85cdca9 DG |
724 | DBG("Relay data connection accepted, socket %d", |
725 | newsock->fd); | |
4b7f17b2 MD |
726 | } else { |
727 | assert(pollfd == control_sock->fd); | |
e85cdca9 | 728 | new_conn->type = RELAY_CONTROL; |
b8aa1682 | 729 | newsock = control_sock->ops->accept(control_sock); |
e85cdca9 DG |
730 | DBG("Relay control connection accepted, socket %d", |
731 | newsock->fd); | |
b8aa1682 | 732 | } |
e85cdca9 DG |
733 | if (!newsock) { |
734 | PERROR("accepting sock"); | |
735 | connection_free(new_conn); | |
736 | goto error; | |
737 | } | |
738 | ||
739 | ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val, | |
740 | sizeof(val)); | |
b8aa1682 JD |
741 | if (ret < 0) { |
742 | PERROR("setsockopt inet"); | |
4b7f17b2 | 743 | lttcomm_destroy_sock(newsock); |
e85cdca9 | 744 | connection_free(new_conn); |
b8aa1682 JD |
745 | goto error; |
746 | } | |
e85cdca9 DG |
747 | new_conn->sock = newsock; |
748 | ||
749 | /* Enqueue request for the dispatcher thread. */ | |
750 | cds_wfq_enqueue(&relay_conn_queue.queue, &new_conn->qnode); | |
b8aa1682 JD |
751 | |
752 | /* | |
e85cdca9 DG |
753 | * Wake the dispatch queue futex. Implicit memory barrier with |
754 | * the exchange in cds_wfq_enqueue. | |
b8aa1682 | 755 | */ |
e85cdca9 | 756 | futex_nto1_wake(&relay_conn_queue.futex); |
b8aa1682 JD |
757 | } |
758 | } | |
759 | } | |
760 | ||
095a4ae5 | 761 | exit: |
b8aa1682 JD |
762 | error: |
763 | error_poll_add: | |
4e5b8b82 | 764 | error_testpoint: |
b8aa1682 JD |
765 | lttng_poll_clean(&events); |
766 | error_create_poll: | |
095a4ae5 MD |
767 | if (data_sock->fd >= 0) { |
768 | ret = data_sock->ops->close(data_sock); | |
b8aa1682 JD |
769 | if (ret) { |
770 | PERROR("close"); | |
771 | } | |
b8aa1682 | 772 | } |
095a4ae5 MD |
773 | lttcomm_destroy_sock(data_sock); |
774 | error_sock_relay: | |
775 | if (control_sock->fd >= 0) { | |
776 | ret = control_sock->ops->close(control_sock); | |
b8aa1682 JD |
777 | if (ret) { |
778 | PERROR("close"); | |
779 | } | |
b8aa1682 | 780 | } |
095a4ae5 MD |
781 | lttcomm_destroy_sock(control_sock); |
782 | error_sock_control: | |
783 | if (err) { | |
f385ae0a MD |
784 | health_error(); |
785 | ERR("Health error occurred in %s", __func__); | |
095a4ae5 | 786 | } |
55706a7d | 787 | health_unregister(health_relayd); |
b8aa1682 JD |
788 | DBG("Relay listener thread cleanup complete"); |
789 | stop_threads(); | |
b8aa1682 JD |
790 | return NULL; |
791 | } | |
792 | ||
793 | /* | |
794 | * This thread dispatches incoming requests to the worker threads. |
795 | */ | |
796 | static | |
797 | void *relay_thread_dispatcher(void *data) | |
798 | { | |
6cd525e8 MD |
799 | int err = -1; |
800 | ssize_t ret; | |
b8aa1682 | 801 | struct cds_wfq_node *node; |
e85cdca9 | 802 | struct relay_connection *new_conn = NULL; |
b8aa1682 JD |
803 | |
804 | DBG("[thread] Relay dispatcher started"); | |
805 | ||
55706a7d MD |
806 | health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER); |
807 | ||
4e5b8b82 MD |
808 | if (testpoint(relayd_thread_dispatcher)) { |
809 | goto error_testpoint; | |
810 | } | |
811 | ||
f385ae0a MD |
812 | health_code_update(); |
813 | ||
26c9d55e | 814 | while (!CMM_LOAD_SHARED(dispatch_thread_exit)) { |
f385ae0a MD |
815 | health_code_update(); |
816 | ||
b8aa1682 | 817 | /* Atomically prepare the queue futex */ |
e85cdca9 | 818 | futex_nto1_prepare(&relay_conn_queue.futex); |
b8aa1682 JD |
819 | |
820 | do { | |
f385ae0a MD |
821 | health_code_update(); |
822 | ||
b8aa1682 | 823 | /* Dequeue commands */ |
e85cdca9 | 824 | node = cds_wfq_dequeue_blocking(&relay_conn_queue.queue); |
b8aa1682 JD |
825 | if (node == NULL) { |
826 | DBG("Woken up but nothing in the relay command queue"); | |
827 | /* Continue thread execution */ | |
828 | break; | |
829 | } | |
e85cdca9 | 830 | new_conn = caa_container_of(node, struct relay_connection, qnode); |
b8aa1682 | 831 | |
e85cdca9 | 832 | DBG("Dispatching request waiting on sock %d", new_conn->sock->fd); |
b8aa1682 JD |
833 | |
834 | /* | |
e85cdca9 DG |
835 | * Inform worker thread of the new request. This call is blocking |
836 | * so we can be assured that the data will be read at some point in | |
837 | * time or wait to the end of the world :) | |
b8aa1682 | 838 | */ |
e85cdca9 DG |
839 | ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn)); |
840 | if (ret < 0) { | |
841 | PERROR("write connection pipe"); | |
842 | connection_destroy(new_conn); | |
b8aa1682 JD |
843 | goto error; |
844 | } | |
845 | } while (node != NULL); | |
846 | ||
847 | /* Futex wait on queue. Blocking call on futex() */ | |
f385ae0a | 848 | health_poll_entry(); |
e85cdca9 | 849 | futex_nto1_wait(&relay_conn_queue.futex); |
f385ae0a | 850 | health_poll_exit(); |
b8aa1682 JD |
851 | } |
852 | ||
f385ae0a MD |
853 | /* Normal exit, no error */ |
854 | err = 0; | |
855 | ||
b8aa1682 | 856 | error: |
4e5b8b82 | 857 | error_testpoint: |
f385ae0a MD |
858 | if (err) { |
859 | health_error(); | |
860 | ERR("Health error occurred in %s", __func__); | |
861 | } | |
55706a7d | 862 | health_unregister(health_relayd); |
b8aa1682 JD |
863 | DBG("Dispatch thread dying"); |
864 | stop_threads(); | |
865 | return NULL; | |
866 | } | |
867 | ||
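/*
 * Close every stream of each ctf trace of the session and try to destroy the
 * traces that are no longer used.
 */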
eb702af5 | 868 | static void try_close_streams(struct relay_session *session) |
d3e2ba59 | 869 | { |
eb702af5 | 870 | struct ctf_trace *ctf_trace; |
94d49140 JD |
871 | struct lttng_ht_iter iter; |
872 | ||
eb702af5 | 873 | assert(session); |
94d49140 | 874 | |
eb702af5 DG |
875 | pthread_mutex_lock(&session->viewer_ready_lock); |
876 | rcu_read_lock(); | |
877 | cds_lfht_for_each_entry(session->ctf_traces_ht->ht, &iter.iter, ctf_trace, | |
878 | node.node) { | |
879 | struct relay_stream *stream; | |
94d49140 | 880 | |
eb702af5 DG |
881 | /* Close streams. */ |
882 | cds_list_for_each_entry(stream, &ctf_trace->stream_list, trace_list) { | |
883 | stream_close(session, stream); | |
94d49140 | 884 | } |
94d49140 | 885 | |
eb702af5 DG |
886 | ctf_trace->invalid_flag = 1; |
887 | ctf_trace_try_destroy(session, ctf_trace); | |
157df586 | 888 | } |
eb702af5 DG |
889 | rcu_read_unlock(); |
890 | pthread_mutex_unlock(&session->viewer_ready_lock); | |
94d49140 JD |
891 | } |
892 | ||
b8aa1682 | 893 | /* |
eb702af5 | 894 | * Try to destroy a session within a connection. |
b8aa1682 | 895 | */ |
e85cdca9 | 896 | static void destroy_session(struct relay_session *session, |
d3e2ba59 | 897 | struct lttng_ht *sessions_ht) |
b8aa1682 | 898 | { |
e85cdca9 | 899 | assert(session); |
eb702af5 | 900 | assert(sessions_ht); |
b8aa1682 | 901 | |
eb702af5 | 902 | /* Indicate that this session can be destroyed from now on. */ |
e85cdca9 | 903 | session->close_flag = 1; |
b8aa1682 | 904 | |
e85cdca9 | 905 | try_close_streams(session); |
5b6d8097 | 906 | |
eb702af5 DG |
907 | /* |
908 | * This will try to delete and destroy the session if no viewer is attached | |
909 | * to it meaning the refcount is down to zero. | |
910 | */ | |
e85cdca9 | 911 | session_try_destroy(sessions_ht, session); |
b8aa1682 JD |
912 | } |
913 | ||
1c20f0e2 JD |
914 | /* |
915 | * Copy index data from the control port to a given index object. | |
916 | */ | |
917 | static void copy_index_control_data(struct relay_index *index, | |
918 | struct lttcomm_relayd_index *data) | |
919 | { | |
920 | assert(index); | |
921 | assert(data); | |
922 | ||
923 | /* | |
924 | * The index on disk is encoded in big endian, so we don't need to convert | |
925 | * the data received on the network. The data_offset value is NEVER | |
926 | * modified here and is updated by the data thread. | |
927 | */ | |
928 | index->index_data.packet_size = data->packet_size; | |
929 | index->index_data.content_size = data->content_size; | |
930 | index->index_data.timestamp_begin = data->timestamp_begin; | |
931 | index->index_data.timestamp_end = data->timestamp_end; | |
932 | index->index_data.events_discarded = data->events_discarded; | |
933 | index->index_data.stream_id = data->stream_id; | |
934 | } | |
935 | ||
c5b6f4f0 DG |
936 | /* |
937 | * Handle the RELAYD_CREATE_SESSION command. | |
938 | * | |
939 | * On success, send back the session id or else return a negative value. | |
940 | */ | |
941 | static | |
942 | int relay_create_session(struct lttcomm_relayd_hdr *recv_hdr, | |
e85cdca9 | 943 | struct relay_connection *conn) |
c5b6f4f0 DG |
944 | { |
945 | int ret = 0, send_ret; | |
946 | struct relay_session *session; | |
947 | struct lttcomm_relayd_status_session reply; | |
948 | ||
949 | assert(recv_hdr); | |
e85cdca9 | 950 | assert(conn); |
c5b6f4f0 DG |
951 | |
952 | memset(&reply, 0, sizeof(reply)); | |
953 | ||
eb702af5 DG |
954 | session = session_create(); |
955 | if (!session) { | |
c5b6f4f0 DG |
956 | ret = -1; |
957 | goto error; | |
958 | } | |
e85cdca9 DG |
959 | session->minor = conn->minor; |
960 | session->major = conn->major; | |
961 | conn->session_id = session->id; | |
962 | conn->session = session; | |
c5b6f4f0 DG |
963 | |
964 | reply.session_id = htobe64(session->id); | |
965 | ||
e85cdca9 | 966 | switch (conn->minor) { |
eb702af5 DG |
967 | case 1: |
968 | case 2: | |
969 | case 3: | |
970 | break; | |
971 | case 4: /* LTTng sessiond 2.4 */ | |
972 | default: | |
e85cdca9 | 973 | ret = cmd_create_session_2_4(conn, session); |
d3e2ba59 JD |
974 | } |
975 | ||
e85cdca9 | 976 | lttng_ht_add_unique_u64(conn->sessions_ht, &session->session_n); |
c5b6f4f0 DG |
977 | DBG("Created session %" PRIu64, session->id); |
978 | ||
979 | error: | |
980 | if (ret < 0) { | |
981 | reply.ret_code = htobe32(LTTNG_ERR_FATAL); | |
982 | } else { | |
983 | reply.ret_code = htobe32(LTTNG_OK); | |
984 | } | |
985 | ||
e85cdca9 | 986 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
c5b6f4f0 DG |
987 | if (send_ret < 0) { |
988 | ERR("Relayd sending session id"); | |
4169f5ad | 989 | ret = send_ret; |
c5b6f4f0 DG |
990 | } |
991 | ||
992 | return ret; | |
993 | } | |
994 | ||
814fcae4 JD |
995 | /* |
996 | * When we have received all the streams and the metadata for a channel, | |
997 | * we make them visible to the viewer threads. | |
998 | */ | |
999 | static | |
e85cdca9 | 1000 | void set_viewer_ready_flag(struct relay_connection *conn) |
814fcae4 | 1001 | { |
eb702af5 | 1002 | struct relay_stream *stream, *tmp_stream; |
814fcae4 | 1003 | |
e85cdca9 DG |
1004 | pthread_mutex_lock(&conn->session->viewer_ready_lock); |
1005 | cds_list_for_each_entry_safe(stream, tmp_stream, &conn->recv_head, | |
eb702af5 | 1006 | recv_list) { |
814fcae4 | 1007 | stream->viewer_ready = 1; |
eb702af5 | 1008 | cds_list_del(&stream->recv_list); |
814fcae4 | 1009 | } |
e85cdca9 | 1010 | pthread_mutex_unlock(&conn->session->viewer_ready_lock); |
814fcae4 JD |
1011 | return; |
1012 | } | |
1013 | ||
1014 | /* | |
1015 | * Add a recv handle node to the connection recv list with the given stream | |
1016 | * handle. A new node is allocated thus must be freed when the node is deleted | |
1017 | * from the list. | |
1018 | */ | |
e85cdca9 DG |
1019 | static void queue_stream(struct relay_stream *stream, |
1020 | struct relay_connection *conn) | |
814fcae4 | 1021 | { |
e85cdca9 | 1022 | assert(conn); |
eb702af5 | 1023 | assert(stream); |
814fcae4 | 1024 | |
e85cdca9 | 1025 | cds_list_add(&stream->recv_list, &conn->recv_head); |
814fcae4 JD |
1026 | } |
1027 | ||
b8aa1682 JD |
1028 | /* |
1029 | * relay_add_stream: allocate a new stream for a session | |
1030 | */ | |
1031 | static | |
1032 | int relay_add_stream(struct lttcomm_relayd_hdr *recv_hdr, | |
e85cdca9 | 1033 | struct relay_connection *conn) |
b8aa1682 | 1034 | { |
eb702af5 | 1035 | int ret, send_ret; |
e85cdca9 | 1036 | struct relay_session *session = conn->session; |
b8aa1682 JD |
1037 | struct relay_stream *stream = NULL; |
1038 | struct lttcomm_relayd_status_stream reply; | |
eb702af5 | 1039 | struct ctf_trace *trace; |
b8aa1682 | 1040 | |
e85cdca9 | 1041 | if (!session || conn->version_check_done == 0) { |
b8aa1682 JD |
1042 | ERR("Trying to add a stream before version check"); |
1043 | ret = -1; | |
1044 | goto end_no_session; | |
1045 | } | |
1046 | ||
b8aa1682 JD |
1047 | stream = zmalloc(sizeof(struct relay_stream)); |
1048 | if (stream == NULL) { | |
1049 | PERROR("relay stream zmalloc"); | |
1050 | ret = -1; | |
1051 | goto end_no_session; | |
1052 | } | |
1053 | ||
e85cdca9 | 1054 | switch (conn->minor) { |
0f907de1 | 1055 | case 1: /* LTTng sessiond 2.1 */ |
e85cdca9 | 1056 | ret = cmd_recv_stream_2_1(conn, stream); |
0f907de1 JD |
1057 | break; |
1058 | case 2: /* LTTng sessiond 2.2 */ | |
1059 | default: | |
e85cdca9 | 1060 | ret = cmd_recv_stream_2_2(conn, stream); |
0f907de1 JD |
1061 | break; |
1062 | } | |
1063 | if (ret < 0) { | |
1064 | goto err_free_stream; | |
1065 | } | |
1066 | ||
9d1bbf21 | 1067 | rcu_read_lock(); |
b8aa1682 | 1068 | stream->stream_handle = ++last_relay_stream_id; |
173af62f | 1069 | stream->prev_seq = -1ULL; |
eb702af5 | 1070 | stream->session_id = session->id; |
1c20f0e2 | 1071 | stream->index_fd = -1; |
d3e2ba59 | 1072 | stream->read_index_fd = -1; |
5e372a51 | 1073 | stream->ctf_stream_id = -1ULL; |
eb702af5 | 1074 | lttng_ht_node_init_u64(&stream->node, stream->stream_handle); |
d3e2ba59 | 1075 | pthread_mutex_init(&stream->lock, NULL); |
b8aa1682 | 1076 | |
0f907de1 | 1077 | ret = utils_mkdir_recursive(stream->path_name, S_IRWXU | S_IRWXG); |
b8aa1682 | 1078 | if (ret < 0) { |
b8aa1682 JD |
1079 | ERR("relay creating output directory"); |
1080 | goto end; | |
1081 | } | |
1082 | ||
be96a7d1 DG |
1083 | /* |
1084 | * No need to use the run_as API here because whatever we receive, the relayd |
1085 | * uses its own credentials for the stream files. | |
1086 | */ | |
0f907de1 | 1087 | ret = utils_create_stream_file(stream->path_name, stream->channel_name, |
1c20f0e2 | 1088 | stream->tracefile_size, 0, relayd_uid, relayd_gid, NULL); |
b8aa1682 | 1089 | if (ret < 0) { |
0f907de1 | 1090 | ERR("Create output file"); |
b8aa1682 JD |
1091 | goto end; |
1092 | } | |
b8aa1682 | 1093 | stream->fd = ret; |
0f907de1 JD |
1094 | if (stream->tracefile_size) { |
1095 | DBG("Tracefile %s/%s_0 created", stream->path_name, stream->channel_name); | |
1096 | } else { | |
1097 | DBG("Tracefile %s/%s created", stream->path_name, stream->channel_name); | |
1098 | } | |
b8aa1682 | 1099 | |
eb702af5 DG |
1100 | trace = ctf_trace_find_by_path(session->ctf_traces_ht, stream->path_name); |
1101 | if (!trace) { | |
1102 | trace = ctf_trace_create(stream->path_name); | |
1103 | if (!trace) { | |
d3e2ba59 JD |
1104 | ret = -1; |
1105 | goto end; | |
1106 | } | |
eb702af5 DG |
1107 | ctf_trace_add(session->ctf_traces_ht, trace); |
1108 | } | |
1109 | ctf_trace_get_ref(trace); | |
1110 | ||
1111 | if (!strncmp(stream->channel_name, DEFAULT_METADATA_NAME, NAME_MAX)) { | |
1112 | stream->metadata_flag = 1; | |
1113 | /* Assign quick reference to the metadata stream in the trace. */ | |
1114 | trace->metadata_stream = stream; | |
d3e2ba59 | 1115 | } |
d3e2ba59 | 1116 | |
814fcae4 | 1117 | /* |
eb702af5 DG |
1118 | * Add the stream in the recv list of the connection. Once the end stream |
1119 | * message is received, this list is emptied and streams are set with the | |
1120 | * viewer ready flag. | |
814fcae4 | 1121 | */ |
e85cdca9 | 1122 | queue_stream(stream, conn); |
814fcae4 | 1123 | |
eb702af5 DG |
1124 | /* |
1125 | * Both in the ctf_trace object and the global stream ht since the data | |
1126 | * side of the relayd does not have the concept of session. | |
1127 | */ | |
1128 | lttng_ht_add_unique_u64(relay_streams_ht, &stream->node); | |
1129 | cds_list_add_tail(&stream->trace_list, &trace->stream_list); | |
b8aa1682 | 1130 | |
87b576ec | 1131 | session->stream_count++; |
d3e2ba59 | 1132 | |
1c20f0e2 JD |
1133 | DBG("Relay new stream added %s with ID %" PRIu64, stream->channel_name, |
1134 | stream->stream_handle); | |
b8aa1682 JD |
1135 | |
1136 | end: | |
239f3aec | 1137 | memset(&reply, 0, sizeof(reply)); |
5af40280 | 1138 | reply.handle = htobe64(stream->stream_handle); |
b8aa1682 JD |
1139 | /* send the stream handle to the client or a negative return code on error */ |
1140 | if (ret < 0) { | |
f73fabfd | 1141 | reply.ret_code = htobe32(LTTNG_ERR_UNK); |
5af40280 CB |
1142 | /* stream was not properly added to the ht, so free it */ |
1143 | free(stream); | |
b8aa1682 | 1144 | } else { |
f73fabfd | 1145 | reply.ret_code = htobe32(LTTNG_OK); |
b8aa1682 | 1146 | } |
5af40280 | 1147 | |
e85cdca9 | 1148 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
b8aa1682 JD |
1149 | sizeof(struct lttcomm_relayd_status_stream), 0); |
1150 | if (send_ret < 0) { | |
1151 | ERR("Relay sending stream id"); | |
4169f5ad | 1152 | ret = send_ret; |
b8aa1682 | 1153 | } |
9d1bbf21 | 1154 | rcu_read_unlock(); |
b8aa1682 JD |
1155 | |
1156 | end_no_session: | |
1157 | return ret; | |
0f907de1 JD |
1158 | |
1159 | err_free_stream: | |
1160 | free(stream->path_name); | |
1161 | free(stream->channel_name); | |
1162 | free(stream); | |
1163 | return ret; | |
b8aa1682 JD |
1164 | } |
1165 | ||
173af62f DG |
1166 | /* |
1167 | * relay_close_stream: close a specific stream | |
1168 | */ | |
1169 | static | |
1170 | int relay_close_stream(struct lttcomm_relayd_hdr *recv_hdr, | |
e85cdca9 | 1171 | struct relay_connection *conn) |
173af62f | 1172 | { |
94d49140 | 1173 | int ret, send_ret; |
e85cdca9 | 1174 | struct relay_session *session = conn->session; |
173af62f DG |
1175 | struct lttcomm_relayd_close_stream stream_info; |
1176 | struct lttcomm_relayd_generic_reply reply; | |
1177 | struct relay_stream *stream; | |
173af62f DG |
1178 | |
1179 | DBG("Close stream received"); | |
1180 | ||
e85cdca9 | 1181 | if (!session || conn->version_check_done == 0) { |
173af62f DG |
1182 | ERR("Trying to close a stream before version check"); |
1183 | ret = -1; | |
1184 | goto end_no_session; | |
1185 | } | |
1186 | ||
e85cdca9 | 1187 | ret = conn->sock->ops->recvmsg(conn->sock, &stream_info, |
7c5aef62 | 1188 | sizeof(struct lttcomm_relayd_close_stream), 0); |
173af62f | 1189 | if (ret < sizeof(struct lttcomm_relayd_close_stream)) { |
a6cd2b97 DG |
1190 | if (ret == 0) { |
1191 | /* Orderly shutdown. Not necessary to print an error. */ | |
e85cdca9 | 1192 | DBG("Socket %d did an orderly shutdown", conn->sock->fd); |
a6cd2b97 DG |
1193 | } else { |
1194 | ERR("Relay didn't receive valid close_stream struct size : %d", ret); |
1195 | } | |
173af62f DG |
1196 | ret = -1; |
1197 | goto end_no_session; | |
1198 | } | |
1199 | ||
1200 | rcu_read_lock(); | |
eb702af5 DG |
1201 | stream = stream_find_by_id(relay_streams_ht, |
1202 | be64toh(stream_info.stream_id)); | |
173af62f DG |
1203 | if (!stream) { |
1204 | ret = -1; | |
1205 | goto end_unlock; | |
1206 | } | |
1207 | ||
8e2583a4 | 1208 | stream->last_net_seq_num = be64toh(stream_info.last_net_seq_num); |
173af62f | 1209 | stream->close_flag = 1; |
87b576ec JD |
1210 | session->stream_count--; |
1211 | assert(session->stream_count >= 0); | |
173af62f | 1212 | |
eb702af5 DG |
1213 | /* Check if we can close it or else the data will do it. */ |
1214 | try_close_stream(session, stream); | |
173af62f DG |
1215 | |
1216 | end_unlock: | |
1217 | rcu_read_unlock(); | |
1218 | ||
239f3aec | 1219 | memset(&reply, 0, sizeof(reply)); |
173af62f | 1220 | if (ret < 0) { |
f73fabfd | 1221 | reply.ret_code = htobe32(LTTNG_ERR_UNK); |
173af62f | 1222 | } else { |
f73fabfd | 1223 | reply.ret_code = htobe32(LTTNG_OK); |
173af62f | 1224 | } |
e85cdca9 | 1225 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
173af62f DG |
1226 | sizeof(struct lttcomm_relayd_generic_reply), 0); |
1227 | if (send_ret < 0) { | |
1228 | ERR("Relay sending stream id"); | |
4169f5ad | 1229 | ret = send_ret; |
173af62f DG |
1230 | } |
1231 | ||
1232 | end_no_session: | |
1233 | return ret; | |
1234 | } | |
1235 | ||
b8aa1682 JD |
1236 | /* |
1237 | * relay_unknown_command: send -1 if received unknown command | |
1238 | */ | |
1239 | static | |
e85cdca9 | 1240 | void relay_unknown_command(struct relay_connection *conn) |
b8aa1682 JD |
1241 | { |
1242 | struct lttcomm_relayd_generic_reply reply; | |
1243 | int ret; | |
1244 | ||
239f3aec | 1245 | memset(&reply, 0, sizeof(reply)); |
f73fabfd | 1246 | reply.ret_code = htobe32(LTTNG_ERR_UNK); |
e85cdca9 | 1247 | ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
b8aa1682 JD |
1248 | sizeof(struct lttcomm_relayd_generic_reply), 0); |
1249 | if (ret < 0) { | |
1250 | ERR("Relay sending unknown command"); | |
1251 | } | |
1252 | } | |
1253 | ||
1254 | /* | |
1255 | * relay_start: send an acknowledgment to the client to tell if we are | |
1256 | * ready to receive data. We are ready if a session is established. | |
1257 | */ | |
1258 | static | |
1259 | int relay_start(struct lttcomm_relayd_hdr *recv_hdr, | |
e85cdca9 | 1260 | struct relay_connection *conn) |
b8aa1682 | 1261 | { |
f73fabfd | 1262 | int ret = htobe32(LTTNG_OK); |
b8aa1682 | 1263 | struct lttcomm_relayd_generic_reply reply; |
e85cdca9 | 1264 | struct relay_session *session = conn->session; |
b8aa1682 JD |
1265 | |
1266 | if (!session) { | |
1267 | DBG("Trying to start the streaming without a session established"); | |
f73fabfd | 1268 | ret = htobe32(LTTNG_ERR_UNK); |
b8aa1682 JD |
1269 | } |
1270 | ||
239f3aec | 1271 | memset(&reply, 0, sizeof(reply)); |
b8aa1682 | 1272 | reply.ret_code = ret; |
e85cdca9 | 1273 | ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
b8aa1682 JD |
1274 | sizeof(struct lttcomm_relayd_generic_reply), 0); |
1275 | if (ret < 0) { | |
1276 | ERR("Relay sending start ack"); | |
1277 | } | |
1278 | ||
1279 | return ret; | |
1280 | } | |
1281 | ||
1d4dfdef DG |
1282 | /* |
1283 | * Append padding to the file pointed to by the file descriptor fd. |
1284 | */ | |
1285 | static int write_padding_to_file(int fd, uint32_t size) | |
1286 | { | |
6cd525e8 | 1287 | ssize_t ret = 0; |
1d4dfdef DG |
1288 | char *zeros; |
1289 | ||
1290 | if (size == 0) { | |
1291 | goto end; | |
1292 | } | |
1293 | ||
1294 | zeros = zmalloc(size); | |
1295 | if (zeros == NULL) { | |
1296 | PERROR("zmalloc zeros for padding"); | |
1297 | ret = -1; | |
1298 | goto end; | |
1299 | } | |
1300 | ||
6cd525e8 MD |
1301 | ret = lttng_write(fd, zeros, size); |
1302 | if (ret < size) { | |
1d4dfdef DG |
1303 | PERROR("write padding to file"); |
1304 | } | |
1305 | ||
e986c7a1 DG |
1306 | free(zeros); |
1307 | ||
1d4dfdef DG |
1308 | end: |
1309 | return ret; | |
1310 | } | |
1311 | ||
b8aa1682 JD |
1312 | /* |
1313 | * relay_recv_metadata: receive the metadata for the session. |
1314 | */ | |
1315 | static | |
1316 | int relay_recv_metadata(struct lttcomm_relayd_hdr *recv_hdr, | |
e85cdca9 | 1317 | struct relay_connection *conn) |
b8aa1682 | 1318 | { |
f73fabfd | 1319 | int ret = htobe32(LTTNG_OK); |
6cd525e8 | 1320 | ssize_t size_ret; |
e85cdca9 | 1321 | struct relay_session *session = conn->session; |
b8aa1682 JD |
1322 | struct lttcomm_relayd_metadata_payload *metadata_struct; |
1323 | struct relay_stream *metadata_stream; | |
1324 | uint64_t data_size, payload_size; | |
eb702af5 | 1325 | struct ctf_trace *ctf_trace; |
b8aa1682 JD |
1326 | |
1327 | if (!session) { | |
1328 | ERR("Metadata sent before version check"); | |
1329 | ret = -1; | |
1330 | goto end; | |
1331 | } | |
1332 | ||
f6416125 MD |
1333 | data_size = payload_size = be64toh(recv_hdr->data_size); |
1334 | if (data_size < sizeof(struct lttcomm_relayd_metadata_payload)) { | |
1335 | ERR("Incorrect data size"); | |
1336 | ret = -1; | |
1337 | goto end; | |
1338 | } | |
1339 | payload_size -= sizeof(struct lttcomm_relayd_metadata_payload); | |
1340 | ||
b8aa1682 | 1341 | if (data_buffer_size < data_size) { |
d7b3776f | 1342 | /* In case the realloc fails, we can free the memory */ |
c617c0c6 MD |
1343 | char *tmp_data_ptr; |
1344 | ||
1345 | tmp_data_ptr = realloc(data_buffer, data_size); | |
1346 | if (!tmp_data_ptr) { | |
b8aa1682 | 1347 | ERR("Allocating data buffer"); |
c617c0c6 | 1348 | free(data_buffer); |
b8aa1682 JD |
1349 | ret = -1; |
1350 | goto end; | |
1351 | } | |
c617c0c6 | 1352 | data_buffer = tmp_data_ptr; |
b8aa1682 JD |
1353 | data_buffer_size = data_size; |
1354 | } | |
1355 | memset(data_buffer, 0, data_size); | |
77c7c900 | 1356 | DBG2("Relay receiving metadata, waiting for %" PRIu64 " bytes", data_size); |
e85cdca9 | 1357 | ret = conn->sock->ops->recvmsg(conn->sock, data_buffer, data_size, 0); |
b8aa1682 | 1358 | if (ret < 0 || ret != data_size) { |
a6cd2b97 DG |
1359 | if (ret == 0) { |
1360 | /* Orderly shutdown. Not necessary to print an error. */ | |
e85cdca9 | 1361 | DBG("Socket %d did an orderly shutdown", conn->sock->fd); |
a6cd2b97 DG |
1362 | } else { |
1363 | ERR("Relay didn't receive the whole metadata"); | |
1364 | } | |
b8aa1682 | 1365 | ret = -1; |
b8aa1682 JD |
1366 | goto end; |
1367 | } | |
1368 | metadata_struct = (struct lttcomm_relayd_metadata_payload *) data_buffer; | |
9d1bbf21 MD |
1369 | |
1370 | rcu_read_lock(); | |
eb702af5 | 1371 | metadata_stream = stream_find_by_id(relay_streams_ht, |
d3e2ba59 | 1372 | be64toh(metadata_struct->stream_id)); |
b8aa1682 JD |
1373 | if (!metadata_stream) { |
1374 | ret = -1; | |
9d1bbf21 | 1375 | goto end_unlock; |
b8aa1682 JD |
1376 | } |
1377 | ||
6cd525e8 MD |
1378 | size_ret = lttng_write(metadata_stream->fd, metadata_struct->payload, |
1379 | payload_size); | |
1380 | if (size_ret < payload_size) { | |
b8aa1682 JD |
1381 | ERR("Relay error writing metadata on file"); |
1382 | ret = -1; | |
9d1bbf21 | 1383 | goto end_unlock; |
b8aa1682 | 1384 | } |
1d4dfdef DG |
1385 | |
1386 | ret = write_padding_to_file(metadata_stream->fd, | |
1387 | be32toh(metadata_struct->padding_size)); | |
1388 | if (ret < 0) { | |
1389 | goto end_unlock; | |
1390 | } | |
eb702af5 DG |
1391 | |
1392 | ctf_trace = ctf_trace_find_by_path(session->ctf_traces_ht, | |
1393 | metadata_stream->path_name); | |
1394 | assert(ctf_trace); | |
1395 | ctf_trace->metadata_received += | |
d3e2ba59 | 1396 | payload_size + be32toh(metadata_struct->padding_size); |
1d4dfdef | 1397 | |
b8aa1682 JD |
1398 | DBG2("Relay metadata written"); |
1399 | ||
9d1bbf21 | 1400 | end_unlock: |
6e3c5836 | 1401 | rcu_read_unlock(); |
b8aa1682 JD |
1402 | end: |
1403 | return ret; | |
1404 | } | |
1405 | ||
1406 | /* | |
1407 | * relay_send_version: send relayd version number | |
1408 | */ | |
1409 | static | |
1410 | int relay_send_version(struct lttcomm_relayd_hdr *recv_hdr, | |
e85cdca9 | 1411 | struct relay_connection *conn) |
b8aa1682 | 1412 | { |
7f51dcba | 1413 | int ret; |
092b6259 | 1414 | struct lttcomm_relayd_version reply, msg; |
b8aa1682 | 1415 | |
e85cdca9 | 1416 | assert(conn); |
c5b6f4f0 | 1417 | |
e85cdca9 | 1418 | conn->version_check_done = 1; |
b8aa1682 | 1419 | |
092b6259 | 1420 | /* Get version from the other side. */ |
e85cdca9 | 1421 | ret = conn->sock->ops->recvmsg(conn->sock, &msg, sizeof(msg), 0); |
092b6259 | 1422 | if (ret < 0 || ret != sizeof(msg)) { |
a6cd2b97 DG |
1423 | if (ret == 0) { |
1424 | /* Orderly shutdown. Not necessary to print an error. */ | |
e85cdca9 | 1425 | DBG("Socket %d did an orderly shutdown", conn->sock->fd); |
a6cd2b97 DG |
1426 | } else { |
1427 | ERR("Relay failed to receive the version values."); | |
1428 | } | |
092b6259 | 1429 | ret = -1; |
092b6259 DG |
1430 | goto end; |
1431 | } | |
1432 | ||
239f3aec | 1433 | memset(&reply, 0, sizeof(reply)); |
d83a952c MD |
1434 | reply.major = RELAYD_VERSION_COMM_MAJOR; |
1435 | reply.minor = RELAYD_VERSION_COMM_MINOR; | |
d4519fa3 JD |
1436 | |
1437 | /* Major versions must be the same */ | |
1438 | if (reply.major != be32toh(msg.major)) { | |
6151a90f JD |
1439 | DBG("Incompatible major versions (%u vs %u), deleting session", |
1440 | reply.major, be32toh(msg.major)); | |
e85cdca9 | 1441 | destroy_session(conn->session, conn->sessions_ht); |
d4519fa3 JD |
1442 | ret = 0; |
1443 | goto end; | |
1444 | } | |
1445 | ||
e85cdca9 | 1446 | conn->major = reply.major; |
0f907de1 JD |
1447 | /* We adapt to the lowest compatible version */ |
1448 | if (reply.minor <= be32toh(msg.minor)) { | |
e85cdca9 | 1449 | conn->minor = reply.minor; |
0f907de1 | 1450 | } else { |
e85cdca9 | 1451 | conn->minor = be32toh(msg.minor); |
0f907de1 JD |
1452 | } |
1453 | ||
6151a90f JD |
1454 | reply.major = htobe32(reply.major); |
1455 | reply.minor = htobe32(reply.minor); | |
e85cdca9 | 1456 | ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
6151a90f JD |
1457 | sizeof(struct lttcomm_relayd_version), 0); |
1458 | if (ret < 0) { | |
1459 | ERR("Relay sending version"); | |
1460 | } | |
1461 | ||
e85cdca9 DG |
1462 | DBG("Version check done using protocol %u.%u", conn->major, |
1463 | conn->minor); | |
b8aa1682 JD |
1464 | |
1465 | end: | |
1466 | return ret; | |
1467 | } | |
1468 | ||
c8f59ee5 | 1469 | /* |
6d805429 | 1470 | * Check for data pending for a given stream id from the session daemon. |
c8f59ee5 DG |
1471 | */ |
1472 | static | |
6d805429 | 1473 | int relay_data_pending(struct lttcomm_relayd_hdr *recv_hdr, |
e85cdca9 | 1474 | struct relay_connection *conn) |
c8f59ee5 | 1475 | { |
e85cdca9 | 1476 | struct relay_session *session = conn->session; |
6d805429 | 1477 | struct lttcomm_relayd_data_pending msg; |
c8f59ee5 DG |
1478 | struct lttcomm_relayd_generic_reply reply; |
1479 | struct relay_stream *stream; | |
1480 | int ret; | |
c8f59ee5 DG |
1481 | uint64_t last_net_seq_num, stream_id; |
1482 | ||
6d805429 | 1483 | DBG("Data pending command received"); |
c8f59ee5 | 1484 | |
e85cdca9 | 1485 | if (!session || conn->version_check_done == 0) { |
c8f59ee5 DG |
1486 | ERR("Trying to check for data before version check"); |
1487 | ret = -1; | |
1488 | goto end_no_session; | |
1489 | } | |
1490 | ||
e85cdca9 | 1491 | ret = conn->sock->ops->recvmsg(conn->sock, &msg, sizeof(msg), 0); |
c8f59ee5 | 1492 | if (ret < sizeof(msg)) { |
a6cd2b97 DG |
1493 | if (ret == 0) { |
1494 | /* Orderly shutdown. Not necessary to print an error. */ | |
e85cdca9 | 1495 | DBG("Socket %d did an orderly shutdown", conn->sock->fd); |
a6cd2b97 DG |
1496 | } else { |
1497 | ERR("Relay didn't receive valid data_pending struct size : %d", | |
1498 | ret); | |
1499 | } | |
c8f59ee5 DG |
1500 | ret = -1; |
1501 | goto end_no_session; | |
1502 | } | |
1503 | ||
1504 | stream_id = be64toh(msg.stream_id); | |
1505 | last_net_seq_num = be64toh(msg.last_net_seq_num); | |
1506 | ||
1507 | rcu_read_lock(); | |
eb702af5 | 1508 | stream = stream_find_by_id(relay_streams_ht, stream_id); |
de91f48a | 1509 | if (stream == NULL) { |
c8f59ee5 DG |
1510 | ret = -1; |
1511 | goto end_unlock; | |
1512 | } | |
1513 | ||
6d805429 | 1514 | DBG("Data pending for stream id %" PRIu64 " prev_seq %" PRIu64 |
c8f59ee5 DG |
1515 | " and last_seq %" PRIu64, stream_id, stream->prev_seq, |
1516 | last_net_seq_num); | |
1517 | ||
33832e64 | 1518 | /* Avoid wrapping issue */ |
39df6d9f | 1519 | if (((int64_t) (stream->prev_seq - last_net_seq_num)) >= 0) { |
6d805429 | 1520 | /* Data has in fact been written and is NOT pending */ |
c8f59ee5 | 1521 | ret = 0; |
6d805429 DG |
1522 | } else { |
1523 | /* Data still being streamed thus pending */ | |
1524 | ret = 1; | |
c8f59ee5 DG |
1525 | } |
1526 | ||
f7079f67 DG |
1527 | /* Pending check is now done. */ |
1528 | stream->data_pending_check_done = 1; | |
1529 | ||
c8f59ee5 DG |
1530 | end_unlock: |
1531 | rcu_read_unlock(); | |
1532 | ||
239f3aec | 1533 | memset(&reply, 0, sizeof(reply)); |
c8f59ee5 | 1534 | reply.ret_code = htobe32(ret); |
e85cdca9 | 1535 | ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
c8f59ee5 | 1536 | if (ret < 0) { |
6d805429 | 1537 | ERR("Relay data pending ret code failed"); |
c8f59ee5 DG |
1538 | } |
1539 | ||
1540 | end_no_session: | |
1541 | return ret; | |
1542 | } | |
1543 | ||
1544 | /* | |
1545 | * Wait for the control socket to reach a quiescent state. | |
1546 | * | |
1547 | * Note that for now, when receiving this command from the session daemon, this | |
1548 | * means that every subsequent command or data received on the control socket |
1549 | * has been handled. So, this is why we simply return OK here. | |
1550 | */ | |
1551 | static | |
1552 | int relay_quiescent_control(struct lttcomm_relayd_hdr *recv_hdr, | |
e85cdca9 | 1553 | struct relay_connection *conn) |
c8f59ee5 DG |
1554 | { |
1555 | int ret; | |
ad7051c0 DG |
1556 | uint64_t stream_id; |
1557 | struct relay_stream *stream; | |
1558 | struct lttng_ht_iter iter; | |
1559 | struct lttcomm_relayd_quiescent_control msg; | |
c8f59ee5 DG |
1560 | struct lttcomm_relayd_generic_reply reply; |
1561 | ||
1562 | DBG("Checking quiescent state on control socket"); | |
1563 | ||
e85cdca9 | 1564 | if (!conn->session || conn->version_check_done == 0) { |
ad7051c0 DG |
1565 | ERR("Trying to check for data before version check"); |
1566 | ret = -1; | |
1567 | goto end_no_session; | |
1568 | } | |
1569 | ||
e85cdca9 | 1570 | ret = conn->sock->ops->recvmsg(conn->sock, &msg, sizeof(msg), 0); |
ad7051c0 | 1571 | if (ret < sizeof(msg)) { |
a6cd2b97 DG |
1572 | if (ret == 0) { |
1573 | /* Orderly shutdown. Not necessary to print an error. */ | |
e85cdca9 | 1574 | DBG("Socket %d did an orderly shutdown", conn->sock->fd); |
a6cd2b97 DG |
1575 | } else { |
1576 | ERR("Relay didn't receive valid begin data_pending struct size: %d", | |
1577 | ret); | |
1578 | } | |
ad7051c0 DG |
1579 | ret = -1; |
1580 | goto end_no_session; | |
1581 | } | |
1582 | ||
1583 | stream_id = be64toh(msg.stream_id); | |
1584 | ||
1585 | rcu_read_lock(); | |
d3e2ba59 | 1586 | cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream, |
eb702af5 | 1587 | node.node) { |
ad7051c0 DG |
1588 | if (stream->stream_handle == stream_id) { |
1589 | stream->data_pending_check_done = 1; | |
1590 | DBG("Relay quiescent control pending flag set to %" PRIu64, | |
1591 | stream_id); | |
1592 | break; | |
1593 | } | |
1594 | } | |
1595 | rcu_read_unlock(); | |
1596 | ||
239f3aec | 1597 | memset(&reply, 0, sizeof(reply)); |
c8f59ee5 | 1598 | reply.ret_code = htobe32(LTTNG_OK); |
e85cdca9 | 1599 | ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
c8f59ee5 | 1600 | if (ret < 0) { |
6d805429 | 1601 | ERR("Relay quiescent control ret code failed"); |
c8f59ee5 DG |
1602 | } |
1603 | ||
ad7051c0 | 1604 | end_no_session: |
c8f59ee5 DG |
1605 | return ret; |
1606 | } | |
1607 | ||
f7079f67 DG |
1608 | /* |
1609 | * Initialize a data pending command. This means that a client is about to ask | |
1610 | * for data pending for each stream he/she holds. Simply iterate over all | |
1611 | * streams of a session and clear the data_pending_check_done flag. | |
1612 | * | |
1613 | * This command returns an LTTNG_OK code to the client. | |
1614 | */ | |
1615 | static | |
1616 | int relay_begin_data_pending(struct lttcomm_relayd_hdr *recv_hdr, | |
e85cdca9 | 1617 | struct relay_connection *conn) |
f7079f67 DG |
1618 | { |
1619 | int ret; | |
1620 | struct lttng_ht_iter iter; | |
1621 | struct lttcomm_relayd_begin_data_pending msg; | |
1622 | struct lttcomm_relayd_generic_reply reply; | |
1623 | struct relay_stream *stream; | |
1624 | uint64_t session_id; | |
1625 | ||
1626 | assert(recv_hdr); | |
e85cdca9 | 1627 | assert(conn); |
f7079f67 DG |
1628 | |
1629 | DBG("Init streams for data pending"); | |
1630 | ||
e85cdca9 | 1631 | if (!conn->session || conn->version_check_done == 0) { |
f7079f67 DG |
1632 | ERR("Trying to check for data before version check"); |
1633 | ret = -1; | |
1634 | goto end_no_session; | |
1635 | } | |
1636 | ||
e85cdca9 | 1637 | ret = conn->sock->ops->recvmsg(conn->sock, &msg, sizeof(msg), 0); |
f7079f67 | 1638 | if (ret < sizeof(msg)) { |
a6cd2b97 DG |
1639 | if (ret == 0) { |
1640 | /* Orderly shutdown. Not necessary to print an error. */ | |
e85cdca9 | 1641 | DBG("Socket %d did an orderly shutdown", conn->sock->fd); |
a6cd2b97 DG |
1642 | } else { |
1643 | ERR("Relay didn't receive valid begin data_pending struct size: %d", | |
1644 | ret); | |
1645 | } | |
f7079f67 DG |
1646 | ret = -1; |
1647 | goto end_no_session; | |
1648 | } | |
1649 | ||
1650 | session_id = be64toh(msg.session_id); | |
1651 | ||
1652 | /* | |
1653 | * Iterate over all streams to set the begin data pending flag. For now, the | |
1654 | * streams are indexed by stream handle so we have to iterate over all | |
1655 | * streams to find the ones associated with the right session_id. | |
1656 | */ | |
1657 | rcu_read_lock(); | |
d3e2ba59 | 1658 | cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream, |
eb702af5 DG |
1659 | node.node) { |
1660 | if (stream->session_id == session_id) { | |
f7079f67 DG |
1661 | stream->data_pending_check_done = 0; |
1662 | DBG("Set begin data pending flag to stream %" PRIu64, | |
1663 | stream->stream_handle); | |
1664 | } | |
1665 | } | |
1666 | rcu_read_unlock(); | |
1667 | ||
239f3aec | 1668 | memset(&reply, 0, sizeof(reply)); |
f7079f67 DG |
1669 | /* All good, send back reply. */ |
1670 | reply.ret_code = htobe32(LTTNG_OK); | |
1671 | ||
e85cdca9 | 1672 | ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
f7079f67 DG |
1673 | if (ret < 0) { |
1674 | ERR("Relay begin data pending send reply failed"); | |
1675 | } | |
1676 | ||
1677 | end_no_session: | |
1678 | return ret; | |
1679 | } | |
1680 | ||
1681 | /* | |
1682 | * End data pending command. This will check, for a given session id, if each | |
1683 | * stream associated with it has its data_pending_check_done flag set. If not, | |
1684 | * this means that the client lost track of the stream but the data is still | |
1685 | * being streamed on our side. In this case, we inform the client that data is | |
1686 | * inflight. | |
1687 | * | |
1688 | * Return to the client, via the ret_code, whether or not data is in flight. | |
1689 | */ | |
1690 | static | |
1691 | int relay_end_data_pending(struct lttcomm_relayd_hdr *recv_hdr, | |
e85cdca9 | 1692 | struct relay_connection *conn) |
f7079f67 DG |
1693 | { |
1694 | int ret; | |
1695 | struct lttng_ht_iter iter; | |
1696 | struct lttcomm_relayd_end_data_pending msg; | |
1697 | struct lttcomm_relayd_generic_reply reply; | |
1698 | struct relay_stream *stream; | |
1699 | uint64_t session_id; | |
1700 | uint32_t is_data_inflight = 0; | |
1701 | ||
1702 | assert(recv_hdr); | |
e85cdca9 | 1703 | assert(conn); |
f7079f67 DG |
1704 | |
1705 | DBG("End data pending command"); | |
1706 | ||
e85cdca9 | 1707 | if (!conn->session || conn->version_check_done == 0) { |
f7079f67 DG |
1708 | ERR("Trying to check for data before version check"); |
1709 | ret = -1; | |
1710 | goto end_no_session; | |
1711 | } | |
1712 | ||
e85cdca9 | 1713 | ret = conn->sock->ops->recvmsg(conn->sock, &msg, sizeof(msg), 0); |
f7079f67 | 1714 | if (ret < sizeof(msg)) { |
a6cd2b97 DG |
1715 | if (ret == 0) { |
1716 | /* Orderly shutdown. Not necessary to print an error. */ | |
e85cdca9 | 1717 | DBG("Socket %d did an orderly shutdown", conn->sock->fd); |
a6cd2b97 DG |
1718 | } else { |
1719 | ERR("Relay didn't receive valid end data_pending struct size: %d", | |
1720 | ret); | |
1721 | } | |
f7079f67 DG |
1722 | ret = -1; |
1723 | goto end_no_session; | |
1724 | } | |
1725 | ||
1726 | session_id = be64toh(msg.session_id); | |
1727 | ||
1728 | /* Iterate over all streams to see if the begin data pending flag is set. */ | |
1729 | rcu_read_lock(); | |
d3e2ba59 | 1730 | cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream, |
eb702af5 DG |
1731 | node.node) { |
1732 | if (stream->session_id == session_id && | |
50979bcd | 1733 | !stream->data_pending_check_done && !stream->terminated_flag) { |
f7079f67 DG |
1734 | is_data_inflight = 1; |
1735 | DBG("Data is still in flight for stream %" PRIu64, | |
1736 | stream->stream_handle); | |
1737 | break; | |
1738 | } | |
1739 | } | |
1740 | rcu_read_unlock(); | |
1741 | ||
239f3aec | 1742 | memset(&reply, 0, sizeof(reply)); |
f7079f67 DG |
1743 | /* All good, send back reply. */ |
1744 | reply.ret_code = htobe32(is_data_inflight); | |
1745 | ||
e85cdca9 | 1746 | ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
f7079f67 DG |
1747 | if (ret < 0) { |
1748 | ERR("Relay end data pending send reply failed"); | |
1749 | } | |
1750 | ||
1751 | end_no_session: | |
1752 | return ret; | |
1753 | } | |
1754 | ||
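Illustrative aside (not part of the file): every command handler above converts multi-byte wire fields with be64toh()/be32toh() on receive and htobe32()/htobe64() on send, since the protocol is big-endian on the wire. A hedged sketch of that convention using made-up struct names, not the real lttcomm definitions:

#include <endian.h>
#include <stdint.h>

struct example_request { uint64_t session_id; };  /* big-endian on the wire */
struct example_reply   { uint32_t ret_code;   };  /* big-endian on the wire */

/* Sender side: convert host-order values to big-endian before sending. */
static void example_fill_request(struct example_request *req, uint64_t id)
{
	req->session_id = htobe64(id);
}

/* Receiver side: convert back to host order after reading from the socket. */
static uint32_t example_reply_code(const struct example_reply *reply)
{
	return be32toh(reply->ret_code);
}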
1c20f0e2 JD |
1755 | /* |
1756 | * Receive an index for a specific stream. | |
1757 | * | |
1758 | * Return 0 on success else a negative value. | |
1759 | */ | |
1760 | static | |
1761 | int relay_recv_index(struct lttcomm_relayd_hdr *recv_hdr, | |
e85cdca9 | 1762 | struct relay_connection *conn) |
1c20f0e2 JD |
1763 | { |
1764 | int ret, send_ret, index_created = 0; | |
e85cdca9 | 1765 | struct relay_session *session = conn->session; |
1c20f0e2 JD |
1766 | struct lttcomm_relayd_index index_info; |
1767 | struct relay_index *index, *wr_index = NULL; | |
1768 | struct lttcomm_relayd_generic_reply reply; | |
1769 | struct relay_stream *stream; | |
1770 | uint64_t net_seq_num; | |
1771 | ||
e85cdca9 | 1772 | assert(conn); |
1c20f0e2 JD |
1773 | |
1774 | DBG("Relay receiving index"); | |
1775 | ||
e85cdca9 | 1776 | if (!session || conn->version_check_done == 0) { |
1c20f0e2 JD |
1777 | ERR("Trying to receive an index before version check"); | |
1778 | ret = -1; | |
1779 | goto end_no_session; | |
1780 | } | |
1781 | ||
e85cdca9 | 1782 | ret = conn->sock->ops->recvmsg(conn->sock, &index_info, |
1c20f0e2 JD |
1783 | sizeof(index_info), 0); |
1784 | if (ret < sizeof(index_info)) { | |
1785 | if (ret == 0) { | |
1786 | /* Orderly shutdown. Not necessary to print an error. */ | |
e85cdca9 | 1787 | DBG("Socket %d did an orderly shutdown", conn->sock->fd); |
1c20f0e2 JD |
1788 | } else { |
1789 | ERR("Relay didn't receive valid index struct size: %d", ret); | |
1790 | } | |
1791 | ret = -1; | |
1792 | goto end_no_session; | |
1793 | } | |
1794 | ||
1795 | net_seq_num = be64toh(index_info.net_seq_num); | |
1796 | ||
1797 | rcu_read_lock(); | |
eb702af5 DG |
1798 | stream = stream_find_by_id(relay_streams_ht, |
1799 | be64toh(index_info.relay_stream_id)); | |
1c20f0e2 JD |
1800 | if (!stream) { |
1801 | ret = -1; | |
1802 | goto end_rcu_unlock; | |
1803 | } | |
1804 | ||
d3e2ba59 JD |
1805 | /* Live beacon handling */ |
1806 | if (index_info.packet_size == 0) { | |
1807 | DBG("Received live beacon for stream %" PRIu64, stream->stream_handle); | |
1808 | ||
1809 | /* | |
e36fbffb JD |
1810 | * Only flag a stream inactive when it has already received data |
1811 | * and no indexes are in flight. | |
d3e2ba59 | 1812 | */ |
e36fbffb | 1813 | if (stream->total_index_received > 0 && stream->indexes_in_flight == 0) { |
d3e2ba59 JD |
1814 | stream->beacon_ts_end = be64toh(index_info.timestamp_end); |
1815 | } | |
1816 | ret = 0; | |
1817 | goto end_rcu_unlock; | |
1818 | } else { | |
1819 | stream->beacon_ts_end = -1ULL; | |
1820 | } | |
1821 | ||
0a6518b0 | 1822 | index = relay_index_find(stream->stream_handle, net_seq_num); |
1c20f0e2 JD |
1823 | if (!index) { |
1824 | /* A successful creation will add the object to the HT. */ | |
1825 | index = relay_index_create(stream->stream_handle, net_seq_num); | |
1826 | if (!index) { | |
1827 | goto end_rcu_unlock; | |
1828 | } | |
1829 | index_created = 1; | |
e36fbffb | 1830 | stream->indexes_in_flight++; |
1c20f0e2 JD |
1831 | } |
1832 | ||
1833 | copy_index_control_data(index, &index_info); | |
5e372a51 JD |
1834 | if (stream->ctf_stream_id == -1ULL) { |
1835 | stream->ctf_stream_id = be64toh(index_info.stream_id); | |
1836 | } | |
1c20f0e2 JD |
1837 | |
1838 | if (index_created) { | |
1839 | /* | |
1840 | * Try to add the relay index object to the hash table. If an object | |
1841 | * already exist, destroy back the index created, set the data in this | |
1842 | * object and write it on disk. | |
1843 | */ | |
0a6518b0 | 1844 | relay_index_add(index, &wr_index); |
1c20f0e2 JD |
1845 | if (wr_index) { |
1846 | copy_index_control_data(wr_index, &index_info); | |
1847 | free(index); | |
1848 | } | |
1849 | } else { | |
1850 | /* The index already exists so write it on disk. */ | |
1851 | wr_index = index; | |
1852 | } | |
1853 | ||
1854 | /* Do we have a complete index ready to be written to disk? */ | |
1855 | if (wr_index) { | |
0a6518b0 | 1856 | ret = relay_index_write(wr_index->fd, wr_index); |
1c20f0e2 JD |
1857 | if (ret < 0) { |
1858 | goto end_rcu_unlock; | |
1859 | } | |
d3e2ba59 | 1860 | stream->total_index_received++; |
e36fbffb JD |
1861 | stream->indexes_in_flight--; |
1862 | assert(stream->indexes_in_flight >= 0); | |
1c20f0e2 JD |
1863 | } |
1864 | ||
1865 | end_rcu_unlock: | |
1866 | rcu_read_unlock(); | |
1867 | ||
239f3aec | 1868 | memset(&reply, 0, sizeof(reply)); |
1c20f0e2 JD |
1869 | if (ret < 0) { |
1870 | reply.ret_code = htobe32(LTTNG_ERR_UNK); | |
1871 | } else { | |
1872 | reply.ret_code = htobe32(LTTNG_OK); | |
1873 | } | |
e85cdca9 | 1874 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
1c20f0e2 JD |
1875 | if (send_ret < 0) { |
1876 | ERR("Relay sending recv index reply failed"); | |
1877 | ret = send_ret; | |
1878 | } | |
1879 | ||
1880 | end_no_session: | |
1881 | return ret; | |
1882 | } | |
1883 | ||
814fcae4 JD |
1884 | /* |
1885 | * Receive the streams_sent message. | |
1886 | * | |
1887 | * Return 0 on success else a negative value. | |
1888 | */ | |
1889 | static | |
1890 | int relay_streams_sent(struct lttcomm_relayd_hdr *recv_hdr, | |
e85cdca9 | 1891 | struct relay_connection *conn) |
814fcae4 JD |
1892 | { |
1893 | int ret, send_ret; | |
1894 | struct lttcomm_relayd_generic_reply reply; | |
1895 | ||
e85cdca9 | 1896 | assert(conn); |
814fcae4 JD |
1897 | |
1898 | DBG("Relay receiving streams_sent"); | |
1899 | ||
e85cdca9 | 1900 | if (!conn->session || conn->version_check_done == 0) { |
814fcae4 JD |
1901 | ERR("Trying to receive streams_sent before version check"); | |
1902 | ret = -1; | |
1903 | goto end_no_session; | |
1904 | } | |
1905 | ||
1906 | /* | |
1907 | * Flag every pending stream in the connection recv list that they are | |
1908 | * ready to be used by the viewer. | |
1909 | */ | |
e85cdca9 | 1910 | set_viewer_ready_flag(conn); |
814fcae4 | 1911 | |
829007d9 JD |
1912 | /* |
1913 | * Inform the viewer that there are new streams in the session. | |
1914 | */ | |
750c7a85 JD |
1915 | if (conn->session->viewer_refcount) { |
1916 | uatomic_set(&conn->session->new_streams, 1); | |
1917 | } | |
829007d9 | 1918 | |
239f3aec | 1919 | memset(&reply, 0, sizeof(reply)); |
814fcae4 | 1920 | reply.ret_code = htobe32(LTTNG_OK); |
e85cdca9 | 1921 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
814fcae4 JD |
1922 | if (send_ret < 0) { |
1923 | ERR("Relay sending sent_stream reply failed"); | |
1924 | ret = send_ret; | |
1925 | } else { | |
1926 | /* Success. */ | |
1927 | ret = 0; | |
1928 | } | |
1929 | ||
1930 | end_no_session: | |
1931 | return ret; | |
1932 | } | |
1933 | ||
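Illustrative aside (not part of the file): the new_streams notification above is a simple atomic flag handshake between the control side and the live viewer side. A hedged sketch of that pattern using liburcu's uatomic primitives, with hypothetical variable and helper names:

#include <urcu/uatomic.h>

static int new_streams_flag;

/* Control side: mark that new streams were announced for the session. */
static void example_signal_new_streams(void)
{
	uatomic_set(&new_streams_flag, 1);
}

/* Viewer side: read and clear the flag in one atomic exchange. */
static int example_consume_new_streams(void)
{
	return uatomic_xchg(&new_streams_flag, 0);
}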
b8aa1682 | 1934 | /* |
d3e2ba59 | 1935 | * Process the commands received on the control socket |
b8aa1682 JD |
1936 | */ |
1937 | static | |
1938 | int relay_process_control(struct lttcomm_relayd_hdr *recv_hdr, | |
e85cdca9 | 1939 | struct relay_connection *conn) |
b8aa1682 JD |
1940 | { |
1941 | int ret = 0; | |
1942 | ||
1943 | switch (be32toh(recv_hdr->cmd)) { | |
b8aa1682 | 1944 | case RELAYD_CREATE_SESSION: |
e85cdca9 | 1945 | ret = relay_create_session(recv_hdr, conn); |
b8aa1682 | 1946 | break; |
b8aa1682 | 1947 | case RELAYD_ADD_STREAM: |
e85cdca9 | 1948 | ret = relay_add_stream(recv_hdr, conn); |
b8aa1682 JD |
1949 | break; |
1950 | case RELAYD_START_DATA: | |
e85cdca9 | 1951 | ret = relay_start(recv_hdr, conn); |
b8aa1682 JD |
1952 | break; |
1953 | case RELAYD_SEND_METADATA: | |
e85cdca9 | 1954 | ret = relay_recv_metadata(recv_hdr, conn); |
b8aa1682 JD |
1955 | break; |
1956 | case RELAYD_VERSION: | |
e85cdca9 | 1957 | ret = relay_send_version(recv_hdr, conn); |
b8aa1682 | 1958 | break; |
173af62f | 1959 | case RELAYD_CLOSE_STREAM: |
e85cdca9 | 1960 | ret = relay_close_stream(recv_hdr, conn); |
173af62f | 1961 | break; |
6d805429 | 1962 | case RELAYD_DATA_PENDING: |
e85cdca9 | 1963 | ret = relay_data_pending(recv_hdr, conn); |
c8f59ee5 DG |
1964 | break; |
1965 | case RELAYD_QUIESCENT_CONTROL: | |
e85cdca9 | 1966 | ret = relay_quiescent_control(recv_hdr, conn); |
c8f59ee5 | 1967 | break; |
f7079f67 | 1968 | case RELAYD_BEGIN_DATA_PENDING: |
e85cdca9 | 1969 | ret = relay_begin_data_pending(recv_hdr, conn); |
f7079f67 DG |
1970 | break; |
1971 | case RELAYD_END_DATA_PENDING: | |
e85cdca9 | 1972 | ret = relay_end_data_pending(recv_hdr, conn); |
f7079f67 | 1973 | break; |
1c20f0e2 | 1974 | case RELAYD_SEND_INDEX: |
e85cdca9 | 1975 | ret = relay_recv_index(recv_hdr, conn); |
1c20f0e2 | 1976 | break; |
814fcae4 | 1977 | case RELAYD_STREAMS_SENT: |
e85cdca9 | 1978 | ret = relay_streams_sent(recv_hdr, conn); |
814fcae4 | 1979 | break; |
b8aa1682 JD |
1980 | case RELAYD_UPDATE_SYNC_INFO: |
1981 | default: | |
1982 | ERR("Received unknown command (%u)", be32toh(recv_hdr->cmd)); | |
e85cdca9 | 1983 | relay_unknown_command(conn); |
b8aa1682 JD |
1984 | ret = -1; |
1985 | goto end; | |
1986 | } | |
1987 | ||
1988 | end: | |
1989 | return ret; | |
1990 | } | |
1991 | ||
7d2f7452 DG |
1992 | /* |
1993 | * Handle index for a data stream. | |
1994 | * | |
1995 | * RCU read side lock MUST be acquired. | |
1996 | * | |
1997 | * Return 0 on success else a negative value. | |
1998 | */ | |
1999 | static int handle_index_data(struct relay_stream *stream, uint64_t net_seq_num, | |
2000 | int rotate_index) | |
2001 | { | |
2002 | int ret = 0, index_created = 0; | |
2003 | uint64_t stream_id, data_offset; | |
2004 | struct relay_index *index, *wr_index = NULL; | |
2005 | ||
2006 | assert(stream); | |
2007 | ||
2008 | stream_id = stream->stream_handle; | |
2009 | /* Get data offset because we are about to update the index. */ | |
2010 | data_offset = htobe64(stream->tracefile_size_current); | |
2011 | ||
2012 | /* | |
2013 | * Look up an existing index for that stream id/sequence number. If one | |
2014 | * exists, the control thread already received the data for it, thus we need | |
2015 | * to write it to disk. | |
2016 | */ | |
2017 | index = relay_index_find(stream_id, net_seq_num); | |
2018 | if (!index) { | |
2019 | /* A successful creation will add the object to the HT. */ | |
2020 | index = relay_index_create(stream_id, net_seq_num); | |
2021 | if (!index) { | |
2022 | ret = -1; | |
2023 | goto error; | |
2024 | } | |
2025 | index_created = 1; | |
e36fbffb | 2026 | stream->indexes_in_flight++; |
7d2f7452 DG |
2027 | } |
2028 | ||
2029 | if (rotate_index || stream->index_fd < 0) { | |
2030 | index->to_close_fd = stream->index_fd; | |
2031 | ret = index_create_file(stream->path_name, stream->channel_name, | |
2032 | relayd_uid, relayd_gid, stream->tracefile_size, | |
2033 | stream->tracefile_count_current); | |
2034 | if (ret < 0) { | |
2035 | /* This will close the stream's index fd if one. */ | |
2036 | relay_index_free_safe(index); | |
2037 | goto error; | |
2038 | } | |
2039 | stream->index_fd = ret; | |
2040 | } | |
2041 | index->fd = stream->index_fd; | |
2042 | index->index_data.offset = data_offset; | |
2043 | ||
2044 | if (index_created) { | |
2045 | /* | |
2046 | * Try to add the relay index object to the hash table. If an object | |
2047 | * already exists, free the index we just created and set the data. | |
2048 | */ | |
2049 | relay_index_add(index, &wr_index); | |
2050 | if (wr_index) { | |
2051 | /* Copy back data from the created index. */ | |
2052 | wr_index->fd = index->fd; | |
2053 | wr_index->to_close_fd = index->to_close_fd; | |
2054 | wr_index->index_data.offset = data_offset; | |
2055 | free(index); | |
2056 | } | |
2057 | } else { | |
2058 | /* The index already exists so write it on disk. */ | |
2059 | wr_index = index; | |
2060 | } | |
2061 | ||
2062 | /* Do we have a complete index ready to be written to disk? */ | |
2063 | if (wr_index) { | |
2064 | ret = relay_index_write(wr_index->fd, wr_index); | |
2065 | if (ret < 0) { | |
2066 | goto error; | |
2067 | } | |
2068 | stream->total_index_received++; | |
e36fbffb JD |
2069 | stream->indexes_in_flight--; |
2070 | assert(stream->indexes_in_flight >= 0); | |
7d2f7452 DG |
2071 | } |
2072 | ||
2073 | error: | |
2074 | return ret; | |
2075 | } | |
2076 | ||
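Illustrative aside (not part of the file): an index entry is assembled from two sources, as handle_index_data() and relay_recv_index() show: the control thread provides the packet metadata received over the network, the data thread provides the on-disk offset, and whichever side completes the pair second writes it out. A simplified, hypothetical view of such an entry (field names are assumptions, not the real relay_index layout):

#include <stdint.h>

struct example_index_entry {
	uint64_t offset;         /* filled by the data path (file position) */
	uint64_t packet_size;    /* filled from the control-side index message */
	uint64_t timestamp_begin;
	uint64_t timestamp_end;
	uint64_t net_seq_num;    /* key shared by both halves */
};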
b8aa1682 JD |
2077 | /* |
2078 | * relay_process_data: Process the data received on the data socket | |
2079 | */ | |
2080 | static | |
e85cdca9 | 2081 | int relay_process_data(struct relay_connection *conn) |
b8aa1682 | 2082 | { |
7d2f7452 | 2083 | int ret = 0, rotate_index = 0; |
6cd525e8 | 2084 | ssize_t size_ret; |
b8aa1682 JD |
2085 | struct relay_stream *stream; |
2086 | struct lttcomm_relayd_data_hdr data_hdr; | |
7d2f7452 | 2087 | uint64_t stream_id; |
173af62f | 2088 | uint64_t net_seq_num; |
b8aa1682 | 2089 | uint32_t data_size; |
eb702af5 | 2090 | struct relay_session *session; |
b8aa1682 | 2091 | |
e85cdca9 DG |
2092 | assert(conn); |
2093 | ||
2094 | ret = conn->sock->ops->recvmsg(conn->sock, &data_hdr, | |
7c5aef62 | 2095 | sizeof(struct lttcomm_relayd_data_hdr), 0); |
b8aa1682 | 2096 | if (ret <= 0) { |
a6cd2b97 DG |
2097 | if (ret == 0) { |
2098 | /* Orderly shutdown. Not necessary to print an error. */ | |
e85cdca9 | 2099 | DBG("Socket %d did an orderly shutdown", conn->sock->fd); |
a6cd2b97 | 2100 | } else { |
e85cdca9 | 2101 | ERR("Unable to receive data header on sock %d", conn->sock->fd); |
a6cd2b97 | 2102 | } |
b8aa1682 JD |
2103 | ret = -1; |
2104 | goto end; | |
2105 | } | |
2106 | ||
2107 | stream_id = be64toh(data_hdr.stream_id); | |
9d1bbf21 MD |
2108 | |
2109 | rcu_read_lock(); | |
eb702af5 | 2110 | stream = stream_find_by_id(relay_streams_ht, stream_id); |
b8aa1682 JD |
2111 | if (!stream) { |
2112 | ret = -1; | |
1c20f0e2 | 2113 | goto end_rcu_unlock; |
b8aa1682 JD |
2114 | } |
2115 | ||
e85cdca9 | 2116 | session = session_find_by_id(conn->sessions_ht, stream->session_id); |
eb702af5 DG |
2117 | assert(session); |
2118 | ||
b8aa1682 JD |
2119 | data_size = be32toh(data_hdr.data_size); |
2120 | if (data_buffer_size < data_size) { | |
c617c0c6 MD |
2121 | char *tmp_data_ptr; |
2122 | ||
2123 | tmp_data_ptr = realloc(data_buffer, data_size); | |
2124 | if (!tmp_data_ptr) { | |
b8aa1682 | 2125 | ERR("Allocating data buffer"); |
c617c0c6 | 2126 | free(data_buffer); |
b8aa1682 | 2127 | ret = -1; |
1c20f0e2 | 2128 | goto end_rcu_unlock; |
b8aa1682 | 2129 | } |
c617c0c6 | 2130 | data_buffer = tmp_data_ptr; |
b8aa1682 JD |
2131 | data_buffer_size = data_size; |
2132 | } | |
2133 | memset(data_buffer, 0, data_size); | |
2134 | ||
173af62f DG |
2135 | net_seq_num = be64toh(data_hdr.net_seq_num); |
2136 | ||
77c7c900 | 2137 | DBG3("Receiving data of size %u for stream id %" PRIu64 " seqnum %" PRIu64, |
173af62f | 2138 | data_size, stream_id, net_seq_num); |
e85cdca9 | 2139 | ret = conn->sock->ops->recvmsg(conn->sock, data_buffer, data_size, 0); |
b8aa1682 | 2140 | if (ret <= 0) { |
a6cd2b97 DG |
2141 | if (ret == 0) { |
2142 | /* Orderly shutdown. Not necessary to print an error. */ | |
e85cdca9 | 2143 | DBG("Socket %d did an orderly shutdown", conn->sock->fd); |
a6cd2b97 | 2144 | } |
b8aa1682 | 2145 | ret = -1; |
1c20f0e2 | 2146 | goto end_rcu_unlock; |
b8aa1682 JD |
2147 | } |
2148 | ||
1c20f0e2 | 2149 | /* Check if a rotation is needed. */ |
0f907de1 JD |
2150 | if (stream->tracefile_size > 0 && |
2151 | (stream->tracefile_size_current + data_size) > | |
2152 | stream->tracefile_size) { | |
6b6b9a5a JD |
2153 | struct relay_viewer_stream *vstream; |
2154 | uint64_t new_id; | |
2155 | ||
2156 | new_id = (stream->tracefile_count_current + 1) % | |
2157 | stream->tracefile_count; | |
2158 | /* | |
2159 | * When we wrap-around back to 0, we start overwriting old | |
2160 | * trace data. | |
2161 | */ | |
2162 | if (!stream->tracefile_overwrite && new_id == 0) { | |
2163 | stream->tracefile_overwrite = 1; | |
2164 | } | |
2165 | pthread_mutex_lock(&stream->viewer_stream_rotation_lock); | |
2166 | if (stream->tracefile_overwrite) { | |
2167 | stream->oldest_tracefile_id = | |
2168 | (stream->oldest_tracefile_id + 1) % | |
2169 | stream->tracefile_count; | |
2170 | } | |
991adae2 | 2171 | vstream = viewer_stream_find_by_id(stream->stream_handle); |
6b6b9a5a JD |
2172 | if (vstream) { |
2173 | /* | |
2174 | * The viewer is reading a file about to be | |
2175 | * overwritten. Close the FDs it is | |
2176 | * currently using and let it handle the fault. | |
2177 | */ | |
2178 | if (vstream->tracefile_count_current == new_id) { | |
cef0f7d5 | 2179 | pthread_mutex_lock(&vstream->overwrite_lock); |
6b6b9a5a | 2180 | vstream->abort_flag = 1; |
cef0f7d5 | 2181 | pthread_mutex_unlock(&vstream->overwrite_lock); |
6b6b9a5a JD |
2182 | DBG("Streaming side setting abort_flag on stream %s_%" PRIu64, | |
2183 | stream->channel_name, new_id); | |
2184 | } else if (vstream->tracefile_count_current == | |
2185 | stream->tracefile_count_current) { | |
2186 | /* | |
2187 | * The reader and writer were in the | |
2188 | * same trace file, inform the viewer | |
2189 | * that no new index will ever be added | |
2190 | * to this file. | |
2191 | */ | |
2192 | vstream->close_write_flag = 1; | |
2193 | } | |
2194 | } | |
1c20f0e2 JD |
2195 | ret = utils_rotate_stream_file(stream->path_name, stream->channel_name, |
2196 | stream->tracefile_size, stream->tracefile_count, | |
2197 | relayd_uid, relayd_gid, stream->fd, | |
2198 | &(stream->tracefile_count_current), &stream->fd); | |
6b6b9a5a | 2199 | pthread_mutex_unlock(&stream->viewer_stream_rotation_lock); |
0f907de1 | 2200 | if (ret < 0) { |
1c20f0e2 JD |
2201 | ERR("Rotating stream output file"); |
2202 | goto end_rcu_unlock; | |
0f907de1 | 2203 | } |
a6976990 DG |
2204 | /* Reset current size because we just performed a stream rotation. */ | |
2205 | stream->tracefile_size_current = 0; | |
1c20f0e2 JD |
2206 | rotate_index = 1; |
2207 | } | |
2208 | ||
1c20f0e2 | 2209 | /* |
7d2f7452 DG |
2210 | * Indexes are handled in protocol version 2.4 and above. Also, indexes are | |
2211 | * NOT supported in snapshot mode. | |
1c20f0e2 | 2212 | */ |
eb702af5 | 2213 | if (session->minor >= 4 && !session->snapshot) { |
7d2f7452 | 2214 | ret = handle_index_data(stream, net_seq_num, rotate_index); |
1c20f0e2 | 2215 | if (ret < 0) { |
1c20f0e2 JD |
2216 | goto end_rcu_unlock; |
2217 | } | |
1c20f0e2 JD |
2218 | } |
2219 | ||
7d2f7452 | 2220 | /* Write data to stream output fd. */ |
6cd525e8 MD |
2221 | size_ret = lttng_write(stream->fd, data_buffer, data_size); |
2222 | if (size_ret < data_size) { | |
b8aa1682 JD |
2223 | ERR("Relay error writing data to file"); |
2224 | ret = -1; | |
1c20f0e2 | 2225 | goto end_rcu_unlock; |
b8aa1682 | 2226 | } |
1d4dfdef | 2227 | |
5ab7344e JD |
2228 | DBG2("Relay wrote %zd bytes to tracefile for stream id %" PRIu64, | |
2229 | size_ret, stream->stream_handle); | |
2230 | ||
1d4dfdef DG |
2231 | ret = write_padding_to_file(stream->fd, be32toh(data_hdr.padding_size)); |
2232 | if (ret < 0) { | |
1c20f0e2 | 2233 | goto end_rcu_unlock; |
1d4dfdef | 2234 | } |
1c20f0e2 | 2235 | stream->tracefile_size_current += data_size + be32toh(data_hdr.padding_size); |
1d4dfdef | 2236 | |
173af62f DG |
2237 | stream->prev_seq = net_seq_num; |
2238 | ||
eb702af5 | 2239 | try_close_stream(session, stream); |
173af62f | 2240 | |
1c20f0e2 | 2241 | end_rcu_unlock: |
9d1bbf21 | 2242 | rcu_read_unlock(); |
b8aa1682 JD |
2243 | end: |
2244 | return ret; | |
2245 | } | |
2246 | ||
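Illustrative aside (not part of the file): the rotation above cycles through a fixed number of tracefiles once the per-file size limit is reached, wrapping back to 0 and overwriting the oldest data. A minimal sketch of that arithmetic, with hypothetical helper names:

#include <stdint.h>

/* Next tracefile id; wraps back to 0 after tracefile_count - 1. */
static uint64_t example_next_tracefile_id(uint64_t current_id,
		uint64_t tracefile_count)
{
	return (current_id + 1) % tracefile_count;
}

/* Non-zero when appending data_size bytes would exceed the per-file limit
 * (a limit of 0 means rotation is disabled). */
static int example_needs_rotation(uint64_t current_size, uint64_t data_size,
		uint64_t tracefile_size_limit)
{
	return tracefile_size_limit > 0 &&
		current_size + data_size > tracefile_size_limit;
}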
2247 | static | |
e85cdca9 | 2248 | void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd) |
b8aa1682 JD |
2249 | { |
2250 | int ret; | |
2251 | ||
e85cdca9 DG |
2252 | assert(events); |
2253 | ||
2254 | (void) lttng_poll_del(events, pollfd); | |
b8aa1682 JD |
2255 | |
2256 | ret = close(pollfd); | |
2257 | if (ret < 0) { | |
2258 | ERR("Closing pollfd %d", pollfd); | |
2259 | } | |
2260 | } | |
2261 | ||
e85cdca9 DG |
2262 | static void destroy_connection(struct lttng_ht *relay_connections_ht, |
2263 | struct relay_connection *conn) | |
9d1bbf21 | 2264 | { |
e85cdca9 DG |
2265 | assert(relay_connections_ht); |
2266 | assert(conn); | |
eb702af5 | 2267 | |
e85cdca9 | 2268 | connection_delete(relay_connections_ht, conn); |
eb702af5 | 2269 | |
e85cdca9 | 2270 | /* For the control socket, we try to destroy the session. */ |
23c1fab9 | 2271 | if (conn->type == RELAY_CONTROL && conn->session) { |
e85cdca9 | 2272 | destroy_session(conn->session, conn->sessions_ht); |
9d1bbf21 | 2273 | } |
5b6d8097 | 2274 | |
e85cdca9 | 2275 | connection_destroy(conn); |
b8aa1682 JD |
2276 | } |
2277 | ||
2278 | /* | |
2279 | * This thread does the actual work | |
2280 | */ | |
2281 | static | |
2282 | void *relay_thread_worker(void *data) | |
2283 | { | |
beaad64c DG |
2284 | int ret, err = -1, last_seen_data_fd = -1; |
2285 | uint32_t nb_fd; | |
e85cdca9 | 2286 | struct relay_connection *conn; |
b8aa1682 JD |
2287 | struct lttng_poll_event events; |
2288 | struct lttng_ht *relay_connections_ht; | |
b8aa1682 | 2289 | struct lttng_ht_iter iter; |
b8aa1682 | 2290 | struct lttcomm_relayd_hdr recv_hdr; |
d3e2ba59 JD |
2291 | struct relay_local_data *relay_ctx = (struct relay_local_data *) data; |
2292 | struct lttng_ht *sessions_ht = relay_ctx->sessions_ht; | |
6eb15779 | 2293 | struct relay_index *index; |
b8aa1682 JD |
2294 | |
2295 | DBG("[thread] Relay worker started"); | |
2296 | ||
9d1bbf21 MD |
2297 | rcu_register_thread(); |
2298 | ||
55706a7d MD |
2299 | health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER); |
2300 | ||
4e5b8b82 MD |
2301 | if (testpoint(relayd_thread_worker)) { |
2302 | goto error_testpoint; | |
2303 | } | |
2304 | ||
f385ae0a MD |
2305 | health_code_update(); |
2306 | ||
b8aa1682 JD |
2307 | /* table of connections indexed on socket */ |
2308 | relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG); | |
095a4ae5 MD |
2309 | if (!relay_connections_ht) { |
2310 | goto relay_connections_ht_error; | |
2311 | } | |
b8aa1682 | 2312 | |
1c20f0e2 JD |
2313 | /* Tables of received indexes indexed by index handle and net_seq_num. */ |
2314 | indexes_ht = lttng_ht_new(0, LTTNG_HT_TYPE_TWO_U64); | |
2315 | if (!indexes_ht) { | |
2316 | goto indexes_ht_error; | |
2317 | } | |
2318 | ||
b8aa1682 JD |
2319 | ret = create_thread_poll_set(&events, 2); |
2320 | if (ret < 0) { | |
2321 | goto error_poll_create; | |
2322 | } | |
2323 | ||
e85cdca9 | 2324 | ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP); |
b8aa1682 JD |
2325 | if (ret < 0) { |
2326 | goto error; | |
2327 | } | |
2328 | ||
beaad64c | 2329 | restart: |
b8aa1682 | 2330 | while (1) { |
beaad64c DG |
2331 | int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1; |
2332 | ||
f385ae0a MD |
2333 | health_code_update(); |
2334 | ||
b8aa1682 | 2335 | /* Infinite blocking call, waiting for transmission */ |
87c1611d | 2336 | DBG3("Relayd worker thread polling..."); |
f385ae0a | 2337 | health_poll_entry(); |
b8aa1682 | 2338 | ret = lttng_poll_wait(&events, -1); |
f385ae0a | 2339 | health_poll_exit(); |
b8aa1682 JD |
2340 | if (ret < 0) { |
2341 | /* | |
2342 | * Restart interrupted system call. | |
2343 | */ | |
2344 | if (errno == EINTR) { | |
2345 | goto restart; | |
2346 | } | |
2347 | goto error; | |
2348 | } | |
2349 | ||
0d9c5d77 DG |
2350 | nb_fd = ret; |
2351 | ||
beaad64c DG |
2352 | /* |
2353 | * Process control. The control connection is prioritised so we don't | |
2354 | * starve it with high throughput tracing data on the data | |
2355 | * connection. | |
2356 | */ | |
b8aa1682 JD |
2357 | for (i = 0; i < nb_fd; i++) { |
2358 | /* Fetch once the poll data */ | |
beaad64c DG |
2359 | uint32_t revents = LTTNG_POLL_GETEV(&events, i); |
2360 | int pollfd = LTTNG_POLL_GETFD(&events, i); | |
b8aa1682 | 2361 | |
f385ae0a MD |
2362 | health_code_update(); |
2363 | ||
f0567343 MD |
2364 | if (!revents) { |
2365 | /* No activity for this FD (poll implementation). */ | |
2366 | continue; | |
2367 | } | |
2368 | ||
b8aa1682 JD |
2369 | /* Thread quit pipe has been closed. Killing thread. */ |
2370 | ret = check_thread_quit_pipe(pollfd, revents); | |
2371 | if (ret) { | |
095a4ae5 MD |
2372 | err = 0; |
2373 | goto exit; | |
b8aa1682 JD |
2374 | } |
2375 | ||
e85cdca9 DG |
2376 | /* Inspect the relay conn pipe for new connection */ |
2377 | if (pollfd == relay_conn_pipe[0]) { | |
b8aa1682 | 2378 | if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) { |
e85cdca9 | 2379 | ERR("Relay connection pipe error"); |
b8aa1682 JD |
2380 | goto error; |
2381 | } else if (revents & LPOLLIN) { | |
e85cdca9 | 2382 | ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn)); |
b8aa1682 JD |
2383 | if (ret < 0) { |
2384 | goto error; | |
2385 | } | |
e85cdca9 DG |
2386 | conn->sessions_ht = sessions_ht; |
2387 | connection_init(conn); | |
2388 | lttng_poll_add(&events, conn->sock->fd, | |
2389 | LPOLLIN | LPOLLRDHUP); | |
2390 | rcu_read_lock(); | |
2391 | lttng_ht_add_unique_ulong(relay_connections_ht, | |
2392 | &conn->sock_n); | |
9d1bbf21 | 2393 | rcu_read_unlock(); |
e85cdca9 | 2394 | DBG("Connection socket %d added", conn->sock->fd); |
b8aa1682 | 2395 | } |
e85cdca9 DG |
2396 | } else { |
2397 | rcu_read_lock(); | |
2398 | conn = connection_find_by_sock(relay_connections_ht, pollfd); | |
2399 | /* If not found, there is a synchronization issue. */ | |
2400 | assert(conn); | |
2401 | ||
2402 | if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) { | |
2403 | cleanup_connection_pollfd(&events, pollfd); | |
2404 | destroy_connection(relay_connections_ht, conn); | |
beaad64c DG |
2405 | if (last_seen_data_fd == pollfd) { |
2406 | last_seen_data_fd = last_notdel_data_fd; | |
2407 | } | |
b8aa1682 | 2408 | } else if (revents & LPOLLIN) { |
e85cdca9 DG |
2409 | if (conn->type == RELAY_CONTROL) { |
2410 | ret = conn->sock->ops->recvmsg(conn->sock, &recv_hdr, | |
2411 | sizeof(recv_hdr), 0); | |
b8aa1682 | 2412 | if (ret <= 0) { |
e85cdca9 DG |
2413 | /* Connection closed */ |
2414 | cleanup_connection_pollfd(&events, pollfd); | |
2415 | destroy_connection(relay_connections_ht, conn); | |
b8aa1682 JD |
2416 | DBG("Control connection closed with %d", pollfd); |
2417 | } else { | |
e85cdca9 | 2418 | ret = relay_process_control(&recv_hdr, conn); |
b8aa1682 | 2419 | if (ret < 0) { |
beaad64c | 2420 | /* Clear the session on error. */ |
e85cdca9 DG |
2421 | cleanup_connection_pollfd(&events, pollfd); |
2422 | destroy_connection(relay_connections_ht, conn); | |
b8aa1682 JD |
2423 | DBG("Connection closed with %d", pollfd); |
2424 | } | |
beaad64c | 2425 | seen_control = 1; |
b8aa1682 | 2426 | } |
beaad64c DG |
2427 | } else { |
2428 | /* | |
2429 | * Flag the last seen data fd not deleted. It will be | |
2430 | * used as the last seen fd if any fd gets deleted in | |
2431 | * this first loop. | |
2432 | */ | |
2433 | last_notdel_data_fd = pollfd; | |
2434 | } | |
e85cdca9 DG |
2435 | } else { |
2436 | ERR("Unknown poll events %u for sock %d", revents, pollfd); | |
beaad64c DG |
2437 | } |
2438 | rcu_read_unlock(); | |
2439 | } | |
2440 | } | |
2441 | ||
2442 | /* | |
2443 | * The last loop handled a control request, go back to poll to make | |
2444 | * sure we prioritise the control socket. | |
2445 | */ | |
2446 | if (seen_control) { | |
2447 | continue; | |
2448 | } | |
2449 | ||
2450 | if (last_seen_data_fd >= 0) { | |
2451 | for (i = 0; i < nb_fd; i++) { | |
2452 | int pollfd = LTTNG_POLL_GETFD(&events, i); | |
f385ae0a MD |
2453 | |
2454 | health_code_update(); | |
2455 | ||
beaad64c DG |
2456 | if (last_seen_data_fd == pollfd) { |
2457 | idx = i; | |
2458 | break; | |
2459 | } | |
2460 | } | |
2461 | } | |
2462 | ||
2463 | /* Process data connection. */ | |
2464 | for (i = idx + 1; i < nb_fd; i++) { | |
2465 | /* Fetch the poll data. */ | |
2466 | uint32_t revents = LTTNG_POLL_GETEV(&events, i); | |
2467 | int pollfd = LTTNG_POLL_GETFD(&events, i); | |
2468 | ||
f385ae0a MD |
2469 | health_code_update(); |
2470 | ||
f0567343 MD |
2471 | if (!revents) { |
2472 | /* No activity for this FD (poll implementation). */ | |
2473 | continue; | |
2474 | } | |
2475 | ||
beaad64c | 2476 | /* Skip the command pipe. It's handled in the first loop. */ |
e85cdca9 | 2477 | if (pollfd == relay_conn_pipe[0]) { |
beaad64c DG |
2478 | continue; |
2479 | } | |
2480 | ||
f0567343 MD |
2481 | rcu_read_lock(); |
2482 | conn = connection_find_by_sock(relay_connections_ht, pollfd); | |
2483 | if (!conn) { | |
2484 | /* Skip it. It might have been removed already. */ | |
2485 | rcu_read_unlock(); | |
2486 | continue; | |
2487 | } | |
2488 | ||
2489 | if (revents & LPOLLIN) { | |
2490 | if (conn->type != RELAY_DATA) { | |
beaad64c DG |
2491 | rcu_read_unlock(); |
2492 | continue; | |
2493 | } | |
beaad64c | 2494 | |
f0567343 MD |
2495 | ret = relay_process_data(conn); |
2496 | /* Connection closed */ | |
2497 | if (ret < 0) { | |
2498 | cleanup_connection_pollfd(&events, pollfd); | |
2499 | destroy_connection(relay_connections_ht, conn); | |
2500 | DBG("Data connection closed with %d", pollfd); | |
2501 | /* | |
2502 | * Every goto restart call sets the last seen fd, whereas | |
2503 | * here we don't really care since we gracefully | |
2504 | * continue the loop after the connection is deleted. | |
2505 | */ | |
2506 | } else { | |
2507 | /* Keep last seen data fd. */ | |
2508 | last_seen_data_fd = pollfd; | |
2509 | rcu_read_unlock(); | |
2510 | goto restart; | |
b8aa1682 JD |
2511 | } |
2512 | } | |
f0567343 | 2513 | rcu_read_unlock(); |
b8aa1682 | 2514 | } |
beaad64c | 2515 | last_seen_data_fd = -1; |
b8aa1682 JD |
2516 | } |
2517 | ||
f385ae0a MD |
2518 | /* Normal exit, no error */ |
2519 | ret = 0; | |
2520 | ||
095a4ae5 | 2521 | exit: |
b8aa1682 JD |
2522 | error: |
2523 | lttng_poll_clean(&events); | |
2524 | ||
e85cdca9 | 2525 | /* Cleanup remaining connection objects. */ |
9d1bbf21 | 2526 | rcu_read_lock(); |
e85cdca9 DG |
2527 | cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter, conn, |
2528 | sock_n.node) { | |
f385ae0a | 2529 | health_code_update(); |
e85cdca9 | 2530 | destroy_connection(relay_connections_ht, conn); |
b8aa1682 | 2531 | } |
94d49140 | 2532 | rcu_read_unlock(); |
7d2f7452 | 2533 | error_poll_create: |
6eb15779 JD |
2534 | rcu_read_lock(); |
2535 | cds_lfht_for_each_entry(indexes_ht->ht, &iter.iter, index, | |
2536 | index_n.node) { | |
2537 | health_code_update(); | |
2538 | relay_index_delete(index); | |
2539 | relay_index_free_safe(index); | |
2540 | } | |
2541 | rcu_read_unlock(); | |
7d2f7452 | 2542 | lttng_ht_destroy(indexes_ht); |
1c20f0e2 | 2543 | indexes_ht_error: |
b8aa1682 | 2544 | lttng_ht_destroy(relay_connections_ht); |
095a4ae5 | 2545 | relay_connections_ht_error: |
e85cdca9 DG |
2546 | /* Close relay conn pipes */ |
2547 | utils_close_pipe(relay_conn_pipe); | |
095a4ae5 MD |
2548 | if (err) { |
2549 | DBG("Thread exited with error"); | |
2550 | } | |
b8aa1682 | 2551 | DBG("Worker thread cleanup complete"); |
095a4ae5 | 2552 | free(data_buffer); |
4e5b8b82 | 2553 | error_testpoint: |
f385ae0a MD |
2554 | if (err) { |
2555 | health_error(); | |
2556 | ERR("Health error occurred in %s", __func__); | |
2557 | } | |
2558 | health_unregister(health_relayd); | |
9d1bbf21 | 2559 | rcu_unregister_thread(); |
f385ae0a | 2560 | stop_threads(); |
b8aa1682 JD |
2561 | return NULL; |
2562 | } | |
2563 | ||
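Illustrative aside (not part of the file): the worker loop above restarts its blocking poll when interrupted by a signal (errno == EINTR) instead of treating it as an error. The same idiom with plain poll(2), as a hedged sketch:

#include <errno.h>
#include <poll.h>

/* Block until at least one fd is ready, retrying on signal interruption. */
static int example_wait_for_events(struct pollfd *fds, nfds_t nfds)
{
	int ret;

	do {
		ret = poll(fds, nfds, -1);
	} while (ret < 0 && errno == EINTR);

	return ret;
}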
2564 | /* | |
2565 | * Create the relay connection pipe used to wake up the worker thread. | |
2566 | * Closed in cleanup(). | |
2567 | */ | |
e85cdca9 | 2568 | static int create_relay_conn_pipe(void) |
b8aa1682 | 2569 | { |
a02de639 | 2570 | int ret; |
b8aa1682 | 2571 | |
e85cdca9 | 2572 | ret = utils_create_pipe_cloexec(relay_conn_pipe); |
b8aa1682 | 2573 | |
b8aa1682 JD |
2574 | return ret; |
2575 | } | |
2576 | ||
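Illustrative aside (not part of the file): utils_create_pipe_cloexec() presumably opens both pipe ends with close-on-exec set so that exec'd children do not inherit them. On Linux the same effect can be obtained with pipe2(), shown here as a hedged sketch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Create a pipe with O_CLOEXEC on both ends; returns 0 on success, -1 on error. */
static int example_create_cloexec_pipe(int fds[2])
{
	return pipe2(fds, O_CLOEXEC);
}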
2577 | /* | |
2578 | * main | |
2579 | */ | |
2580 | int main(int argc, char **argv) | |
2581 | { | |
2582 | int ret = 0; | |
2583 | void *status; | |
d3e2ba59 | 2584 | struct relay_local_data *relay_ctx; |
b8aa1682 | 2585 | |
b8aa1682 JD |
2586 | /* Parse arguments */ |
2587 | progname = argv[0]; | |
c617c0c6 | 2588 | if ((ret = parse_args(argc, argv)) < 0) { |
a02de639 | 2589 | goto exit; |
b8aa1682 JD |
2590 | } |
2591 | ||
2592 | if ((ret = set_signal_handler()) < 0) { | |
2593 | goto exit; | |
2594 | } | |
2595 | ||
4d513a50 DG |
2596 | /* Try to create directory if -o, --output is specified. */ |
2597 | if (opt_output_path) { | |
994fa64f DG |
2598 | if (*opt_output_path != '/') { |
2599 | ERR("Please specify an absolute path for -o, --output PATH"); | |
2600 | goto exit; | |
2601 | } | |
2602 | ||
4d513a50 DG |
2603 | ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG); |
2604 | if (ret < 0) { | |
2605 | ERR("Unable to create %s", opt_output_path); | |
2606 | goto exit; | |
2607 | } | |
2608 | } | |
2609 | ||
b8aa1682 | 2610 | /* Daemonize */ |
d1a3048a | 2611 | if (opt_daemon || opt_background) { |
02a6bb53 MD |
2612 | int i; |
2613 | ||
2614 | ret = lttng_daemonize(&child_ppid, &recv_child_signal, | |
2615 | !opt_background); | |
b8aa1682 | 2616 | if (ret < 0) { |
a02de639 | 2617 | goto exit; |
b8aa1682 | 2618 | } |
02a6bb53 MD |
2619 | |
2620 | /* | |
2621 | * We are in the child. Make sure all other file | |
2622 | * descriptors are closed, in case we are called with | |
2623 | * more opened file descriptors than the standard ones. | |
2624 | */ | |
2625 | for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) { | |
2626 | (void) close(i); | |
2627 | } | |
2628 | } | |
2629 | ||
2630 | /* Create thread quit pipe */ | |
2631 | if ((ret = init_thread_quit_pipe()) < 0) { | |
2632 | goto error; | |
b8aa1682 JD |
2633 | } |
2634 | ||
1c20f0e2 JD |
2635 | /* We need those values for the file/dir creation. */ |
2636 | relayd_uid = getuid(); | |
2637 | relayd_gid = getgid(); | |
b8aa1682 | 2638 | |
1c20f0e2 JD |
2639 | /* Check if daemon is UID = 0 */ |
2640 | if (relayd_uid == 0) { | |
a6a062a4 AM |
2641 | if (control_uri->port < 1024 || data_uri->port < 1024 || |
2642 | live_uri->port < 1024) { | |
b8aa1682 JD |
2643 | ERR("Need to be root to use ports < 1024"); |
2644 | ret = -1; | |
a02de639 | 2645 | goto exit; |
b8aa1682 JD |
2646 | } |
2647 | } | |
2648 | ||
2649 | /* Setup the relay connection communication pipe. */ | |
e85cdca9 | 2650 | if ((ret = create_relay_conn_pipe()) < 0) { |
b8aa1682 JD |
2651 | goto exit; |
2652 | } | |
2653 | ||
2654 | /* Init relay command queue. */ | |
e85cdca9 | 2655 | cds_wfq_init(&relay_conn_queue.queue); |
b8aa1682 JD |
2656 | |
2657 | /* Set up max poll set size */ | |
2658 | lttng_poll_set_max_size(); | |
2659 | ||
554831e7 MD |
2660 | /* Initialize communication library */ |
2661 | lttcomm_init(); | |
d5a5e58f | 2662 | lttcomm_inet_init(); |
554831e7 | 2663 | |
d3e2ba59 JD |
2664 | relay_ctx = zmalloc(sizeof(struct relay_local_data)); |
2665 | if (!relay_ctx) { | |
2666 | PERROR("relay_ctx"); | |
2667 | goto exit; | |
2668 | } | |
2669 | ||
2670 | /* tables of sessions indexed by session ID */ | |
eb702af5 | 2671 | relay_ctx->sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); |
d3e2ba59 JD |
2672 | if (!relay_ctx->sessions_ht) { |
2673 | goto exit_relay_ctx_sessions; | |
2674 | } | |
2675 | ||
2676 | /* tables of streams indexed by stream ID */ | |
eb702af5 | 2677 | relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); |
d3e2ba59 JD |
2678 | if (!relay_streams_ht) { |
2679 | goto exit_relay_ctx_streams; | |
2680 | } | |
2681 | ||
2682 | /* tables of viewer streams indexed by stream ID */ | |
92c6ca54 DG |
2683 | viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); |
2684 | if (!viewer_streams_ht) { | |
d3e2ba59 JD |
2685 | goto exit_relay_ctx_viewer_streams; |
2686 | } | |
2687 | ||
55706a7d MD |
2688 | /* Initialize thread health monitoring */ |
2689 | health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES); | |
2690 | if (!health_relayd) { | |
2691 | PERROR("health_app_create error"); | |
2692 | goto exit_health_app_create; | |
2693 | } | |
2694 | ||
65931c8b MD |
2695 | ret = utils_create_pipe(health_quit_pipe); |
2696 | if (ret < 0) { | |
2697 | goto error_health_pipe; | |
2698 | } | |
2699 | ||
2700 | /* Create thread to manage the health check socket */ | |
2701 | ret = pthread_create(&health_thread, NULL, | |
2702 | thread_manage_health, (void *) NULL); | |
2703 | if (ret != 0) { | |
2704 | PERROR("pthread_create health"); | |
2705 | goto health_error; | |
2706 | } | |
2707 | ||
b8aa1682 JD |
2708 | /* Setup the dispatcher thread */ |
2709 | ret = pthread_create(&dispatcher_thread, NULL, | |
2710 | relay_thread_dispatcher, (void *) NULL); | |
2711 | if (ret != 0) { | |
2712 | PERROR("pthread_create dispatcher"); | |
2713 | goto exit_dispatcher; | |
2714 | } | |
2715 | ||
2716 | /* Setup the worker thread */ | |
2717 | ret = pthread_create(&worker_thread, NULL, | |
d3e2ba59 | 2718 | relay_thread_worker, (void *) relay_ctx); |
b8aa1682 JD |
2719 | if (ret != 0) { |
2720 | PERROR("pthread_create worker"); | |
2721 | goto exit_worker; | |
2722 | } | |
2723 | ||
2724 | /* Setup the listener thread */ | |
2725 | ret = pthread_create(&listener_thread, NULL, | |
2726 | relay_thread_listener, (void *) NULL); | |
2727 | if (ret != 0) { | |
2728 | PERROR("pthread_create listener"); | |
2729 | goto exit_listener; | |
2730 | } | |
2731 | ||
3557d456 | 2732 | ret = live_start_threads(live_uri, relay_ctx); |
d3e2ba59 JD |
2733 | if (ret != 0) { |
2734 | ERR("Starting live viewer threads"); | |
50138f51 | 2735 | goto exit_live; |
d3e2ba59 JD |
2736 | } |
2737 | ||
50138f51 | 2738 | exit_live: |
b8aa1682 JD |
2739 | ret = pthread_join(listener_thread, &status); |
2740 | if (ret != 0) { | |
2741 | PERROR("pthread_join"); | |
2742 | goto error; /* join error, exit without cleanup */ | |
2743 | } | |
2744 | ||
50138f51 | 2745 | exit_listener: |
b8aa1682 JD |
2746 | ret = pthread_join(worker_thread, &status); |
2747 | if (ret != 0) { | |
2748 | PERROR("pthread_join"); | |
2749 | goto error; /* join error, exit without cleanup */ | |
2750 | } | |
2751 | ||
50138f51 | 2752 | exit_worker: |
b8aa1682 JD |
2753 | ret = pthread_join(dispatcher_thread, &status); |
2754 | if (ret != 0) { | |
2755 | PERROR("pthread_join"); | |
2756 | goto error; /* join error, exit without cleanup */ | |
2757 | } | |
42415026 | 2758 | |
50138f51 | 2759 | exit_dispatcher: |
65931c8b MD |
2760 | ret = pthread_join(health_thread, &status); |
2761 | if (ret != 0) { | |
2762 | PERROR("pthread_join health thread"); | |
2763 | goto error; /* join error, exit without cleanup */ | |
2764 | } | |
2765 | ||
bd11e201 MD |
2766 | /* |
2767 | * Stop live threads only after joining other threads. | |
2768 | */ | |
2769 | live_stop_threads(); | |
2770 | ||
65931c8b MD |
2771 | health_error: |
2772 | utils_close_pipe(health_quit_pipe); | |
2773 | ||
2774 | error_health_pipe: | |
55706a7d MD |
2775 | health_app_destroy(health_relayd); |
2776 | ||
2777 | exit_health_app_create: | |
92c6ca54 | 2778 | lttng_ht_destroy(viewer_streams_ht); |
d3e2ba59 JD |
2779 | |
2780 | exit_relay_ctx_viewer_streams: | |
2781 | lttng_ht_destroy(relay_streams_ht); | |
2782 | ||
2783 | exit_relay_ctx_streams: | |
2784 | lttng_ht_destroy(relay_ctx->sessions_ht); | |
2785 | ||
2786 | exit_relay_ctx_sessions: | |
2787 | free(relay_ctx); | |
b8aa1682 JD |
2788 | |
2789 | exit: | |
2790 | cleanup(); | |
2791 | if (!ret) { | |
2792 | exit(EXIT_SUCCESS); | |
2793 | } | |
a02de639 | 2794 | |
b8aa1682 JD |
2795 | error: |
2796 | exit(EXIT_FAILURE); | |
2797 | } |