Fix: clarify sessiond ust app queue cmd comment
src/bin/lttng-sessiond/main.c (lttng-tools.git)
/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#include <getopt.h>
#include <grp.h>
#include <limits.h>
#include <paths.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <urcu/uatomic.h>
#include <unistd.h>
#include <config.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/daemonize.h>
#include <common/config/config.h>

#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "channel.h"
#include "cmd.h"
#include "consumer.h"
#include "context.h"
#include "event.h"
#include "kernel.h"
#include "kernel-consumer.h"
#include "modprobe.h"
#include "shm.h"
#include "ust-ctl.h"
#include "ust-consumer.h"
#include "utils.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "testpoint.h"
#include "ust-thread.h"
#include "jul-thread.h"
#include "save.h"

#define CONSUMERD_FILE	"lttng-consumerd"

const char *progname;
static const char *tracing_group_name = DEFAULT_TRACING_GROUP;
static int tracing_group_name_override;
static char *opt_pidfile;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon, opt_background;
static int opt_no_kernel;
static pid_t ppid;          /* Parent PID for --sig-parent option */
static pid_t child_ppid;    /* Internal parent PID used with daemonize. */
static char *rundir;

/* Set to 1 when a SIGUSR1 signal is received. */
static int recv_child_signal;

/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};

/* Command line options */
static const struct option long_options[] = {
	{ "client-sock", 1, 0, 'c' },
	{ "apps-sock", 1, 0, 'a' },
	{ "kconsumerd-cmd-sock", 1, 0, 'C' },
	{ "kconsumerd-err-sock", 1, 0, 'E' },
	{ "ustconsumerd32-cmd-sock", 1, 0, 'G' },
	{ "ustconsumerd32-err-sock", 1, 0, 'H' },
	{ "ustconsumerd64-cmd-sock", 1, 0, 'D' },
	{ "ustconsumerd64-err-sock", 1, 0, 'F' },
	{ "consumerd32-path", 1, 0, 'u' },
	{ "consumerd32-libdir", 1, 0, 'U' },
	{ "consumerd64-path", 1, 0, 't' },
	{ "consumerd64-libdir", 1, 0, 'T' },
	{ "daemonize", 0, 0, 'd' },
	{ "background", 0, 0, 'b' },
	{ "sig-parent", 0, 0, 'S' },
	{ "help", 0, 0, 'h' },
	{ "group", 1, 0, 'g' },
	{ "version", 0, 0, 'V' },
	{ "quiet", 0, 0, 'q' },
	{ "verbose", 0, 0, 'v' },
	{ "verbose-consumer", 0, 0, 'Z' },
	{ "no-kernel", 0, 0, 'N' },
	{ "pidfile", 1, 0, 'p' },
	{ "jul-tcp-port", 1, 0, 'J' },
	{ "config", 1, 0, 'f' },
	{ NULL, 0, 0, 0 }
};

/* Command line options to ignore from configuration file */
static const char *config_ignore_options[] = { "help", "version", "config" };

/* Shared between threads */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* Global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

int apps_cmd_notify_pipe[2] = { -1, -1 };

/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
static pthread_t ht_cleanup_thread;
static pthread_t jul_reg_thread;

/*
 * UST registration command queue. This queue is tied with a futex and uses an
 * N wakers / 1 waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_registration_apps and thread_dispatch_ust_registration threads
 * interact with this queue along with the wait/wake scheme. Down the line,
 * thread_manage_apps receives the new application socket and monitors it for
 * any I/O error or clean close that triggers an unregistration of the
 * application.
 */
static struct ust_cmd_queue ust_cmd_queue;
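
/*
 * Usage sketch of the wake/wait scheme around this queue, mirroring the
 * calls made by thread_registration_apps and
 * thread_dispatch_ust_registration further down in this file:
 *
 *   A waker enqueues a command and wakes the single waiter:
 *       cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
 *       futex_nto1_wake(&ust_cmd_queue.futex);
 *
 *   The waiter drains the queue, then blocks until the next wake-up:
 *       futex_nto1_prepare(&ust_cmd_queue.futex);
 *       while ((node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue))) {
 *               ... handle the dequeued ust_command ...
 *       }
 *       futex_nto1_wait(&ust_cmd_queue.futex);
 */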

/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;

int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
static int consumerd32_bin_override;
static int consumerd64_bin_override;
static int consumerd32_libdir_override;
static int consumerd64_libdir_override;

static const char *module_proc_lttng = "/proc/lttng";

/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd arrives
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd try to talk to consumer
 *                                    ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;

/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;

/* Set in main() with the current page size. */
long page_size;

/* Application health monitoring */
struct health_app *health_sessiond;

/* JUL TCP port for registration. Used by the JUL thread. */
unsigned int jul_tcp_port = DEFAULT_JUL_TCP_PORT;

/* Am I root or not. */
int is_root;			/* Set to 1 if the daemon is running as root */

const char * const config_section_name = "sessiond";

/*
 * Whether sessiond is ready for commands/health check requests.
 * NR_LTTNG_SESSIOND_READY must match the number of calls to
 * lttng_sessiond_notify_ready().
 */
#define NR_LTTNG_SESSIOND_READY		2
int lttng_sessiond_ready = NR_LTTNG_SESSIOND_READY;
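
/*
 * Readiness is a countdown: each call to lttng_sessiond_notify_ready()
 * below decrements lttng_sessiond_ready, and the call that brings it to
 * zero (the NR_LTTNG_SESSIOND_READY-th one, presumably made once both the
 * client command and health check services are listening) performs the
 * parent notifications. The exact call sites are an assumption; they are
 * not shown in this excerpt.
 */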

/* Notify parents that we are ready for cmd and health check */
static
void lttng_sessiond_notify_ready(void)
{
	if (uatomic_sub_return(&lttng_sessiond_ready, 1) == 0) {
		/*
		 * Notify the parent pid that we are ready to accept commands
		 * on the client side. This ppid is the one from the external
		 * process that spawned us.
		 */
		if (opt_sig_parent) {
			kill(ppid, SIGUSR1);
		}

		/*
		 * Notify the parent of the fork() process that we are
		 * ready.
		 */
		if (opt_daemon || opt_background) {
			kill(child_ppid, SIGUSR1);
		}
	}
}

static
void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * runtime env. var. overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
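
/*
 * Example (sketch): overriding the 32-bit consumer at runtime through the
 * environment variables read above. The installation path used here is
 * hypothetical.
 *
 *   $ LTTNG_CONSUMERD32_BIN=/opt/lttng32/bin/lttng-consumerd \
 *     LTTNG_CONSUMERD32_LIBDIR=/opt/lttng32/lib lttng-sessiond
 */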

/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
{
	int ret;

	assert(events);

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
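
/*
 * Typical caller pattern (sketch), mirrored by the thread functions below:
 * create the pollset with the quit pipe already in it, add the thread's own
 * FDs, then check the quit pipe first on every wakeup.
 *
 *   ret = sessiond_set_thread_pollset(&events, 2);
 *   ret = lttng_poll_add(&events, my_fd, LPOLLIN | LPOLLRDHUP);
 *   while (1) {
 *           ret = lttng_poll_wait(&events, -1);
 *           for (i = 0; i < ret; i++) {
 *                   if (sessiond_check_thread_quit_pipe(
 *                                   LTTNG_POLL_GETFD(&events, i),
 *                                   LTTNG_POLL_GETEV(&events, i))) {
 *                           goto exit;
 *                   }
 *                   ... handle the thread's own FDs ...
 *           }
 *   }
 */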

/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered, else 0.
 */
int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}

/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	int ret, i;

	ret = pipe(thread_quit_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error;
		}
	}

error:
	return ret;
}
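
/*
 * Note (sketch only): since _GNU_SOURCE is defined at the top of this
 * file, an equivalent and race-free alternative on Linux (>= 2.6.27)
 * would be a single call setting the close-on-exec flag atomically:
 *
 *   ret = pipe2(thread_quit_pipe, O_CLOEXEC);
 *
 * The two-step pipe()/fcntl() version above is what this file actually
 * uses.
 */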

/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}

/*
 * Close all consumer sockets.
 */
static void close_consumer_sockets(void)
{
	int ret;

	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
}

/*
 * Cleanup the daemon
 */
static void cleanup(void)
{
	int ret;
	struct ltt_session *sess, *stmp;
	char path[PATH_MAX];

	DBG("Cleaning up");

	/*
	 * Close the thread quit pipe. It has already done its job,
	 * since we are now called.
	 */
	utils_close_pipe(thread_quit_pipe);

	/*
	 * If opt_pidfile is undefined, the default file will be wiped when
	 * removing the rundir.
	 */
	if (opt_pidfile) {
		ret = remove(opt_pidfile);
		if (ret < 0) {
			PERROR("remove pidfile %s", opt_pidfile);
		}
	}

	DBG("Removing sessiond and consumerd content of directory %s", rundir);

	/* sessiond */
	snprintf(path, PATH_MAX,
		"%s/%s",
		rundir, DEFAULT_LTTNG_SESSIOND_PIDFILE);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX, "%s/%s", rundir,
			DEFAULT_LTTNG_SESSIOND_JULPORT_FILE);
	DBG("Removing %s", path);
	(void) unlink(path);

	/* kconsumerd */
	snprintf(path, PATH_MAX,
		DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
		rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
		DEFAULT_KCONSUMERD_PATH,
		rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/* ust consumerd 32 */
	snprintf(path, PATH_MAX,
		DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
		rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
		DEFAULT_USTCONSUMERD32_PATH,
		rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/* ust consumerd 64 */
	snprintf(path, PATH_MAX,
		DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
		rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
		DEFAULT_USTCONSUMERD64_PATH,
		rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/*
	 * We do NOT rmdir rundir because there are other processes
	 * using it, for instance lttng-relayd, which can start in
	 * parallel with this teardown.
	 */

	free(rundir);

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Clean up ALL sessions */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	DBG("Closing all UST sockets");
	ust_app_clean_list();
	buffer_reg_destroy_registries();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	close_consumer_sockets();

	/*
	 * If an override option is set, the pointer actually refers to
	 * non-const memory, so it is safe to free it even though the variable
	 * type is declared const.
	 */
	if (tracing_group_name_override) {
		free((void *) tracing_group_name);
	}
	if (consumerd32_bin_override) {
		free((void *) consumerd32_bin);
	}
	if (consumerd64_bin_override) {
		free((void *) consumerd64_bin);
	}
	if (consumerd32_libdir_override) {
		free((void *) consumerd32_libdir);
	}
	if (consumerd64_libdir_override) {
		free((void *) consumerd64_libdir);
	}

	if (opt_pidfile) {
		free(opt_pidfile);
	}

	/* <fun> */
	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
	/* </fun> */
}

/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}

/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}

/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
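
/*
 * On the application side, lttng-ust presumably maps the same wait shm and
 * blocks in a futex wait on this 32-bit word until it becomes non-zero,
 * at which point the app connects to apps_unix_sock_path and registers.
 * This description is an assumption inferred from the wake-up above; see
 * shm.c and futex.c for the sessiond half of the handshake.
 */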

/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return total size of the buffer pointed by buf.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	int ret, buf_size;

	buf_size = size;

	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		PERROR("zmalloc");
		ret = -ENOMEM;
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;

	return buf_size;

error:
	return ret;
}
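
/*
 * Resulting reply layout (sketch): the lttcomm_lttng_msg header is
 * immediately followed by the command-specific payload, and
 * cmd_ctx->lttng_msg_size covers both:
 *
 *   [ struct lttcomm_lttng_msg | payload (llm->data_size bytes) ]
 */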

/*
 * Update the kernel poll set with all channel fds available over all tracing
 * sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}

/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel streams and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}
				/* Update the stream global counter */
				ksess->stream_count_global += ret;

				/*
				 * Have we already sent fds to the consumer? If yes, it
				 * means that tracing is started so it is safe to send
				 * our updated stream fds.
				 */
				if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
					struct lttng_ht_iter iter;
					struct consumer_socket *socket;

					rcu_read_lock();
					cds_lfht_for_each_entry(ksess->consumer->socks->ht,
							&iter.iter, socket, node.node) {
						pthread_mutex_lock(socket->lock);
						ret = kernel_consumer_send_channel_stream(socket,
								channel, ksess,
								session->output_traces ? 1 : 0);
						pthread_mutex_unlock(socket->lock);
						if (ret < 0) {
							rcu_read_unlock();
							goto error;
						}
					}
					rcu_read_unlock();
				}
				goto error;
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}

/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}
}

/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * The first step of the while loop cleans this structure, which could
	 * free non-NULL pointers, so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = sessiond_set_thread_pollset(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Poll infinite value of time */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
				"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				(void) lttng_read(kernel_poll_pipe[0],
						&tmp, 1);
				/*
				 * Ret value is useless here; if this pipe gets any
				 * action, an update is required anyway.
				 */
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
					break;
					/*
					 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
					 * and unregister kernel stream at this point.
					 */
				}
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}

/*
 * Signal the consumer data's pthread condition to indicate a change in the
 * state of its thread.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value; it's the
	 * waiter's job to correctly interpret this condition variable
	 * associated to the consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread bootstrap.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
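
/*
 * The matching waiter is spawn_consumer_thread() below: it blocks in
 * pthread_cond_timedwait() on data->cond (with a CLOCK_MONOTONIC absolute
 * timeout) until consumer_thread_is_ready leaves 0 or the wait times out.
 */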

/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);

	health_code_update();

	/*
	 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
	 * metadata_sock. Nothing more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 3);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_entry();

	if (testpoint(sessiond_thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		/* Thread quit pipe has been closed. Killing thread. */
		ret = sessiond_check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update();
	if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		/* Connect both sockets, command and metadata. */
		consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		consumer_data->metadata_fd =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0
				|| consumer_data->metadata_fd < 0) {
			PERROR("consumer connect cmd socket");
			/* On error, signal condition and quit. */
			signal_consumer_condition(consumer_data, -1);
			goto error;
		}
		consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
		/* Create metadata socket lock. */
		consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
		if (consumer_data->metadata_sock.lock == NULL) {
			PERROR("zmalloc pthread mutex");
			ret = -1;
			goto error;
		}
		pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);

		signal_consumer_condition(consumer_data, 1);
		DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
		DBG("Consumer metadata socket ready (fd: %d)",
				consumer_data->metadata_fd);
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the consumerd error sock since we've established a connection */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	/* Add metadata socket that is successfully connected. */
	ret = lttng_poll_add(&events, consumer_data->metadata_fd,
			LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart_poll:
	while (1) {
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart_poll;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			if (pollfd == sock) {
				/* Event on the consumerd socket */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("consumer err socket second poll error");
					goto error;
				}
				health_code_update();
				/* Wait for any kconsumerd error */
				ret = lttcomm_recv_unix_sock(sock, &code,
						sizeof(enum lttcomm_return_code));
				if (ret <= 0) {
					ERR("consumer closed the command socket");
					goto error;
				}

				ERR("consumer return code : %s",
						lttcomm_get_readable_code(-code));

				goto exit;
			} else if (pollfd == consumer_data->metadata_fd) {
				/* UST metadata requests */
				ret = ust_consumer_metadata_request(
						&consumer_data->metadata_sock);
				if (ret < 0) {
					ERR("Handling metadata request");
					goto error;
				}
				break;
			} else {
				ERR("Unknown pollfd");
				goto error;
			}
		}
		health_code_update();
	}

exit:
error:
	/*
	 * We lock here because we are about to close the sockets and some other
	 * thread might be using them, so get exclusive access, which will abort
	 * all other consumer commands by other threads.
	 */
	pthread_mutex_lock(&consumer_data->lock);

	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->err_sock = -1;
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->cmd_sock = -1;
	}
	if (consumer_data->metadata_sock.fd_ptr &&
			*consumer_data->metadata_sock.fd_ptr >= 0) {
		ret = close(*consumer_data->metadata_sock.fd_ptr);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;
	pthread_mutex_unlock(&consumer_data->lock);

	/* Cleanup metadata socket mutex. */
	if (consumer_data->metadata_sock.lock) {
		pthread_mutex_destroy(consumer_data->metadata_sock.lock);
		free(consumer_data->metadata_sock.lock);
	}
	lttng_poll_clean(&events);
error_poll:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("consumer thread cleanup completed");

	return NULL;
}

/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	ssize_t size_ret;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);

	if (testpoint(sessiond_thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					int sock;

					/* Empty pipe */
					size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
					if (size_ret < sizeof(sock)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * We only monitor the error events of the socket. This
					 * thread does not handle any incoming data from UST
					 * (POLLIN).
					 */
					ret = lttng_poll_add(&events, sock,
							LPOLLERR | LPOLLHUP | LPOLLRDHUP);
					if (ret < 0) {
						goto error;
					}

					DBG("Apps with sock %d added to poll set", sock);
				}
			} else {
				/*
				 * At this point, we know that a registered application
				 * triggered the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
				}
			}

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled, so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}

/*
 * Send a socket to a thread. This is called from the dispatch UST
 * registration thread once all sockets are set for the application.
 *
 * The sock value can be invalid, we don't really care, the thread will handle
 * it and make the necessary cleanup if so.
 *
 * On success, return 0, else a negative value being the errno of the failed
 * write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	ssize_t ret;

	/*
	 * It is possible that the FD was concurrently set to -1 just before this
	 * call, when the thread is in a shutdown state.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	ret = lttng_write(fd, &sock, sizeof(sock));
	if (ret < sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the write positive ret value. */
	ret = 0;
error:
	return (int) ret;
}
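
/*
 * Both pipes fed by this helper carry a plain int socket FD: the dispatch
 * thread below writes to apps_cmd_pipe[1] and apps_cmd_notify_pipe[1],
 * while thread_manage_apps (and presumably the UST notify thread, see
 * ust-thread.h) reads a sizeof(int) chunk on the other end.
 */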

/*
 * Sanitize the wait queue of the dispatch registration thread, meaning remove
 * invalid nodes from it. This avoids memory leaks in the case where the UST
 * notify socket is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;

	assert(wait_queue);

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue->head, head) {
		assert(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock,
				LPOLLHUP | LPOLLERR);
		if (ret < 0) {
			goto error;
		}

		fd_added = 1;
	}

	if (!fd_added) {
		goto end;
	}

	/*
	 * Poll but don't block so we can quickly identify the faulty events and
	 * clean them afterwards from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);
	if (ret < 0) {
		goto error;
	}
	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
				&wait_queue->head, head) {
			if (pollfd == wait_node->app->sock &&
					(revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
				free(wait_node);
				break;
			}
		}
	}

	if (nb_fd > 0) {
		DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);
	}

end:
	lttng_poll_clean(&events);
	return;

error:
	lttng_poll_clean(&events);
error_create:
	ERR("Unable to sanitize wait queue");
	return;
}

/*
 * Dispatch requests from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret, err = -1;
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
	};

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);

	if (testpoint(sessiond_thread_app_reg_dispatch)) {
		goto error_testpoint;
	}

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			struct ust_app *app = NULL;
			ust_cmd = NULL;

			/*
			 * Make sure we don't have node(s) that have hung up before
			 * receiving the notify socket. This is to clean the list in
			 * order to avoid memory leaks from notify sockets that are
			 * never seen.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
				wait_node = zmalloc(sizeof(*wait_node));
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(ust_cmd);
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create application object if socket is CMD. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg,
						ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(wait_node);
					free(ust_cmd);
					continue;
				}
				/*
				 * Add application to the wait queue so we can set the notify
				 * socket before putting this object in the global ht.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);
				wait_queue.count++;

				free(ust_cmd);
				/*
				 * We have to continue here since we don't have the notify
				 * socket and the application MUST be added to the hash table
				 * only at that moment.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and set
				 * the notify socket if found.
				 */
				cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
						&wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						wait_queue.count--;
						app = wait_node->app;
						free(wait_node);
						DBG3("UST app notify socket %d is set", ust_cmd->sock);
						break;
					}
				}

				/*
				 * With no application at this stage the received socket is
				 * basically useless so close it before we free the cmd data
				 * structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
				}
				free(ust_cmd);
			}

			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so from the register up to
				 * the registration done message, no thread can see the
				 * application and change its state.
				 */
				session_lock_list();
				rcu_read_lock();

				/*
				 * Add application to the global hash table. This needs to be
				 * done before the update to the UST registry can locate the
				 * application.
				 */
				ust_app_add(app);

				/* Set app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				/* Send notify socket through the notify pipe. */
				ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
						app->notify_sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No notify thread, stop the UST tracing. However,
					 * this is not an internal error of this thread, thus
					 * we set the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				/*
				 * Update the newly registered application with the tracing
				 * registry information that is already enabled.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about return value. Let the manage apps threads
				 * handle app unregistration upon socket close.
				 */
				(void) ust_app_register_done(app->sock);

				/*
				 * Even if the application socket has been closed, send the
				 * app to the thread and unregistration will take place at
				 * that place.
				 */
				ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No apps thread, stop the UST tracing. However, this
					 * is not an internal error of this thread, thus we set
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				rcu_read_unlock();
				session_unlock_list();
			}
		} while (node != NULL);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
		health_poll_exit();
	}
	/* Normal exit, no error */
	err = 0;

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue.head, head) {
		cds_list_del(&wait_node->head);
		wait_queue.count--;
		free(wait_node);
	}

error_testpoint:
	DBG("Dispatch thread dying");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	return NULL;
}

/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Gets allocated in this thread, enqueued to a global queue, dequeued
	 * and freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);

	if (testpoint(sessiond_thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
			"Execution continues but there might be problems for\n"
			"already-running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set socket timeout for both receiving and sending.
					 * app_socket_timeout is in seconds, whereas
					 * lttcomm_setsockopt_rcv_timeout and
					 * lttcomm_setsockopt_snd_timeout expect msec as
					 * parameter.
					 */
					(void) lttcomm_setsockopt_rcv_timeout(sock,
							app_socket_timeout * 1000);
					(void) lttcomm_setsockopt_snd_timeout(sock,
							app_socket_timeout * 1000);

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}

					health_code_update();
					ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
					if (ret < 0) {
						free(ust_cmd);
						/* Close socket of the application. */
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}
					health_code_update();

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This app will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

exit:
error:
	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);

	return NULL;
}
2067
2068 /*
2069 * Start the thread_manage_consumer. This must be done after the lttng-consumerd
2070 * exec or it will fail.
2071 */
2072 static int spawn_consumer_thread(struct consumer_data *consumer_data)
2073 {
2074 int ret, clock_ret;
2075 struct timespec timeout;
2076
2077 /* Make sure we set the readiness flag to 0 because we are NOT ready */
2078 consumer_data->consumer_thread_is_ready = 0;
2079
2080 /* Setup pthread condition */
2081 ret = pthread_condattr_init(&consumer_data->condattr);
2082 if (ret != 0) {
2083 errno = ret;
2084 PERROR("pthread_condattr_init consumer data");
2085 goto error;
2086 }
2087
2088 /*
2089 * Set the monotonic clock in order to make sure we DO NOT jump in time
2090 * between the clock_gettime() call and the timedwait call. See bug #324
2091 * for more details on how we noticed it.
2092 */
2093 ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
2094 if (ret != 0) {
2095 errno = ret;
2096 PERROR("pthread_condattr_setclock consumer data");
2097 goto error;
2098 }
2099
2100 ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
2101 if (ret != 0) {
2102 errno = ret;
2103 PERROR("pthread_cond_init consumer data");
2104 goto error;
2105 }
2106
2107 ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
2108 consumer_data);
2109 if (ret != 0) {
2110 PERROR("pthread_create consumer");
2111 ret = -1;
2112 goto error;
2113 }
2114
2115 /* We are about to wait on a pthread condition */
2116 pthread_mutex_lock(&consumer_data->cond_mutex);
2117
2118 /* Get the current time for the pthread_cond_timedwait() absolute timeout */
2119 clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
2120 /*
2121 * Set the timeout for the condition timed wait even if the clock gettime
2122 * call fails, since we might loop on that call and we want to avoid
2123 * incrementing the timeout multiple times.
2124 */
2125 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
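/*
 * Note that pthread_cond_timedwait() interprets this absolute timeout
 * against the clock selected on the condattr, which is why the
 * clock_gettime() call above must use the same CLOCK_MONOTONIC as the
 * pthread_condattr_setclock() call earlier in this function.
 */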
2126
2127 /*
2128 * The following loop could be skipped entirely under some conditions, so
2129 * ret is initialized to 0 to guarantee the post-wait error handling sees
2130 * a sane value even when no wait is performed.
2131 */
2132 ret = 0;
2133
2134 /*
2135 * Loop until the condition is signalled or a timeout is reached. Note
2136 * that the POSIX pthread_cond_timedwait() specification states that EINTR
2137 * cannot be returned, but the glibc pthread_cond(3) documentation says it
2138 * can happen. This loop does not take any chances and handles both
2139 * behaviours.
2140 */
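/*
 * Re-reading consumer_thread_is_ready in the loop predicate also guards
 * against spurious wakeups, which POSIX explicitly permits for condition
 * variables.
 */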
2141 while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
2142 if (clock_ret < 0) {
2143 PERROR("clock_gettime spawn consumer");
2144 /* Infinite wait for the consumerd thread to be ready */
2145 ret = pthread_cond_wait(&consumer_data->cond,
2146 &consumer_data->cond_mutex);
2147 } else {
2148 ret = pthread_cond_timedwait(&consumer_data->cond,
2149 &consumer_data->cond_mutex, &timeout);
2150 }
2151 }
2152
2153 /* Release the mutex protecting the pthread condition */
2154 pthread_mutex_unlock(&consumer_data->cond_mutex);
2155
2156 if (ret != 0) {
2157 errno = ret;
2158 if (ret == ETIMEDOUT) {
2159 int pth_ret;
2160
2161 /*
2162 * The wait timed out, so kill the consumer thread and return
2163 * an error.
2164 */
2165 ERR("Condition timed out. The consumer thread was never ready."
2166 " Killing it");
2167 pth_ret = pthread_cancel(consumer_data->thread);
2168 if (pth_ret != 0) {
errno = pth_ret;
2169 PERROR("pthread_cancel consumer thread");
2170 }
2171 } else {
2172 PERROR("pthread_cond_wait failed consumer thread");
2173 }
2174 /* Caller is expecting a negative value on failure. */
2175 ret = -1;
2176 goto error;
2177 }
2178
2179 pthread_mutex_lock(&consumer_data->pid_mutex);
2180 if (consumer_data->pid == 0) {
2181 ERR("Consumerd did not start");
2182 pthread_mutex_unlock(&consumer_data->pid_mutex);
/* Report failure to the caller, which expects a negative value. */
ret = -1;
2183 goto error;
2184 }
2185 pthread_mutex_unlock(&consumer_data->pid_mutex);
2186
2187 return 0;
2188
2189 error:
2190 return ret;
2191 }
2192
2193 /*
2194 * Join consumer thread
2195 */
2196 static int join_consumer_thread(struct consumer_data *consumer_data)
2197 {
2198 void *status;
2199
2200 /* Consumer pid must be a real one. */
2201 if (consumer_data->pid > 0) {
2202 int ret;
2203 ret = kill(consumer_data->pid, SIGTERM);
2204 if (ret) {
2205 ERR("Error killing consumer daemon");
2206 return ret;
2207 }
2208 return pthread_join(consumer_data->thread, &status);
2209 } else {
2210 return 0;
2211 }
2212 }
2213
2214 /*
2215 * Fork and exec a consumer daemon (consumerd).
2216 *
2217 * Return the child pid on success, a negative value on error.
2218 */
2219 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
2220 {
2221 int ret;
2222 pid_t pid;
2223 const char *consumer_to_use;
2224 const char *verbosity;
2225 struct stat st;
2226
2227 DBG("Spawning consumerd");
2228
2229 pid = fork();
2230 if (pid == 0) {
2231 /*
2232 * Exec consumerd.
2233 */
2234 if (opt_verbose_consumer) {
2235 verbosity = "--verbose";
2236 } else {
2237 verbosity = "--quiet";
2238 }
2239 switch (consumer_data->type) {
2240 case LTTNG_CONSUMER_KERNEL:
2241 /*
2242 * Find out which consumerd to execute. We will first try the
2243 * 64-bit path, then the sessiond's installation directory, and
2244 * fall back on the 32-bit one.
2245 */
2246 DBG3("Looking for a kernel consumer at these locations:");
2247 DBG3(" 1) %s", consumerd64_bin);
2248 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
2249 DBG3(" 3) %s", consumerd32_bin);
2250 if (stat(consumerd64_bin, &st) == 0) {
2251 DBG3("Found location #1");
2252 consumer_to_use = consumerd64_bin;
2253 } else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
2254 DBG3("Found location #2");
2255 consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
2256 } else if (stat(consumerd32_bin, &st) == 0) {
2257 DBG3("Found location #3");
2258 consumer_to_use = consumerd32_bin;
2259 } else {
2260 DBG("Could not find any valid consumerd executable");
2261 ret = -EINVAL;
2262 break;
2263 }
2264 DBG("Using kernel consumer at: %s", consumer_to_use);
2265 ret = execl(consumer_to_use,
2266 "lttng-consumerd", verbosity, "-k",
2267 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2268 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2269 "--group", tracing_group_name,
2270 NULL);
2271 break;
2272 case LTTNG_CONSUMER64_UST:
2273 {
2274 char *tmpnew = NULL;
2275
2276 if (consumerd64_libdir[0] != '\0') {
2277 char *tmp;
2278 size_t tmplen;
2279
2280 tmp = getenv("LD_LIBRARY_PATH");
2281 if (!tmp) {
2282 tmp = "";
2283 }
2284 tmplen = strlen("LD_LIBRARY_PATH=")
2285 + strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
2286 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2287 if (!tmpnew) {
2288 ret = -ENOMEM;
2289 goto error;
2290 }
2291 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2292 strcat(tmpnew, consumerd64_libdir);
2293 if (tmp[0] != '\0') {
2294 strcat(tmpnew, ":");
2295 strcat(tmpnew, tmp);
2296 }
2297 ret = putenv(tmpnew);
2298 if (ret) {
2299 ret = -errno;
2300 free(tmpnew);
2301 goto error;
2302 }
2303 }
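/*
 * Assuming a previous LD_LIBRARY_PATH was present, the environment entry
 * built above has the form:
 *
 *     LD_LIBRARY_PATH=<consumerd64_libdir>:<previous LD_LIBRARY_PATH>
 *
 * so the dynamic linker resolves the 64-bit consumer libraries first.
 * The 32-bit case below mirrors this construction.
 */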
2304 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
2305 ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
2306 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2307 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2308 "--group", tracing_group_name,
2309 NULL);
2310 if (consumerd64_libdir[0] != '\0') {
2311 free(tmpnew);
2312 }
2313 break;
2314 }
2315 case LTTNG_CONSUMER32_UST:
2316 {
2317 char *tmpnew = NULL;
2318
2319 if (consumerd32_libdir[0] != '\0') {
2320 char *tmp;
2321 size_t tmplen;
2322
2323 tmp = getenv("LD_LIBRARY_PATH");
2324 if (!tmp) {
2325 tmp = "";
2326 }
2327 tmplen = strlen("LD_LIBRARY_PATH=")
2328 + strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
2329 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2330 if (!tmpnew) {
2331 ret = -ENOMEM;
2332 goto error;
2333 }
2334 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2335 strcat(tmpnew, consumerd32_libdir);
2336 if (tmp[0] != '\0') {
2337 strcat(tmpnew, ":");
2338 strcat(tmpnew, tmp);
2339 }
2340 ret = putenv(tmpnew);
2341 if (ret) {
2342 ret = -errno;
2343 free(tmpnew);
2344 goto error;
2345 }
2346 }
2347 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
2348 ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
2349 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2350 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2351 "--group", tracing_group_name,
2352 NULL);
2353 if (consumerd32_libdir[0] != '\0') {
2354 free(tmpnew);
2355 }
2356 break;
2357 }
2358 default:
2359 PERROR("unknown consumer type");
2360 exit(EXIT_FAILURE);
2361 }
2362 if (errno != 0) {
2363 PERROR("Consumer execl()");
2364 }
2365 /* Reaching this point, we got a failure on our execl(). */
2366 exit(EXIT_FAILURE);
2367 } else if (pid > 0) {
2368 ret = pid;
2369 } else {
2370 PERROR("start consumer fork");
2371 ret = -errno;
2372 }
2373 error:
2374 return ret;
2375 }
2376
2377 /*
2378 * Spawn the consumerd daemon and session daemon thread.
2379 */
2380 static int start_consumerd(struct consumer_data *consumer_data)
2381 {
2382 int ret;
2383
2384 /*
2385 * Set the listen() state on the socket since there is a possible race
2386 * between the exec() of the consumer daemon and this call if placed in the
2387 * consumer thread. See bug #366 for more details.
2388 */
2389 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
2390 if (ret < 0) {
2391 goto error;
2392 }
2393
2394 pthread_mutex_lock(&consumer_data->pid_mutex);
2395 if (consumer_data->pid != 0) {
2396 pthread_mutex_unlock(&consumer_data->pid_mutex);
2397 goto end;
2398 }
2399
2400 ret = spawn_consumerd(consumer_data);
2401 if (ret < 0) {
2402 ERR("Spawning consumerd failed");
2403 pthread_mutex_unlock(&consumer_data->pid_mutex);
2404 goto error;
2405 }
2406
2407 /* Setting up the consumer_data pid */
2408 consumer_data->pid = ret;
2409 DBG2("Consumer pid %d", consumer_data->pid);
2410 pthread_mutex_unlock(&consumer_data->pid_mutex);
2411
2412 DBG2("Spawning consumer control thread");
2413 ret = spawn_consumer_thread(consumer_data);
2414 if (ret < 0) {
2415 ERR("Fatal error spawning consumer control thread");
2416 goto error;
2417 }
2418
2419 end:
2420 return 0;
2421
2422 error:
2423 /* Cleanup already created sockets on error. */
2424 if (consumer_data->err_sock >= 0) {
2425 int err;
2426
2427 err = close(consumer_data->err_sock);
2428 if (err < 0) {
2429 PERROR("close consumer data error socket");
2430 }
2431 }
2432 return ret;
2433 }
2434
2435 /*
2436 * Setup necessary data for kernel tracer action.
2437 */
2438 static int init_kernel_tracer(void)
2439 {
2440 int ret;
2441
2442 /* Modprobe lttng kernel modules */
2443 ret = modprobe_lttng_control();
2444 if (ret < 0) {
2445 goto error;
2446 }
2447
2448 /* Open the LTTng kernel tracer control file */
2449 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
2450 if (kernel_tracer_fd < 0) {
2451 DBG("Failed to open %s", module_proc_lttng);
2452 ret = -1;
2453 goto error_open;
2454 }
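/*
 * module_proc_lttng is the control file exposed by the LTTng kernel
 * tracer module (typically /proc/lttng); the session daemon issues its
 * kernel tracing commands through ioctl() calls on this file descriptor
 * and on descriptors derived from it.
 */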
2455
2456 /* Validate kernel version */
2457 ret = kernel_validate_version(kernel_tracer_fd);
2458 if (ret < 0) {
2459 goto error_version;
2460 }
2461
2462 ret = modprobe_lttng_data();
2463 if (ret < 0) {
2464 goto error_modules;
2465 }
2466
2467 DBG("Kernel tracer fd %d", kernel_tracer_fd);
2468 return 0;
2469
2470 error_version:
2471 modprobe_remove_lttng_control();
2472 ret = close(kernel_tracer_fd);
2473 if (ret) {
2474 PERROR("close");
2475 }
2476 kernel_tracer_fd = -1;
2477 return LTTNG_ERR_KERN_VERSION;
2478
2479 error_modules:
2480 ret = close(kernel_tracer_fd);
2481 if (ret) {
2482 PERROR("close");
2483 }
2484
2485 error_open:
2486 modprobe_remove_lttng_control();
2487
2488 error:
2489 WARN("No kernel tracer available");
2490 kernel_tracer_fd = -1;
2491 if (!is_root) {
2492 return LTTNG_ERR_NEED_ROOT_SESSIOND;
2493 } else {
2494 return LTTNG_ERR_KERN_NA;
2495 }
2496 }
2497
2498
2499 /*
2500 * Copy consumer output from the tracing session to the domain session. The
2501 * function also applies the right modification on a per-domain basis to the
2502 * trace files' destination directory.
2503 *
2504 * Should *NOT* be called with RCU read-side lock held.
2505 */
2506 static int copy_session_consumer(int domain, struct ltt_session *session)
2507 {
2508 int ret;
2509 const char *dir_name;
2510 struct consumer_output *consumer;
2511
2512 assert(session);
2513 assert(session->consumer);
2514
2515 switch (domain) {
2516 case LTTNG_DOMAIN_KERNEL:
2517 DBG3("Copying tracing session consumer output in kernel session");
2518 /*
2519 * XXX: We should audit session creation and what this function does
2520 * "extra" in order to avoid the destroy call, since this function is
2521 * only used during domain session creation (kernel and UST). The same
2522 * applies to the UST case below.
2523 */
2524 if (session->kernel_session->consumer) {
2525 consumer_destroy_output(session->kernel_session->consumer);
2526 }
2527 session->kernel_session->consumer =
2528 consumer_copy_output(session->consumer);
2529 /* Ease our life a bit for the next part */
2530 consumer = session->kernel_session->consumer;
2531 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2532 break;
2533 case LTTNG_DOMAIN_JUL:
2534 case LTTNG_DOMAIN_UST:
2535 DBG3("Copying tracing session consumer output in UST session");
2536 if (session->ust_session->consumer) {
2537 consumer_destroy_output(session->ust_session->consumer);
2538 }
2539 session->ust_session->consumer =
2540 consumer_copy_output(session->consumer);
2541 /* Ease our life a bit for the next part */
2542 consumer = session->ust_session->consumer;
2543 dir_name = DEFAULT_UST_TRACE_DIR;
2544 break;
2545 default:
2546 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2547 goto error;
2548 }
2549
2550 /* Append correct directory to subdir */
2551 strncat(consumer->subdir, dir_name,
2552 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
2553 DBG3("Copy session consumer subdir %s", consumer->subdir);
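/*
 * For example, assuming the defaults from common/defaults.h, a kernel
 * domain appends "/kernel" to the session's consumer subdir while a UST
 * domain appends "/ust".
 */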
2554
2555 ret = LTTNG_OK;
2556
2557 error:
2558 return ret;
2559 }
2560
2561 /*
2562 * Create a UST session and add it to the session UST list.
2563 *
2564 * Should *NOT* be called with RCU read-side lock held.
2565 */
2566 static int create_ust_session(struct ltt_session *session,
2567 struct lttng_domain *domain)
2568 {
2569 int ret;
2570 struct ltt_ust_session *lus = NULL;
2571
2572 assert(session);
2573 assert(domain);
2574 assert(session->consumer);
2575
2576 switch (domain->type) {
2577 case LTTNG_DOMAIN_JUL:
2578 case LTTNG_DOMAIN_UST:
2579 break;
2580 default:
2581 ERR("Unknown UST domain on create session %d", domain->type);
2582 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2583 goto error;
2584 }
2585
2586 DBG("Creating UST session");
2587
2588 lus = trace_ust_create_session(session->id);
2589 if (lus == NULL) {
2590 ret = LTTNG_ERR_UST_SESS_FAIL;
2591 goto error;
2592 }
2593
2594 lus->uid = session->uid;
2595 lus->gid = session->gid;
2596 lus->output_traces = session->output_traces;
2597 lus->snapshot_mode = session->snapshot_mode;
2598 lus->live_timer_interval = session->live_timer;
2599 session->ust_session = lus;
2600
2601 /* Copy session output to the newly created UST session */
2602 ret = copy_session_consumer(domain->type, session);
2603 if (ret != LTTNG_OK) {
2604 goto error;
2605 }
2606
2607 return LTTNG_OK;
2608
2609 error:
2610 free(lus);
2611 session->ust_session = NULL;
2612 return ret;
2613 }
2614
2615 /*
2616 * Create a kernel tracer session then create the default channel.
2617 */
2618 static int create_kernel_session(struct ltt_session *session)
2619 {
2620 int ret;
2621
2622 DBG("Creating kernel session");
2623
2624 ret = kernel_create_session(session, kernel_tracer_fd);
2625 if (ret < 0) {
2626 ret = LTTNG_ERR_KERN_SESS_FAIL;
2627 goto error;
2628 }
2629
2630 /* Code flow safety */
2631 assert(session->kernel_session);
2632
2633 /* Copy session output to the newly created Kernel session */
2634 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2635 if (ret != LTTNG_OK) {
2636 goto error;
2637 }
2638
2639 /* Create directory(ies) on local filesystem. */
2640 if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
2641 strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
2642 ret = run_as_mkdir_recursive(
2643 session->kernel_session->consumer->dst.trace_path,
2644 S_IRWXU | S_IRWXG, session->uid, session->gid);
2645 if (ret < 0) {
2646 if (ret != -EEXIST) {
2647 ERR("Trace directory creation error");
2648 goto error;
2649 }
2650 }
2651 }
2652
2653 session->kernel_session->uid = session->uid;
2654 session->kernel_session->gid = session->gid;
2655 session->kernel_session->output_traces = session->output_traces;
2656 session->kernel_session->snapshot_mode = session->snapshot_mode;
2657
2658 return LTTNG_OK;
2659
2660 error:
2661 trace_kernel_destroy_session(session->kernel_session);
2662 session->kernel_session = NULL;
2663 return ret;
2664 }
2665
2666 /*
2667 * Count the number of sessions accessible by the given uid/gid.
2668 */
2669 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2670 {
2671 unsigned int i = 0;
2672 struct ltt_session *session;
2673
2674 DBG("Counting number of available session for UID %d GID %d",
2675 uid, gid);
2676 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2677 /*
2678 * Only list the sessions the user can control.
2679 */
2680 if (!session_access_ok(session, uid, gid)) {
2681 continue;
2682 }
2683 i++;
2684 }
2685 return i;
2686 }
2687
2688 /*
2689 * Process the command requested by the lttng client within the command
2690 * context structure. This function makes sure that the return structure (llm)
2691 * is set and ready for transmission before returning.
2692 *
2693 * Return any error encountered or 0 for success.
2694 *
2695 * "sock" is only used for special-case var. len data.
2696 *
2697 * Should *NOT* be called with RCU read-side lock held.
2698 */
2699 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
2700 int *sock_error)
2701 {
2702 int ret = LTTNG_OK;
2703 int need_tracing_session = 1;
2704 int need_domain;
2705
2706 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2707
2708 *sock_error = 0;
2709
2710 switch (cmd_ctx->lsm->cmd_type) {
2711 case LTTNG_CREATE_SESSION:
2712 case LTTNG_CREATE_SESSION_SNAPSHOT:
2713 case LTTNG_CREATE_SESSION_LIVE:
2714 case LTTNG_DESTROY_SESSION:
2715 case LTTNG_LIST_SESSIONS:
2716 case LTTNG_LIST_DOMAINS:
2717 case LTTNG_START_TRACE:
2718 case LTTNG_STOP_TRACE:
2719 case LTTNG_DATA_PENDING:
2720 case LTTNG_SNAPSHOT_ADD_OUTPUT:
2721 case LTTNG_SNAPSHOT_DEL_OUTPUT:
2722 case LTTNG_SNAPSHOT_LIST_OUTPUT:
2723 case LTTNG_SNAPSHOT_RECORD:
2724 case LTTNG_SAVE_SESSION:
2725 need_domain = 0;
2726 break;
2727 default:
2728 need_domain = 1;
2729 }
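/*
 * In short: commands that create, destroy, list, save or snapshot whole
 * sessions are domain-agnostic, while everything else (enabling events,
 * channels, contexts, ...) relies on the domain carried in the client
 * message.
 */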
2730
2731 if (opt_no_kernel && need_domain
2732 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
2733 if (!is_root) {
2734 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2735 } else {
2736 ret = LTTNG_ERR_KERN_NA;
2737 }
2738 goto error;
2739 }
2740
2741 /* Deny register consumer if we already have a spawned consumer. */
2742 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
2743 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2744 if (kconsumer_data.pid > 0) {
2745 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2746 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2747 goto error;
2748 }
2749 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2750 }
2751
2752 /*
2753 * Check for commands that don't need to allocate a return payload. We do
2754 * this here so we don't have to make the no-payload setup call in each
2755 * command handler.
2756 */
2757 switch (cmd_ctx->lsm->cmd_type) {
2758 case LTTNG_LIST_SESSIONS:
2759 case LTTNG_LIST_TRACEPOINTS:
2760 case LTTNG_LIST_TRACEPOINT_FIELDS:
2761 case LTTNG_LIST_DOMAINS:
2762 case LTTNG_LIST_CHANNELS:
2763 case LTTNG_LIST_EVENTS:
2764 break;
2765 default:
2766 /* Setup lttng message with no payload */
2767 ret = setup_lttng_msg(cmd_ctx, 0);
2768 if (ret < 0) {
2769 /* This label does not try to unlock the session */
2770 goto init_setup_error;
2771 }
2772 }
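/*
 * The list commands excluded above allocate their reply later, once the
 * payload size (number of sessions, events, channels, ...) is known; see
 * the setup_lttng_msg() calls in the per-command handlers below.
 */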
2773
2774 /* Commands that DO NOT need a session. */
2775 switch (cmd_ctx->lsm->cmd_type) {
2776 case LTTNG_CREATE_SESSION:
2777 case LTTNG_CREATE_SESSION_SNAPSHOT:
2778 case LTTNG_CREATE_SESSION_LIVE:
2779 case LTTNG_CALIBRATE:
2780 case LTTNG_LIST_SESSIONS:
2781 case LTTNG_LIST_TRACEPOINTS:
2782 case LTTNG_LIST_TRACEPOINT_FIELDS:
2783 case LTTNG_SAVE_SESSION:
2784 need_tracing_session = 0;
2785 break;
2786 default:
2787 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2788 /*
2789 * We keep the session list lock across _all_ commands
2790 * for now, because the per-session lock does not
2791 * handle teardown properly.
2792 */
2793 session_lock_list();
2794 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
2795 if (cmd_ctx->session == NULL) {
2796 ret = LTTNG_ERR_SESS_NOT_FOUND;
2797 goto error;
2798 } else {
2799 /* Acquire lock for the session */
2800 session_lock(cmd_ctx->session);
2801 }
2802 break;
2803 }
2804
2805 if (!need_domain) {
2806 goto skip_domain;
2807 }
2808
2809 /*
2810 * Check domain type for specific "pre-action".
2811 */
2812 switch (cmd_ctx->lsm->domain.type) {
2813 case LTTNG_DOMAIN_KERNEL:
2814 if (!is_root) {
2815 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2816 goto error;
2817 }
2818
2819 /* Kernel tracer check */
2820 if (kernel_tracer_fd == -1) {
2821 /* Basically, load kernel tracer modules */
2822 ret = init_kernel_tracer();
2823 if (ret != 0) {
2824 goto error;
2825 }
2826 }
2827
2828 /* Consumer is in an ERROR state. Report back to client */
2829 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
2830 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2831 goto error;
2832 }
2833
2834 /* Need a session for kernel command */
2835 if (need_tracing_session) {
2836 if (cmd_ctx->session->kernel_session == NULL) {
2837 ret = create_kernel_session(cmd_ctx->session);
2838 if (ret < 0) {
2839 ret = LTTNG_ERR_KERN_SESS_FAIL;
2840 goto error;
2841 }
2842 }
2843
2844 /* Start the kernel consumer daemon */
2845 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2846 if (kconsumer_data.pid == 0 &&
2847 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2848 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2849 ret = start_consumerd(&kconsumer_data);
2850 if (ret < 0) {
2851 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2852 goto error;
2853 }
2854 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
2855 } else {
2856 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2857 }
2858
2859 /*
2860 * The consumer was just spawned so we need to add the socket to
2861 * the consumer output of the session, if one exists.
2862 */
2863 ret = consumer_create_socket(&kconsumer_data,
2864 cmd_ctx->session->kernel_session->consumer);
2865 if (ret < 0) {
2866 goto error;
2867 }
2868 }
2869
2870 break;
2871 case LTTNG_DOMAIN_JUL:
2872 case LTTNG_DOMAIN_UST:
2873 {
2874 if (!ust_app_supported()) {
2875 ret = LTTNG_ERR_NO_UST;
2876 goto error;
2877 }
2878 /* Consumer is in an ERROR state. Report back to client */
2879 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
2880 ret = LTTNG_ERR_NO_USTCONSUMERD;
2881 goto error;
2882 }
2883
2884 if (need_tracing_session) {
2885 /* Create UST session if none exist. */
2886 if (cmd_ctx->session->ust_session == NULL) {
2887 ret = create_ust_session(cmd_ctx->session,
2888 &cmd_ctx->lsm->domain);
2889 if (ret != LTTNG_OK) {
2890 goto error;
2891 }
2892 }
2893
2894 /* Start the UST consumer daemons */
2895 /* 64-bit */
2896 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
2897 if (consumerd64_bin[0] != '\0' &&
2898 ustconsumer64_data.pid == 0 &&
2899 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2900 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2901 ret = start_consumerd(&ustconsumer64_data);
2902 if (ret < 0) {
2903 ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
2904 uatomic_set(&ust_consumerd64_fd, -EINVAL);
2905 goto error;
2906 }
2907
2908 uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
2909 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2910 } else {
2911 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2912 }
2913
2914 /*
2915 * Set up the socket for the 64-bit consumer. No need for atomic access
2916 * since it was set above and can ONLY be set in this thread.
2917 */
2918 ret = consumer_create_socket(&ustconsumer64_data,
2919 cmd_ctx->session->ust_session->consumer);
2920 if (ret < 0) {
2921 goto error;
2922 }
2923
2924 /* 32-bit */
pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
2925 if (consumerd32_bin[0] != '\0' &&
2926 ustconsumer32_data.pid == 0 &&
2927 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2928 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2929 ret = start_consumerd(&ustconsumer32_data);
2930 if (ret < 0) {
2931 ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
2932 uatomic_set(&ust_consumerd32_fd, -EINVAL);
2933 goto error;
2934 }
2935
2936 uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
2937 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2938 } else {
2939 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2940 }
2941
2942 /*
2943 * Set up the socket for the 32-bit consumer. No need for atomic access
2944 * since it was set above and can ONLY be set in this thread.
2945 */
2946 ret = consumer_create_socket(&ustconsumer32_data,
2947 cmd_ctx->session->ust_session->consumer);
2948 if (ret < 0) {
2949 goto error;
2950 }
2951 }
2952 break;
2953 }
2954 default:
2955 break;
2956 }
2957 skip_domain:
2958
2959 /* Validate consumer daemon state for start/stop trace commands */
2960 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
2961 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
2962 switch (cmd_ctx->lsm->domain.type) {
2963 case LTTNG_DOMAIN_JUL:
2964 case LTTNG_DOMAIN_UST:
2965 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
2966 ret = LTTNG_ERR_NO_USTCONSUMERD;
2967 goto error;
2968 }
2969 break;
2970 case LTTNG_DOMAIN_KERNEL:
2971 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
2972 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2973 goto error;
2974 }
2975 break;
2976 }
2977 }
2978
2979 /*
2980 * Check that the UID or GID match that of the tracing session.
2981 * The root user can interact with all sessions.
2982 */
2983 if (need_tracing_session) {
2984 if (!session_access_ok(cmd_ctx->session,
2985 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
2986 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
2987 ret = LTTNG_ERR_EPERM;
2988 goto error;
2989 }
2990 }
2991
2992 /*
2993 * Send relayd information to consumer as soon as we have a domain and a
2994 * session defined.
2995 */
2996 if (cmd_ctx->session && need_domain) {
2997 /*
2998 * Setup relayd if not done yet. If the relayd information was already
2999 * sent to the consumer, this call will gracefully return.
3000 */
3001 ret = cmd_setup_relayd(cmd_ctx->session);
3002 if (ret != LTTNG_OK) {
3003 goto error;
3004 }
3005 }
3006
3007 /* Process by command type */
3008 switch (cmd_ctx->lsm->cmd_type) {
3009 case LTTNG_ADD_CONTEXT:
3010 {
3011 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3012 cmd_ctx->lsm->u.context.channel_name,
3013 &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
3014 break;
3015 }
3016 case LTTNG_DISABLE_CHANNEL:
3017 {
3018 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3019 cmd_ctx->lsm->u.disable.channel_name);
3020 break;
3021 }
3022 case LTTNG_DISABLE_EVENT:
3023 {
3024 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3025 cmd_ctx->lsm->u.disable.channel_name,
3026 cmd_ctx->lsm->u.disable.name);
3027 break;
3028 }
3029 case LTTNG_DISABLE_ALL_EVENT:
3030 {
3031 DBG("Disabling all events");
3032
3033 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3034 cmd_ctx->lsm->u.disable.channel_name);
3035 break;
3036 }
3037 case LTTNG_ENABLE_CHANNEL:
3038 {
3039 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
3040 &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
3041 break;
3042 }
3043 case LTTNG_ENABLE_EVENT:
3044 {
3045 struct lttng_event_exclusion *exclusion = NULL;
3046 struct lttng_filter_bytecode *bytecode = NULL;
3047 char *filter_expression = NULL;
3048
3049 /* Handle event exclusions: receive the exclusion list from the client. */
3050 if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
3051 size_t count = cmd_ctx->lsm->u.enable.exclusion_count;
3052
3053 exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
3054 (count * LTTNG_SYMBOL_NAME_LEN));
3055 if (!exclusion) {
3056 ret = LTTNG_ERR_EXCLUSION_NOMEM;
3057 goto error;
3058 }
3059
3060 DBG("Receiving var len exclusion event list from client ...");
3061 exclusion->count = count;
3062 ret = lttcomm_recv_unix_sock(sock, exclusion->names,
3063 count * LTTNG_SYMBOL_NAME_LEN);
3064 if (ret <= 0) {
3065 DBG("Nothing recv() from client var len data... continuing");
3066 *sock_error = 1;
3067 free(exclusion);
3068 ret = LTTNG_ERR_EXCLUSION_INVAL;
3069 goto error;
3070 }
3071 }
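/*
 * The exclusion names arrive as a packed array of fixed-width entries of
 * LTTNG_SYMBOL_NAME_LEN bytes each, hence the expected payload size of
 * count * LTTNG_SYMBOL_NAME_LEN computed above.
 */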
3072
3073 /* Get filter expression from client. */
3074 if (cmd_ctx->lsm->u.enable.expression_len > 0) {
3075 size_t expression_len =
3076 cmd_ctx->lsm->u.enable.expression_len;
3077
3078 if (expression_len > LTTNG_FILTER_MAX_LEN) {
3079 ret = LTTNG_ERR_FILTER_INVAL;
3080 free(exclusion);
3081 goto error;
3082 }
3083
3084 filter_expression = zmalloc(expression_len);
3085 if (!filter_expression) {
3086 free(exclusion);
3087 ret = LTTNG_ERR_FILTER_NOMEM;
3088 goto error;
3089 }
3090
3091 /* Receive var. len. data */
3092 DBG("Receiving var len filter's expression from client ...");
3093 ret = lttcomm_recv_unix_sock(sock, filter_expression,
3094 expression_len);
3095 if (ret <= 0) {
3096 DBG("Nothing recv() from client car len data... continuing");
3097 *sock_error = 1;
3098 free(filter_expression);
3099 free(exclusion);
3100 ret = LTTNG_ERR_FILTER_INVAL;
3101 goto error;
3102 }
3103 }
3104
3105 /* Handle filter and get bytecode from client. */
3106 if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
3107 size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;
3108
3109 if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
3110 ret = LTTNG_ERR_FILTER_INVAL;
3111 free(exclusion);
3112 goto error;
3113 }
3114
3115 bytecode = zmalloc(bytecode_len);
3116 if (!bytecode) {
3117 free(exclusion);
3118 ret = LTTNG_ERR_FILTER_NOMEM;
3119 goto error;
3120 }
3121
3122 /* Receive var. len. data */
3123 DBG("Receiving var len filter's bytecode from client ...");
3124 ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
3125 if (ret <= 0) {
3126 DBG("Nothing recv() from client car len data... continuing");
3127 *sock_error = 1;
3128 free(bytecode);
3129 free(exclusion);
3130 ret = LTTNG_ERR_FILTER_INVAL;
3131 goto error;
3132 }
3133
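/*
 * Sanity check: the bytecode header embeds its own length, so the total
 * announced by the client must equal the header size plus the bytecode
 * length; reject the command otherwise.
 */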
3134 if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
3135 free(bytecode);
3136 free(exclusion);
3137 ret = LTTNG_ERR_FILTER_INVAL;
3138 goto error;
3139 }
3140 }
3141
3142 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
3143 cmd_ctx->lsm->u.enable.channel_name,
3144 &cmd_ctx->lsm->u.enable.event,
3145 filter_expression, bytecode, exclusion,
3146 kernel_poll_pipe[1]);
3147 break;
3148 }
3149 case LTTNG_ENABLE_ALL_EVENT:
3150 {
3151 DBG("Enabling all events");
3152
3153 ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
3154 cmd_ctx->lsm->u.enable.channel_name,
3155 cmd_ctx->lsm->u.enable.event.type, NULL, NULL,
3156 kernel_poll_pipe[1]);
3157 break;
3158 }
3159 case LTTNG_LIST_TRACEPOINTS:
3160 {
3161 struct lttng_event *events;
3162 ssize_t nb_events;
3163
3164 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
3165 if (nb_events < 0) {
3166 /* Return value is a negative lttng_error_code. */
3167 ret = -nb_events;
3168 goto error;
3169 }
3170
3171 /*
3172 * Setup lttng message with payload size set to the event list size in
3173 * bytes and then copy list into the llm payload.
3174 */
3175 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
3176 if (ret < 0) {
3177 free(events);
3178 goto setup_error;
3179 }
3180
3181 /* Copy event list into message payload */
3182 memcpy(cmd_ctx->llm->payload, events,
3183 sizeof(struct lttng_event) * nb_events);
3184
3185 free(events);
3186
3187 ret = LTTNG_OK;
3188 break;
3189 }
3190 case LTTNG_LIST_TRACEPOINT_FIELDS:
3191 {
3192 struct lttng_event_field *fields;
3193 ssize_t nb_fields;
3194
3195 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
3196 &fields);
3197 if (nb_fields < 0) {
3198 /* Return value is a negative lttng_error_code. */
3199 ret = -nb_fields;
3200 goto error;
3201 }
3202
3203 /*
3204 * Setup lttng message with payload size set to the field list size in
3205 * bytes and then copy list into the llm payload.
3206 */
3207 ret = setup_lttng_msg(cmd_ctx,
3208 sizeof(struct lttng_event_field) * nb_fields);
3209 if (ret < 0) {
3210 free(fields);
3211 goto setup_error;
3212 }
3213
3214 /* Copy field list into message payload */
3215 memcpy(cmd_ctx->llm->payload, fields,
3216 sizeof(struct lttng_event_field) * nb_fields);
3217
3218 free(fields);
3219
3220 ret = LTTNG_OK;
3221 break;
3222 }
3223 case LTTNG_SET_CONSUMER_URI:
3224 {
3225 size_t nb_uri, len;
3226 struct lttng_uri *uris;
3227
3228 nb_uri = cmd_ctx->lsm->u.uri.size;
3229 len = nb_uri * sizeof(struct lttng_uri);
3230
3231 if (nb_uri == 0) {
3232 ret = LTTNG_ERR_INVALID;
3233 goto error;
3234 }
3235
3236 uris = zmalloc(len);
3237 if (uris == NULL) {
3238 ret = LTTNG_ERR_FATAL;
3239 goto error;
3240 }
3241
3242 /* Receive variable len data */
3243 DBG("Receiving %zu URI(s) from client ...", nb_uri);
3244 ret = lttcomm_recv_unix_sock(sock, uris, len);
3245 if (ret <= 0) {
3246 DBG("No URIs received from client... continuing");
3247 *sock_error = 1;
3248 ret = LTTNG_ERR_SESSION_FAIL;
3249 free(uris);
3250 goto error;
3251 }
3252
3253 ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3254 nb_uri, uris);
3255 if (ret != LTTNG_OK) {
3256 free(uris);
3257 goto error;
3258 }
3259
3260 /*
3261 * XXX: 0 means that this URI should be applied on the session. Should
3262 * be a DOMAIN enum.
3263 */
3264 if (cmd_ctx->lsm->domain.type == 0) {
3265 /* Add the URI for the UST session if a consumer is present. */
3266 if (cmd_ctx->session->ust_session &&
3267 cmd_ctx->session->ust_session->consumer) {
3268 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
3269 nb_uri, uris);
3270 } else if (cmd_ctx->session->kernel_session &&
3271 cmd_ctx->session->kernel_session->consumer) {
3272 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
3273 cmd_ctx->session, nb_uri, uris);
3274 }
3275 }
3276
3277 free(uris);
3278
3279 break;
3280 }
3281 case LTTNG_START_TRACE:
3282 {
3283 ret = cmd_start_trace(cmd_ctx->session);
3284 break;
3285 }
3286 case LTTNG_STOP_TRACE:
3287 {
3288 ret = cmd_stop_trace(cmd_ctx->session);
3289 break;
3290 }
3291 case LTTNG_CREATE_SESSION:
3292 {
3293 size_t nb_uri, len;
3294 struct lttng_uri *uris = NULL;
3295
3296 nb_uri = cmd_ctx->lsm->u.uri.size;
3297 len = nb_uri * sizeof(struct lttng_uri);
3298
3299 if (nb_uri > 0) {
3300 uris = zmalloc(len);
3301 if (uris == NULL) {
3302 ret = LTTNG_ERR_FATAL;
3303 goto error;
3304 }
3305
3306 /* Receive variable len data */
3307 DBG("Waiting for %zu URIs from client ...", nb_uri);
3308 ret = lttcomm_recv_unix_sock(sock, uris, len);
3309 if (ret <= 0) {
3310 DBG("No URIs received from client... continuing");
3311 *sock_error = 1;
3312 ret = LTTNG_ERR_SESSION_FAIL;
3313 free(uris);
3314 goto error;
3315 }
3316
3317 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3318 DBG("Creating session with ONE network URI is a bad call");
3319 ret = LTTNG_ERR_SESSION_FAIL;
3320 free(uris);
3321 goto error;
3322 }
3323 }
3324
3325 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
3326 &cmd_ctx->creds, 0);
3327
3328 free(uris);
3329
3330 break;
3331 }
3332 case LTTNG_DESTROY_SESSION:
3333 {
3334 ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);
3335
3336 /* Set session to NULL so we do not unlock it after free. */
3337 cmd_ctx->session = NULL;
3338 break;
3339 }
3340 case LTTNG_LIST_DOMAINS:
3341 {
3342 ssize_t nb_dom;
3343 struct lttng_domain *domains;
3344
3345 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
3346 if (nb_dom < 0) {
3347 /* Return value is a negative lttng_error_code. */
3348 ret = -nb_dom;
3349 goto error;
3350 }
3351
3352 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
3353 if (ret < 0) {
3354 free(domains);
3355 goto setup_error;
3356 }
3357
3358 /* Copy domain list into message payload */
3359 memcpy(cmd_ctx->llm->payload, domains,
3360 nb_dom * sizeof(struct lttng_domain));
3361
3362 free(domains);
3363
3364 ret = LTTNG_OK;
3365 break;
3366 }
3367 case LTTNG_LIST_CHANNELS:
3368 {
3369 int nb_chan;
3370 struct lttng_channel *channels;
3371
3372 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
3373 cmd_ctx->session, &channels);
3374 if (nb_chan < 0) {
3375 /* Return value is a negative lttng_error_code. */
3376 ret = -nb_chan;
3377 goto error;
3378 }
3379
3380 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
3381 if (ret < 0) {
3382 free(channels);
3383 goto setup_error;
3384 }
3385
3386 /* Copy channel list into message payload */
3387 memcpy(cmd_ctx->llm->payload, channels,
3388 nb_chan * sizeof(struct lttng_channel));
3389
3390 free(channels);
3391
3392 ret = LTTNG_OK;
3393 break;
3394 }
3395 case LTTNG_LIST_EVENTS:
3396 {
3397 ssize_t nb_event;
3398 struct lttng_event *events = NULL;
3399
3400 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3401 cmd_ctx->lsm->u.list.channel_name, &events);
3402 if (nb_event < 0) {
3403 /* Return value is a negative lttng_error_code. */
3404 ret = -nb_event;
3405 goto error;
3406 }
3407
3408 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
3409 if (ret < 0) {
3410 free(events);
3411 goto setup_error;
3412 }
3413
3414 /* Copy event list into message payload */
3415 memcpy(cmd_ctx->llm->payload, events,
3416 nb_event * sizeof(struct lttng_event));
3417
3418 free(events);
3419
3420 ret = LTTNG_OK;
3421 break;
3422 }
3423 case LTTNG_LIST_SESSIONS:
3424 {
3425 unsigned int nr_sessions;
3426
3427 session_lock_list();
3428 nr_sessions = lttng_sessions_count(
3429 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3430 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3431
3432 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
3433 if (ret < 0) {
3434 session_unlock_list();
3435 goto setup_error;
3436 }
3437
3438 /* Fill the session array */
3439 cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
3440 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3441 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3442
3443 session_unlock_list();
3444
3445 ret = LTTNG_OK;
3446 break;
3447 }
3448 case LTTNG_CALIBRATE:
3449 {
3450 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
3451 &cmd_ctx->lsm->u.calibrate);
3452 break;
3453 }
3454 case LTTNG_REGISTER_CONSUMER:
3455 {
3456 struct consumer_data *cdata;
3457
3458 switch (cmd_ctx->lsm->domain.type) {
3459 case LTTNG_DOMAIN_KERNEL:
3460 cdata = &kconsumer_data;
3461 break;
3462 default:
3463 ret = LTTNG_ERR_UND;
3464 goto error;
3465 }
3466
3467 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3468 cmd_ctx->lsm->u.reg.path, cdata);
3469 break;
3470 }
3471 case LTTNG_DATA_PENDING:
3472 {
3473 ret = cmd_data_pending(cmd_ctx->session);
3474 break;
3475 }
3476 case LTTNG_SNAPSHOT_ADD_OUTPUT:
3477 {
3478 struct lttcomm_lttng_output_id reply;
3479
3480 ret = cmd_snapshot_add_output(cmd_ctx->session,
3481 &cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
3482 if (ret != LTTNG_OK) {
3483 goto error;
3484 }
3485
3486 ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
3487 if (ret < 0) {
3488 goto setup_error;
3489 }
3490
3491 /* Copy the new output id into the message payload */
3492 memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));
3493 ret = LTTNG_OK;
3494 break;
3495 }
3496 case LTTNG_SNAPSHOT_DEL_OUTPUT:
3497 {
3498 ret = cmd_snapshot_del_output(cmd_ctx->session,
3499 &cmd_ctx->lsm->u.snapshot_output.output);
3500 break;
3501 }
3502 case LTTNG_SNAPSHOT_LIST_OUTPUT:
3503 {
3504 ssize_t nb_output;
3505 struct lttng_snapshot_output *outputs = NULL;
3506
3507 nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
3508 if (nb_output < 0) {
3509 ret = -nb_output;
3510 goto error;
3511 }
3512
3513 ret = setup_lttng_msg(cmd_ctx,
3514 nb_output * sizeof(struct lttng_snapshot_output));
3515 if (ret < 0) {
3516 free(outputs);
3517 goto setup_error;
3518 }
3519
3520 if (outputs) {
3521 /* Copy output list into message payload */
3522 memcpy(cmd_ctx->llm->payload, outputs,
3523 nb_output * sizeof(struct lttng_snapshot_output));
3524 free(outputs);
3525 }
3526
3527 ret = LTTNG_OK;
3528 break;
3529 }
3530 case LTTNG_SNAPSHOT_RECORD:
3531 {
3532 ret = cmd_snapshot_record(cmd_ctx->session,
3533 &cmd_ctx->lsm->u.snapshot_record.output,
3534 cmd_ctx->lsm->u.snapshot_record.wait);
3535 break;
3536 }
3537 case LTTNG_CREATE_SESSION_SNAPSHOT:
3538 {
3539 size_t nb_uri, len;
3540 struct lttng_uri *uris = NULL;
3541
3542 nb_uri = cmd_ctx->lsm->u.uri.size;
3543 len = nb_uri * sizeof(struct lttng_uri);
3544
3545 if (nb_uri > 0) {
3546 uris = zmalloc(len);
3547 if (uris == NULL) {
3548 ret = LTTNG_ERR_FATAL;
3549 goto error;
3550 }
3551
3552 /* Receive variable len data */
3553 DBG("Waiting for %zu URIs from client ...", nb_uri);
3554 ret = lttcomm_recv_unix_sock(sock, uris, len);
3555 if (ret <= 0) {
3556 DBG("No URIs received from client... continuing");
3557 *sock_error = 1;
3558 ret = LTTNG_ERR_SESSION_FAIL;
3559 free(uris);
3560 goto error;
3561 }
3562
3563 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3564 DBG("Creating session with ONE network URI is a bad call");
3565 ret = LTTNG_ERR_SESSION_FAIL;
3566 free(uris);
3567 goto error;
3568 }
3569 }
3570
3571 ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
3572 nb_uri, &cmd_ctx->creds);
3573 free(uris);
3574 break;
3575 }
3576 case LTTNG_CREATE_SESSION_LIVE:
3577 {
3578 size_t nb_uri, len;
3579 struct lttng_uri *uris = NULL;
3580
3581 nb_uri = cmd_ctx->lsm->u.uri.size;
3582 len = nb_uri * sizeof(struct lttng_uri);
3583
3584 if (nb_uri > 0) {
3585 uris = zmalloc(len);
3586 if (uris == NULL) {
3587 ret = LTTNG_ERR_FATAL;
3588 goto error;
3589 }
3590
3591 /* Receive variable len data */
3592 DBG("Waiting for %zu URIs from client ...", nb_uri);
3593 ret = lttcomm_recv_unix_sock(sock, uris, len);
3594 if (ret <= 0) {
3595 DBG("No URIs received from client... continuing");
3596 *sock_error = 1;
3597 ret = LTTNG_ERR_SESSION_FAIL;
3598 free(uris);
3599 goto error;
3600 }
3601
3602 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3603 DBG("Creating session with ONE network URI is a bad call");
3604 ret = LTTNG_ERR_SESSION_FAIL;
3605 free(uris);
3606 goto error;
3607 }
3608 }
3609
3610 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
3611 nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);
3612 free(uris);
3613 break;
3614 }
3615 case LTTNG_SAVE_SESSION:
3616 {
3617 ret = cmd_save_sessions(&cmd_ctx->lsm->u.save_session.attr,
3618 &cmd_ctx->creds);
3619 break;
3620 }
3621 default:
3622 ret = LTTNG_ERR_UND;
3623 break;
3624 }
3625
3626 error:
3627 if (cmd_ctx->llm == NULL) {
3628 DBG("Missing llm structure. Allocating one.");
3629 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
3630 goto setup_error;
3631 }
3632 }
3633 /* Set return code */
3634 cmd_ctx->llm->ret_code = ret;
3635 setup_error:
3636 if (cmd_ctx->session) {
3637 session_unlock(cmd_ctx->session);
3638 }
3639 if (need_tracing_session) {
3640 session_unlock_list();
3641 }
3642 init_setup_error:
3643 return ret;
3644 }
3645
3646 /*
3647 * Thread managing health check socket.
3648 */
3649 static void *thread_manage_health(void *data)
3650 {
3651 int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
3652 uint32_t revents, nb_fd;
3653 struct lttng_poll_event events;
3654 struct health_comm_msg msg;
3655 struct health_comm_reply reply;
3656
3657 DBG("[thread] Manage health check started");
3658
3659 rcu_register_thread();
3660
3661 /* We might hit an error path before this is created. */
3662 lttng_poll_init(&events);
3663
3664 /* Create unix socket */
3665 sock = lttcomm_create_unix_sock(health_unix_sock_path);
3666 if (sock < 0) {
3667 ERR("Unable to create health check Unix socket");
3668 ret = -1;
3669 goto error;
3670 }
3671
3672 if (is_root) {
3673 /* lttng health client socket path permissions */
3674 ret = chown(health_unix_sock_path, 0,
3675 utils_get_group_id(tracing_group_name));
3676 if (ret < 0) {
3677 ERR("Unable to set group on %s", health_unix_sock_path);
3678 PERROR("chown");
3679 ret = -1;
3680 goto error;
3681 }
3682
3683 ret = chmod(health_unix_sock_path,
3684 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
3685 if (ret < 0) {
3686 ERR("Unable to set permissions on %s", health_unix_sock_path);
3687 PERROR("chmod");
3688 ret = -1;
3689 goto error;
3690 }
3691 }
3692
3693 /*
3694 * Set the CLOEXEC flag. Return code is useless because either way, the
3695 * show must go on.
3696 */
3697 (void) utils_set_fd_cloexec(sock);
3698
3699 ret = lttcomm_listen_unix_sock(sock);
3700 if (ret < 0) {
3701 goto error;
3702 }
3703
3704 /*
3705 * Pass 2 as size here for the thread quit pipe and the health socket. Nothing
3706 * more will be added to this poll set.
3707 */
3708 ret = sessiond_set_thread_pollset(&events, 2);
3709 if (ret < 0) {
3710 goto error;
3711 }
3712
3713 /* Add the health check socket */
3714 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
3715 if (ret < 0) {
3716 goto error;
3717 }
3718
3719 lttng_sessiond_notify_ready();
3720
3721 while (1) {
3722 DBG("Health check ready");
3723
3724 /* Infinite blocking call, waiting for transmission */
3725 restart:
3726 ret = lttng_poll_wait(&events, -1);
3727 if (ret < 0) {
3728 /*
3729 * Restart interrupted system call.
3730 */
3731 if (errno == EINTR) {
3732 goto restart;
3733 }
3734 goto error;
3735 }
3736
3737 nb_fd = ret;
3738
3739 for (i = 0; i < nb_fd; i++) {
3740 /* Fetch the poll data once */
3741 revents = LTTNG_POLL_GETEV(&events, i);
3742 pollfd = LTTNG_POLL_GETFD(&events, i);
3743
3744 /* Thread quit pipe has been closed. Killing thread. */
3745 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3746 if (ret) {
3747 err = 0;
3748 goto exit;
3749 }
3750
3751 /* Event on the health socket */
3752 if (pollfd == sock) {
3753 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3754 ERR("Health socket poll error");
3755 goto error;
3756 }
3757 }
3758 }
3759
3760 new_sock = lttcomm_accept_unix_sock(sock);
3761 if (new_sock < 0) {
3762 goto error;
3763 }
3764
3765 /*
3766 * Set the CLOEXEC flag. Return code is useless because either way, the
3767 * show must go on.
3768 */
3769 (void) utils_set_fd_cloexec(new_sock);
3770
3771 DBG("Receiving data from client for health...");
3772 ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
3773 if (ret <= 0) {
3774 DBG("Nothing recv() from client... continuing");
3775 ret = close(new_sock);
3776 if (ret) {
3777 PERROR("close");
3778 }
3779 new_sock = -1;
3780 continue;
3781 }
3782
3783 rcu_thread_online();
3784
3785 memset(&reply, 0, sizeof(reply));
3786 for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
3787 /*
3788 * health_check_state returns 0 if health is
3789 * bad.
3790 */
3791 if (!health_check_state(health_sessiond, i)) {
3792 reply.ret_code |= 1ULL << i;
3793 }
3794 }
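/*
 * reply.ret_code is thus a bitmask of unhealthy components: bit i set
 * means health component i failed its check, so a fully healthy session
 * daemon replies with 0.
 */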
3795
3796 DBG2("Health check return value %" PRIx64, reply.ret_code);
3797
3798 ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
3799 if (ret < 0) {
3800 ERR("Failed to send health data back to client");
3801 }
3802
3803 /* End of transmission */
3804 ret = close(new_sock);
3805 if (ret) {
3806 PERROR("close");
3807 }
3808 new_sock = -1;
3809 }
3810
3811 exit:
3812 error:
3813 if (err) {
3814 ERR("Health error occurred in %s", __func__);
3815 }
3816 DBG("Health check thread dying");
3817 unlink(health_unix_sock_path);
3818 if (sock >= 0) {
3819 ret = close(sock);
3820 if (ret) {
3821 PERROR("close");
3822 }
3823 }
3824
3825 lttng_poll_clean(&events);
3826
3827 rcu_unregister_thread();
3828 return NULL;
3829 }
3830
3831 /*
3832 * This thread manages all client requests using the client unix socket for
3833 * communication.
3834 */
3835 static void *thread_manage_clients(void *data)
3836 {
3837 int sock = -1, ret, i, pollfd, err = -1;
3838 int sock_error;
3839 uint32_t revents, nb_fd;
3840 struct command_ctx *cmd_ctx = NULL;
3841 struct lttng_poll_event events;
3842
3843 DBG("[thread] Manage client started");
3844
3845 rcu_register_thread();
3846
3847 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);
3848
3849 health_code_update();
3850
3851 ret = lttcomm_listen_unix_sock(client_sock);
3852 if (ret < 0) {
3853 goto error_listen;
3854 }
3855
3856 /*
3857 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3858 * more will be added to this poll set.
3859 */
3860 ret = sessiond_set_thread_pollset(&events, 2);
3861 if (ret < 0) {
3862 goto error_create_poll;
3863 }
3864
3865 /* Add the client socket */
3866 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
3867 if (ret < 0) {
3868 goto error;
3869 }
3870
3871 lttng_sessiond_notify_ready();
3872
3873 /* This testpoint is after we signal readiness to the parent. */
3874 if (testpoint(sessiond_thread_manage_clients)) {
3875 goto error;
3876 }
3877
3878 if (testpoint(sessiond_thread_manage_clients_before_loop)) {
3879 goto error;
3880 }
3881
3882 health_code_update();
3883
3884 while (1) {
3885 DBG("Accepting client command ...");
3886
3887 /* Infinite blocking call, waiting for transmission */
3888 restart:
3889 health_poll_entry();
3890 ret = lttng_poll_wait(&events, -1);
3891 health_poll_exit();
3892 if (ret < 0) {
3893 /*
3894 * Restart interrupted system call.
3895 */
3896 if (errno == EINTR) {
3897 goto restart;
3898 }
3899 goto error;
3900 }
3901
3902 nb_fd = ret;
3903
3904 for (i = 0; i < nb_fd; i++) {
3905 /* Fetch the poll data once */
3906 revents = LTTNG_POLL_GETEV(&events, i);
3907 pollfd = LTTNG_POLL_GETFD(&events, i);
3908
3909 health_code_update();
3910
3911 /* Thread quit pipe has been closed. Killing thread. */
3912 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3913 if (ret) {
3914 err = 0;
3915 goto exit;
3916 }
3917
3918 /* Event on the client socket */
3919 if (pollfd == client_sock) {
3920 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3921 ERR("Client socket poll error");
3922 goto error;
3923 }
3924 }
3925 }
3926
3927 DBG("Wait for client response");
3928
3929 health_code_update();
3930
3931 sock = lttcomm_accept_unix_sock(client_sock);
3932 if (sock < 0) {
3933 goto error;
3934 }
3935
3936 /*
3937 * Set the CLOEXEC flag. Return code is useless because either way, the
3938 * show must go on.
3939 */
3940 (void) utils_set_fd_cloexec(sock);
3941
3942 /* Set socket option for credentials retrieval */
3943 ret = lttcomm_setsockopt_creds_unix_sock(sock);
3944 if (ret < 0) {
3945 goto error;
3946 }
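/*
 * With credential passing enabled on the socket (SO_PASSCRED on Linux),
 * the lttcomm_recv_creds_unix_sock() call below can extract the client's
 * uid/gid from the ancillary data into cmd_ctx->creds, which
 * process_client_msg() later uses for its permission checks.
 */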
3947
3948 /* Allocate context command to process the client request */
3949 cmd_ctx = zmalloc(sizeof(struct command_ctx));
3950 if (cmd_ctx == NULL) {
3951 PERROR("zmalloc cmd_ctx");
3952 goto error;
3953 }
3954
3955 /* Allocate data buffer for reception */
3956 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
3957 if (cmd_ctx->lsm == NULL) {
3958 PERROR("zmalloc cmd_ctx->lsm");
3959 goto error;
3960 }
3961
3962 cmd_ctx->llm = NULL;
3963 cmd_ctx->session = NULL;
3964
3965 health_code_update();
3966
3967 /*
3968 * Data is received from the lttng client. The struct
3969 * lttcomm_session_msg (lsm) contains the command and data request of
3970 * the client.
3971 */
3972 DBG("Receiving data from client ...");
3973 ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
3974 sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
3975 if (ret <= 0) {
3976 DBG("Nothing recv() from client... continuing");
3977 ret = close(sock);
3978 if (ret) {
3979 PERROR("close");
3980 }
3981 sock = -1;
3982 clean_command_ctx(&cmd_ctx);
3983 continue;
3984 }
3985
3986 health_code_update();
3987
3988 // TODO: Validate cmd_ctx, including sanity checks for
3989 // security purposes.
3990
3991 rcu_thread_online();
3992 /*
3993 * This function dispatches the work to the kernel or userspace tracer
3994 * libs and fills the lttcomm_lttng_msg data structure with all the
3995 * information needed by the client. The command context struct contains
3996 * everything this function may need.
3997 */
3998 ret = process_client_msg(cmd_ctx, sock, &sock_error);
3999 rcu_thread_offline();
4000 if (ret < 0) {
4001 ret = close(sock);
4002 if (ret) {
4003 PERROR("close");
4004 }
4005 sock = -1;
4006 /*
4007 * TODO: Inform client somehow of the fatal error. At
4008 * this point, ret < 0 means that a zmalloc failed
4009 * (ENOMEM). Error detected but still accept
4010 * command, unless a socket error has been
4011 * detected.
4012 */
4013 clean_command_ctx(&cmd_ctx);
4014 continue;
4015 }
4016
4017 health_code_update();
4018
4019 DBG("Sending response (size: %d, retcode: %s)",
4020 cmd_ctx->lttng_msg_size,
4021 lttng_strerror(-cmd_ctx->llm->ret_code));
4022 ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
4023 if (ret < 0) {
4024 ERR("Failed to send data back to client");
4025 }
4026
4027 /* End of transmission */
4028 ret = close(sock);
4029 if (ret) {
4030 PERROR("close");
4031 }
4032 sock = -1;
4033
4034 clean_command_ctx(&cmd_ctx);
4035
4036 health_code_update();
4037 }
4038
4039 exit:
4040 error:
4041 if (sock >= 0) {
4042 ret = close(sock);
4043 if (ret) {
4044 PERROR("close");
4045 }
4046 }
4047
4048 lttng_poll_clean(&events);
4049 clean_command_ctx(&cmd_ctx);
4050
4051 error_listen:
4052 error_create_poll:
4053 unlink(client_unix_sock_path);
4054 if (client_sock >= 0) {
4055 ret = close(client_sock);
4056 if (ret) {
4057 PERROR("close");
4058 }
4059 }
4060
4061 if (err) {
4062 health_error();
4063 ERR("Health error occurred in %s", __func__);
4064 }
4065
4066 health_unregister(health_sessiond);
4067
4068 DBG("Client thread dying");
4069
4070 rcu_unregister_thread();
4071 return NULL;
4072 }
4073
4074
4075 /*
4076 * usage function on stderr
4077 */
4078 static void usage(void)
4079 {
4080 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
4081 fprintf(stderr, " -h, --help Display this usage.\n");
4082 fprintf(stderr, " -c, --client-sock PATH Specify path for the client unix socket\n");
4083 fprintf(stderr, " -a, --apps-sock PATH Specify path for apps unix socket\n");
4084 fprintf(stderr, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
4085 fprintf(stderr, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
4086 fprintf(stderr, " --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
4087 fprintf(stderr, " --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
4088 fprintf(stderr, " --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
4089 fprintf(stderr, " --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
4090 fprintf(stderr, " --consumerd32-path PATH Specify path for the 32-bit UST consumer daemon binary\n");
4091 fprintf(stderr, " --consumerd32-libdir PATH Specify path for the 32-bit UST consumer daemon libraries\n");
4092 fprintf(stderr, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
4093 fprintf(stderr, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
4094 fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
4095 fprintf(stderr, " -b, --background Start as a daemon, keeping console open.\n");
4096 fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
4097 fprintf(stderr, " -V, --version Show version number.\n");
4098 fprintf(stderr, " -S, --sig-parent Send SIGUSR1 to parent pid to notify readiness.\n");
4099 fprintf(stderr, " -q, --quiet No output at all.\n");
4100 fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
4101 fprintf(stderr, " -p, --pidfile FILE Write a pid to FILE name overriding the default value.\n");
4102 fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
4103 fprintf(stderr, " --no-kernel Disable kernel tracer\n");
4104 fprintf(stderr, " --jul-tcp-port JUL application registration TCP port\n");
4105 fprintf(stderr, " -f --config Load daemon configuration file\n");
4106 }
4107
4108 /*
4109 * Take an option from the getopt output and set it in the right variable to be
4110 * used later.
4111 *
4112 * Return 0 on success else a negative value.
4113 */
4114 static int set_option(int opt, const char *arg, const char *optname)
4115 {
4116 int ret = 0;
4117
4118 switch (opt) {
4119 case 0:
4120 fprintf(stderr, "option %s\n", optname);
4121 if (arg) {
4122 fprintf(stderr, "  with arg %s\n", arg);
4123 }
4124 break;
4125 case 'c':
4126 snprintf(client_unix_sock_path, PATH_MAX, "%s", arg);
4127 break;
4128 case 'a':
4129 snprintf(apps_unix_sock_path, PATH_MAX, "%s", arg);
4130 break;
4131 case 'd':
4132 opt_daemon = 1;
4133 break;
4134 case 'b':
4135 opt_background = 1;
4136 break;
4137 case 'g':
4138 tracing_group_name = strdup(arg);
4139 break;
4140 case 'h':
4141 usage();
4142 exit(EXIT_FAILURE);
4143 case 'V':
4144 fprintf(stdout, "%s\n", VERSION);
4145 exit(EXIT_SUCCESS);
4146 case 'S':
4147 opt_sig_parent = 1;
4148 break;
4149 case 'E':
4150 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", arg);
4151 break;
4152 case 'C':
4153 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", arg);
4154 break;
4155 case 'F':
4156 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", arg);
4157 break;
4158 case 'D':
4159 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", arg);
4160 break;
4161 case 'H':
4162 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", arg);
4163 break;
4164 case 'G':
4165 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", arg);
4166 break;
4167 case 'N':
4168 opt_no_kernel = 1;
4169 break;
4170 case 'q':
4171 lttng_opt_quiet = 1;
4172 break;
4173 case 'v':
4174 /* Verbose level can increase using multiple -v */
4175 if (arg) {
4176 lttng_opt_verbose = config_parse_value(arg);
4177 } else {
4178 lttng_opt_verbose += 1;
4179 }
4180 break;
4181 case 'Z':
4182 if (arg) {
4183 opt_verbose_consumer = config_parse_value(arg);
4184 } else {
4185 opt_verbose_consumer += 1;
4186 }
4187 break;
4188 case 'u':
4189 consumerd32_bin = strdup(arg);
4190 consumerd32_bin_override = 1;
4191 break;
4192 case 'U':
4193 consumerd32_libdir = strdup(arg);
4194 consumerd32_libdir_override = 1;
4195 break;
4196 case 't':
4197 consumerd64_bin = strdup(arg);
4198 consumerd64_bin_override = 1;
4199 break;
4200 case 'T':
4201 consumerd64_libdir = strdup(arg);
4202 consumerd64_libdir_override = 1;
4203 break;
4204 case 'p':
4205 opt_pidfile = strdup(arg);
4206 break;
4207 case 'J': /* JUL TCP port. */
4208 {
4209 unsigned long v;
4210
4211 errno = 0;
4212 v = strtoul(arg, NULL, 0);
4213 if (errno != 0 || !isdigit((unsigned char) arg[0])) {
4214 ERR("Wrong value in --jul-tcp-port parameter: %s", arg);
4215 return -1;
4216 }
4217 if (v == 0 || v > 65535) {
4218 ERR("Value out of range for --jul-tcp-port parameter (must be between 1 and 65535): %s", arg);
4219 return -1;
4220 }
4221 jul_tcp_port = (uint32_t) v;
4222 DBG3("JUL TCP port set to non default: %u", jul_tcp_port);
4223 break;
4224 }
4225 default:
4226 /* Unknown option or other error. The error message is
4227 * printed by getopt; simply return. */
4228 ret = -1;
4229 }
4230
4231 return ret;
4232 }
4233
4234 /*
4235 * config_entry_handler_cb used to handle options read from a config file.
4236 * See config_entry_handler_cb comment in common/config/config.h for the
4237 * return value conventions.
4238 */
4239 static int config_entry_handler(const struct config_entry *entry, void *unused)
4240 {
4241 int ret = 0, i;
4242
4243 if (!entry || !entry->name || !entry->value) {
4244 ret = -EINVAL;
4245 goto end;
4246 }
4247
4248 /* Check if the option is to be ignored */
4249 for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
4250 if (!strcmp(entry->name, config_ignore_options[i])) {
4251 goto end;
4252 }
4253 }
4254
4255 for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1;
4256 i++) {
4257
4258 /* Ignore if not fully matched. */
4259 if (strcmp(entry->name, long_options[i].name)) {
4260 continue;
4261 }
4262
4263 /*
4264 * If the option takes no argument on the command line, we have to
4265 * check whether the value evaluates to "true". Non-zero numeric
4266 * values, "true", "on" and "yes" are accepted.
4267 */
4268 if (!long_options[i].has_arg) {
4269 ret = config_parse_value(entry->value);
4270 if (ret <= 0) {
4271 if (ret) {
4272 WARN("Invalid configuration value \"%s\" for option %s",
4273 entry->value, entry->name);
4274 }
4275 /* False, skip boolean config option. */
4276 goto end;
4277 }
4278 }
4279
4280 ret = set_option(long_options[i].val, entry->value, entry->name);
4281 goto end;
4282 }
4283
4284 WARN("Unrecognized option \"%s\" in daemon configuration file.", entry->name);
4285
4286 end:
4287 return ret;
4288 }
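
/*
 * A minimal sketch of a configuration file this handler accepts, assuming
 * the default section name is "sessiond" (the actual name comes from
 * config_section_name). Option names must match the long option names;
 * boolean options take non-zero numbers, "true", "on" or "yes":
 *
 *   [sessiond]
 *   daemonize=yes
 *   sig-parent=on
 *   verbose=3
 */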
4289
4290 /*
4291 * Load the daemon configuration file and parse the command line arguments.
4292 */
4293 static int set_options(int argc, char **argv)
4294 {
4295 int ret = 0, c = 0, option_index = 0;
4296 int orig_optopt = optopt, orig_optind = optind;
4297 char *optstring;
4298 const char *config_path = NULL;
4299
4300 optstring = utils_generate_optstring(long_options,
4301 sizeof(long_options) / sizeof(struct option));
4302 if (!optstring) {
4303 ret = -ENOMEM;
4304 goto end;
4305 }
4306
4307 /* Check for the --config option */
4308 while ((c = getopt_long(argc, argv, optstring, long_options,
4309 &option_index)) != -1) {
4310 if (c == '?') {
4311 ret = -EINVAL;
4312 goto end;
4313 } else if (c != 'f') {
4314 /* Skip any option that is not --config. */
4315 continue;
4316 }
4317
4318 config_path = utils_expand_path(optarg);
4319 if (!config_path) {
4320 ERR("Failed to resolve path: %s", optarg);
4321 }
4322 }
4323
4324 ret = config_get_section_entries(config_path, config_section_name,
4325 config_entry_handler, NULL);
4326 if (ret) {
4327 if (ret > 0) {
4328 ERR("Invalid configuration option at line %i", ret);
4329 ret = -1;
4330 }
4331 goto end;
4332 }
4333
4334 /* Reset getopt's global state */
4335 optopt = orig_optopt;
4336 optind = orig_optind;
4337 while (1) {
4338 c = getopt_long(argc, argv, optstring, long_options, &option_index);
4339 if (c == -1) {
4340 break;
4341 }
4342
4343 ret = set_option(c, optarg, long_options[option_index].name);
4344 if (ret < 0) {
4345 break;
4346 }
4347 }
4348
4349 end:
4350 free(optstring);
4351 return ret;
4352 }
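
/*
 * Note on the two-pass parsing above: getopt_long() keeps its state in the
 * optind/optopt globals, so they must be restored between the --config scan
 * and the real scan. A minimal sketch of the idiom:
 *
 *   int saved_optind = optind;
 *   while (getopt_long(argc, argv, optstring, long_options, &idx) != -1) {
 *           // first pass: only look for --config
 *   }
 *   optind = saved_optind;   // rewind so the second pass sees everything
 *
 * Since set_option() runs for command line arguments after the configuration
 * file entries were applied, command line values override the file.
 */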
4353
4354 /*
4355 * Create the two sockets needed by the daemon:
4356 * apps_sock - the communication socket for all UST applications,
4357 * client_sock - the communication socket for the CLI tool (lttng).
4358 */
4359 static int init_daemon_socket(void)
4360 {
4361 int ret = 0;
4362 mode_t old_umask;
4363
4364 old_umask = umask(0);
4365
4366 /* Create client tool unix socket */
4367 client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
4368 if (client_sock < 0) {
4369 ERR("Create unix sock failed: %s", client_unix_sock_path);
4370 ret = -1;
4371 goto end;
4372 }
4373
4374 /* Set the cloexec flag */
4375 ret = utils_set_fd_cloexec(client_sock);
4376 if (ret < 0) {
4377 ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
4378 "Continuing but note that the consumer daemon will have a "
4379 "reference to this socket on exec()", client_sock);
4380 }
4381
4382 /* File permission MUST be 660 */
4383 ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4384 if (ret < 0) {
4385 ERR("Set file permissions failed: %s", client_unix_sock_path);
4386 PERROR("chmod");
4387 goto end;
4388 }
4389
4390 /* Create the application unix socket */
4391 apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
4392 if (apps_sock < 0) {
4393 ERR("Create unix sock failed: %s", apps_unix_sock_path);
4394 ret = -1;
4395 goto end;
4396 }
4397
4398 /* Set the cloexec flag */
4399 ret = utils_set_fd_cloexec(apps_sock);
4400 if (ret < 0) {
4401 ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
4402 "Continuing but note that the consumer daemon will have a "
4403 "reference to this socket on exec()", apps_sock);
4404 }
4405
4406 /* File permission MUST be 666 */
4407 ret = chmod(apps_unix_sock_path,
4408 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
4409 if (ret < 0) {
4410 ERR("Set file permissions failed: %s", apps_unix_sock_path);
4411 PERROR("chmod");
4412 goto end;
4413 }
4414
4415 DBG3("Session daemon client socket %d and application socket %d created",
4416 client_sock, apps_sock);
4417
4418 end:
4419 umask(old_umask);
4420 return ret;
4421 }
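
/*
 * For reference, a client such as the lttng CLI reaches this daemon through
 * a plain unix stream socket. A minimal sketch (error handling elided, path
 * hypothetical; in-tree callers use lttcomm_connect_unix_sock()):
 *
 *   struct sockaddr_un sun;
 *   int fd = socket(PF_UNIX, SOCK_STREAM, 0);
 *
 *   memset(&sun, 0, sizeof(sun));
 *   sun.sun_family = AF_UNIX;
 *   strncpy(sun.sun_path, "/var/run/lttng/client-lttng-sessiond",
 *                   sizeof(sun.sun_path) - 1);
 *   connect(fd, (struct sockaddr *) &sun, sizeof(sun));
 */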
4422
4423 /*
4424 * Check whether the global socket is available and whether a daemon is
4425 * answering on the other side. If so, an error is returned.
4426 */
4427 static int check_existing_daemon(void)
4428 {
4429 /* Is there anybody out there? */
4430 if (lttng_session_daemon_alive()) {
4431 return -EEXIST;
4432 }
4433
4434 return 0;
4435 }
4436
4437 /*
4438 * Set the tracing group gid onto the client socket.
4439 *
4440 * Race window between mkdir and chown is OK because we are going from more
4441 * permissive (root.root) to less permissive (root.tracing).
4442 */
4443 static int set_permissions(char *rundir)
4444 {
4445 int ret;
4446 gid_t gid;
4447
4448 gid = utils_get_group_id(tracing_group_name);
4449
4450 /* Set lttng run dir */
4451 ret = chown(rundir, 0, gid);
4452 if (ret < 0) {
4453 ERR("Unable to set group on %s", rundir);
4454 PERROR("chown");
4455 }
4456
4457 /*
4458 * Ensure all applications and tracing group can search the run
4459 * dir. Allow everyone to read the directory, since it does not
4460 * buy us anything to hide its content.
4461 */
4462 ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
4463 if (ret < 0) {
4464 ERR("Unable to set permissions on %s", rundir);
4465 PERROR("chmod");
4466 }
4467
4468 /* lttng client socket path */
4469 ret = chown(client_unix_sock_path, 0, gid);
4470 if (ret < 0) {
4471 ERR("Unable to set group on %s", client_unix_sock_path);
4472 PERROR("chown");
4473 }
4474
4475 /* kconsumer error socket path */
4476 ret = chown(kconsumer_data.err_unix_sock_path, 0, 0);
4477 if (ret < 0) {
4478 ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
4479 PERROR("chown");
4480 }
4481
4482 /* 64-bit ustconsumer error socket path */
4483 ret = chown(ustconsumer64_data.err_unix_sock_path, 0, 0);
4484 if (ret < 0) {
4485 ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
4486 PERROR("chown");
4487 }
4488
4489 /* 32-bit ustconsumer compat32 error socket path */
4490 ret = chown(ustconsumer32_data.err_unix_sock_path, 0, 0);
4491 if (ret < 0) {
4492 ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
4493 PERROR("chown");
4494 }
4495
4496 DBG("All permissions are set");
4497
4498 return ret;
4499 }
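
/*
 * With a tracing group named "tracing" and the default root rundir, the
 * calls above aim for a layout along these lines (illustrative only):
 *
 *   drwxr-xr-x  root tracing  /var/run/lttng
 *   srw-rw----  root tracing  /var/run/lttng/client-lttng-sessiond
 *   srw-rw----  root root     /var/run/lttng/kconsumerd/error
 *
 * Only root and tracing group members can issue commands, while the
 * consumer error sockets stay accessible to root only.
 */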
4500
4501 /*
4502 * Create the lttng run directory needed for all global sockets and pipes.
4503 */
4504 static int create_lttng_rundir(const char *rundir)
4505 {
4506 int ret;
4507
4508 DBG3("Creating LTTng run directory: %s", rundir);
4509
4510 ret = mkdir(rundir, S_IRWXU);
4511 if (ret < 0) {
4512 if (errno != EEXIST) {
4513 ERR("Unable to create %s", rundir);
4514 goto error;
4515 } else {
4516 ret = 0;
4517 }
4518 }
4519
4520 error:
4521 return ret;
4522 }
4523
4524 /*
4525 * Set up the sockets and directory needed for the consumer daemons to
4526 * communicate with the session daemon.
4527 */
4528 static int set_consumer_sockets(struct consumer_data *consumer_data,
4529 const char *rundir)
4530 {
4531 int ret;
4532 char path[PATH_MAX];
4533
4534 switch (consumer_data->type) {
4535 case LTTNG_CONSUMER_KERNEL:
4536 snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
4537 break;
4538 case LTTNG_CONSUMER64_UST:
4539 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
4540 break;
4541 case LTTNG_CONSUMER32_UST:
4542 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
4543 break;
4544 default:
4545 ERR("Consumer type unknown");
4546 ret = -EINVAL;
4547 goto error;
4548 }
4549
4550 DBG2("Creating consumer directory: %s", path);
4551
4552 ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
4553 if (ret < 0) {
4554 if (errno != EEXIST) {
4555 PERROR("mkdir");
4556 ERR("Failed to create %s", path);
4557 goto error;
4558 }
4559 ret = 0; /* Directory already exists, which is fine. */
4560 }
4561 if (is_root) {
4562 ret = chown(path, 0, utils_get_group_id(tracing_group_name));
4563 if (ret < 0) {
4564 ERR("Unable to set group on %s", path);
4565 PERROR("chown");
4566 goto error;
4567 }
4568 }
4569
4570 /* Create the consumer daemon error unix socket */
4571 consumer_data->err_sock =
4572 lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
4573 if (consumer_data->err_sock < 0) {
4574 ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
4575 ret = -1;
4576 goto error;
4577 }
4578
4579 /*
4580 * Set the CLOEXEC flag. Return code is useless because either way, the
4581 * show must go on.
4582 */
4583 ret = utils_set_fd_cloexec(consumer_data->err_sock);
4584 if (ret < 0) {
4585 PERROR("utils_set_fd_cloexec");
4586 /* continue anyway */
4587 }
4588
4589 /* File permission MUST be 660 */
4590 ret = chmod(consumer_data->err_unix_sock_path,
4591 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4592 if (ret < 0) {
4593 ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
4594 PERROR("chmod");
4595 goto error;
4596 }
4597
4598 error:
4599 return ret;
4600 }
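
/*
 * As an illustration, with the default root rundir the kernel consumer ends
 * up with paths along these lines (exact values come from defaults.h):
 *
 *   directory:       /var/run/lttng/kconsumerd
 *   error socket:    /var/run/lttng/kconsumerd/error
 *   command socket:  /var/run/lttng/kconsumerd/command
 */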
4601
4602 /*
4603 * Signal handler for the daemon
4604 *
4605 * Simply stop all worker threads, letting main() return gracefully after
4606 * joining all threads and calling cleanup().
4607 */
4608 static void sighandler(int sig)
4609 {
4610 switch (sig) {
4611 case SIGPIPE:
4612 DBG("SIGPIPE caught");
4613 return;
4614 case SIGINT:
4615 DBG("SIGINT caught");
4616 stop_threads();
4617 break;
4618 case SIGTERM:
4619 DBG("SIGTERM caught");
4620 stop_threads();
4621 break;
4622 case SIGUSR1:
4623 CMM_STORE_SHARED(recv_child_signal, 1);
4624 break;
4625 default:
4626 break;
4627 }
4628 }
4629
4630 /*
4631 * Setup the signal handler for:
4632 * SIGINT, SIGTERM, SIGPIPE and SIGUSR1
4633 */
4634 static int set_signal_handler(void)
4635 {
4636 int ret = 0;
4637 struct sigaction sa;
4638 sigset_t sigset;
4639
4640 if ((ret = sigemptyset(&sigset)) < 0) {
4641 PERROR("sigemptyset");
4642 return ret;
4643 }
4644
4645 sa.sa_handler = sighandler;
4646 sa.sa_mask = sigset;
4647 sa.sa_flags = 0;
4648 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
4649 PERROR("sigaction");
4650 return ret;
4651 }
4652
4653 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
4654 PERROR("sigaction");
4655 return ret;
4656 }
4657
4658 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
4659 PERROR("sigaction");
4660 return ret;
4661 }
4662
4663 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
4664 PERROR("sigaction");
4665 return ret;
4666 }
4667
4668 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
4669
4670 return ret;
4671 }
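
/*
 * From a shell, a graceful shutdown can thus be requested with SIGTERM or
 * SIGINT (pidfile path hypothetical; see write_pidfile() below):
 *
 *   kill -TERM "$(cat /var/run/lttng/lttng-sessiond.pid)"
 *
 * SIGUSR1 is reserved for the daemonization readiness handshake, and the
 * SIGPIPE handler is a no-op so that writes on closed sockets fail with
 * EPIPE instead of killing the process.
 */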
4672
4673 /*
4674 * Raise the open files limit. This daemon can open a large number of
4675 * file descriptors in order to consume multiple kernel traces.
4676 */
4677 static void set_ulimit(void)
4678 {
4679 int ret;
4680 struct rlimit lim;
4681
4682 /* The kernel does not allow an infinite limit for open files */
4683 lim.rlim_cur = 65535;
4684 lim.rlim_max = 65535;
4685
4686 ret = setrlimit(RLIMIT_NOFILE, &lim);
4687 if (ret < 0) {
4688 PERROR("failed to set open files limit");
4689 }
4690 }
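
/*
 * Roughly equivalent to raising the file descriptor limit from a root shell
 * before launching the daemon:
 *
 *   ulimit -n 65535
 *
 * Passing RLIM_INFINITY to setrlimit() would be rejected for RLIMIT_NOFILE
 * (see the comment above), hence the fixed cap.
 */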
4691
4692 /*
4693 * Write pidfile using the rundir and opt_pidfile.
4694 */
4695 static void write_pidfile(void)
4696 {
4697 int ret;
4698 char pidfile_path[PATH_MAX];
4699
4700 assert(rundir);
4701
4702 if (opt_pidfile) {
4703 snprintf(pidfile_path, sizeof(pidfile_path), "%s", opt_pidfile);
4704 } else {
4705 /* Build pidfile path from rundir and the default pidfile name. */
4706 ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
4707 DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
4708 if (ret < 0) {
4709 PERROR("snprintf pidfile path");
4710 goto error;
4711 }
4712 }
4713
4714 /*
4715 * Create the pid file in rundir. The return value is of no importance:
4716 * execution continues even if we are unable to write the file.
4717 */
4718 (void) utils_create_pid_file(getpid(), pidfile_path);
4719
4720 error:
4721 return;
4722 }
4723
4724 /*
4725 * Write JUL TCP port using the rundir.
4726 */
4727 static void write_julport(void)
4728 {
4729 int ret;
4730 char path[PATH_MAX];
4731
4732 assert(rundir);
4733
4734 ret = snprintf(path, sizeof(path), "%s/"
4735 DEFAULT_LTTNG_SESSIOND_JULPORT_FILE, rundir);
4736 if (ret < 0) {
4737 PERROR("snprintf julport path");
4738 goto error;
4739 }
4740
4741 /*
4742 * Create the JUL TCP port file in rundir. The return value is of no
4743 * importance: execution continues even if we are unable to write the
4744 * file.
4745 */
4746 (void) utils_create_pid_file(jul_tcp_port, path);
4747
4748 error:
4749 return;
4750 }
4751
4752 /*
4753 * main
4754 */
4755 int main(int argc, char **argv)
4756 {
4757 int ret = 0;
4758 void *status;
4759 const char *home_path, *env_app_timeout;
4760
4761 init_kernel_workarounds();
4762
4763 rcu_register_thread();
4764
4765 if ((ret = set_signal_handler()) < 0) {
4766 goto error;
4767 }
4768
4769 setup_consumerd_path();
4770
4771 page_size = sysconf(_SC_PAGESIZE);
4772 if (page_size < 0) {
4773 PERROR("sysconf _SC_PAGESIZE");
4774 page_size = LONG_MAX;
4775 WARN("Fallback page size to %ld", page_size);
4776 }
4777
4778 /* Parse arguments and load the daemon configuration file */
4779 progname = argv[0];
4780 if ((ret = set_options(argc, argv)) < 0) {
4781 goto error;
4782 }
4783
4784 /* Daemonize */
4785 if (opt_daemon || opt_background) {
4786 int i;
4787
4788 ret = lttng_daemonize(&child_ppid, &recv_child_signal,
4789 !opt_background);
4790 if (ret < 0) {
4791 goto error;
4792 }
4793
4794 /*
4795 * We are in the child. Make sure all other file descriptors are
4796 * closed, in case we were started with more open file descriptors than
4797 * the standard ones.
4798 */
4799 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
4800 (void) close(i);
4801 }
4802 }
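
/*
 * Past this point the process is fully detached when --daemonize was used.
 * Readiness is signalled with SIGUSR1: lttng_daemonize() has the parent wait
 * for it, and with -S, --sig-parent the original parent is also notified
 * (see the SIGUSR1 case in sighandler() and the getppid() call below).
 */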
4803
4804 /* Create thread quit pipe */
4805 if ((ret = init_thread_quit_pipe()) < 0) {
4806 goto error;
4807 }
4808
4809 /* Check if the daemon is running as root (UID 0). */
4810 is_root = !getuid();
4811
4812 if (is_root) {
4813 rundir = strdup(DEFAULT_LTTNG_RUNDIR);
4814
4815 /* Create global run dir with root access */
4816 ret = create_lttng_rundir(rundir);
4817 if (ret < 0) {
4818 goto error;
4819 }
4820
4821 if (strlen(apps_unix_sock_path) == 0) {
4822 snprintf(apps_unix_sock_path, PATH_MAX,
4823 DEFAULT_GLOBAL_APPS_UNIX_SOCK);
4824 }
4825
4826 if (strlen(client_unix_sock_path) == 0) {
4827 snprintf(client_unix_sock_path, PATH_MAX,
4828 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
4829 }
4830
4831 /* Set global SHM for ust */
4832 if (strlen(wait_shm_path) == 0) {
4833 snprintf(wait_shm_path, PATH_MAX,
4834 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
4835 }
4836
4837 if (strlen(health_unix_sock_path) == 0) {
4838 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4839 DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
4840 }
4841
4842 /* Setup kernel consumerd path */
4843 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
4844 DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
4845 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
4846 DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);
4847
4848 DBG2("Kernel consumer err path: %s",
4849 kconsumer_data.err_unix_sock_path);
4850 DBG2("Kernel consumer cmd path: %s",
4851 kconsumer_data.cmd_unix_sock_path);
4852 } else {
4853 home_path = utils_get_home_dir();
4854 if (home_path == NULL) {
4855 /* TODO: Add --socket PATH option */
4856 ERR("Can't get HOME directory for sockets creation.");
4857 ret = -EPERM;
4858 goto error;
4859 }
4860
4861 /*
4862 * Create rundir from home path. This will create something like
4863 * $HOME/.lttng
4864 */
4865 ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
4866 if (ret < 0) {
4867 ret = -ENOMEM;
4868 goto error;
4869 }
4870
4871 ret = create_lttng_rundir(rundir);
4872 if (ret < 0) {
4873 goto error;
4874 }
4875
4876 if (strlen(apps_unix_sock_path) == 0) {
4877 snprintf(apps_unix_sock_path, PATH_MAX,
4878 DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
4879 }
4880
4881 /* Set the cli tool unix socket path */
4882 if (strlen(client_unix_sock_path) == 0) {
4883 snprintf(client_unix_sock_path, PATH_MAX,
4884 DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
4885 }
4886
4887 /* Set global SHM for ust */
4888 if (strlen(wait_shm_path) == 0) {
4889 snprintf(wait_shm_path, PATH_MAX,
4890 DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
4891 }
4892
4893 /* Set health check Unix path */
4894 if (strlen(health_unix_sock_path) == 0) {
4895 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4896 DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
4897 }
4898 }
4899
4900 /* Set consumer initial state */
4901 kernel_consumerd_state = CONSUMER_STOPPED;
4902 ust_consumerd_state = CONSUMER_STOPPED;
4903
4904 DBG("Client socket path %s", client_unix_sock_path);
4905 DBG("Application socket path %s", apps_unix_sock_path);
4906 DBG("Application wait path %s", wait_shm_path);
4907 DBG("LTTng run directory path: %s", rundir);
4908
4909 /* 32 bits consumerd path setup */
4910 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
4911 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
4912 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
4913 DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);
4914
4915 DBG2("UST consumer 32 bits err path: %s",
4916 ustconsumer32_data.err_unix_sock_path);
4917 DBG2("UST consumer 32 bits cmd path: %s",
4918 ustconsumer32_data.cmd_unix_sock_path);
4919
4920 /* 64 bits consumerd path setup */
4921 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
4922 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
4923 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
4924 DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);
4925
4926 DBG2("UST consumer 64 bits err path: %s",
4927 ustconsumer64_data.err_unix_sock_path);
4928 DBG2("UST consumer 64 bits cmd path: %s",
4929 ustconsumer64_data.cmd_unix_sock_path);
4930
4931 /*
4932 * See if a daemon already exists.
4933 */
4934 if ((ret = check_existing_daemon()) < 0) {
4935 ERR("Already running daemon.\n");
4936 /*
4937 * We do not goto exit because we must not call cleanup():
4938 * a daemon is already running and cleanup() would tear down its resources.
4939 */
4940 goto error;
4941 }
4942
4943 /*
4944 * Init UST app hash table. The hash table is allocated before this point
4945 * because cleanup(), which can be called after it, accesses the table.
4946 */
4947 ust_app_ht_alloc();
4948
4949 /* Initialize JUL domain subsystem. */
4950 if ((ret = jul_init()) < 0) {
4951 /* ENOMEM at this point. */
4952 goto error;
4953 }
4954
4955 /* After this point, we can safely call cleanup() with "goto exit" */
4956
4957 /*
4958 * These actions must be executed as root. We do that *after* setting up
4959 * the sockets path because we MUST make the check for another daemon using
4960 * those paths *before* trying to set the kernel consumer sockets and init
4961 * kernel tracer.
4962 */
4963 if (is_root) {
4964 ret = set_consumer_sockets(&kconsumer_data, rundir);
4965 if (ret < 0) {
4966 goto exit;
4967 }
4968
4969 /* Setup kernel tracer */
4970 if (!opt_no_kernel) {
4971 init_kernel_tracer();
4972 }
4973
4974 /* Set ulimit for open files */
4975 set_ulimit();
4976 }
4977 /* Init lttng_fd tracking; it must be done after set_ulimit. */
4978 lttng_fd_init();
4979
4980 ret = set_consumer_sockets(&ustconsumer64_data, rundir);
4981 if (ret < 0) {
4982 goto exit;
4983 }
4984
4985 ret = set_consumer_sockets(&ustconsumer32_data, rundir);
4986 if (ret < 0) {
4987 goto exit;
4988 }
4989
4990 /* Setup the needed unix socket */
4991 if ((ret = init_daemon_socket()) < 0) {
4992 goto exit;
4993 }
4994
4995 /* Set credentials to socket */
4996 if (is_root && ((ret = set_permissions(rundir)) < 0)) {
4997 goto exit;
4998 }
4999
5000 /* Get parent pid if -S, --sig-parent is specified. */
5001 if (opt_sig_parent) {
5002 ppid = getppid();
5003 }
5004
5005 /* Setup the kernel pipe for waking up the kernel thread */
5006 if (is_root && !opt_no_kernel) {
5007 if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
5008 goto exit;
5009 }
5010 }
5011
5012 /* Setup the thread ht_cleanup communication pipe. */
5013 if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
5014 goto exit;
5015 }
5016
5017 /* Setup the thread apps communication pipe. */
5018 if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
5019 goto exit;
5020 }
5021
5022 /* Setup the thread apps notify communication pipe. */
5023 if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
5024 goto exit;
5025 }
5026
5027 /* Initialize global buffer per UID and PID registry. */
5028 buffer_reg_init_uid_registry();
5029 buffer_reg_init_pid_registry();
5030
5031 /* Init UST command queue. */
5032 cds_wfq_init(&ust_cmd_queue.queue);
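
/*
 * This wait-free queue carries UST application registration commands from
 * the registration thread to the dispatch thread; the matching futex in
 * ust_cmd_queue is used to wake the dispatcher, so no lock is taken around
 * the queue itself.
 */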
5033
5034 /*
5035 * Get the session list pointer. This pointer MUST NOT be free()'d. The
5036 * list is statically declared in session.c.
5037 */
5038 session_list_ptr = session_get_list();
5039
5040 /* Set up max poll set size */
5041 lttng_poll_set_max_size();
5042
5043 cmd_init();
5044
5045 /* Check for the application socket timeout env variable. */
5046 env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
5047 if (env_app_timeout) {
5048 app_socket_timeout = atoi(env_app_timeout);
5049 } else {
5050 app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
5051 }
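
/*
 * For example, assuming the variable is named LTTNG_APP_SOCKET_TIMEOUT (the
 * actual name comes from DEFAULT_APP_SOCKET_TIMEOUT_ENV), one could run:
 *
 *   LTTNG_APP_SOCKET_TIMEOUT=10 lttng-sessiond
 *
 * to give application socket read/write operations a 10 second timeout.
 */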
5052
5053 write_pidfile();
5054 write_julport();
5055
5056 /* Initialize communication library */
5057 lttcomm_init();
5058 /* This is to get the TCP timeout value. */
5059 lttcomm_inet_init();
5060
5061 /*
5062 * Initialize the health check subsystem. This call should set the
5063 * appropriate time values.
5064 */
5065 health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
5066 if (!health_sessiond) {
5067 PERROR("health_app_create error");
5068 goto exit_health_sessiond_cleanup;
5069 }
5070
5071 /* Create thread to clean up RCU hash tables */
5072 ret = pthread_create(&ht_cleanup_thread, NULL,
5073 thread_ht_cleanup, (void *) NULL);
5074 if (ret != 0) {
5075 PERROR("pthread_create ht_cleanup");
5076 goto exit_ht_cleanup;
5077 }
5078
5079 /* Create health-check thread */
5080 ret = pthread_create(&health_thread, NULL,
5081 thread_manage_health, (void *) NULL);
5082 if (ret != 0) {
5083 PERROR("pthread_create health");
5084 goto exit_health;
5085 }
5086
5087 /* Create thread to manage the client socket */
5088 ret = pthread_create(&client_thread, NULL,
5089 thread_manage_clients, (void *) NULL);
5090 if (ret != 0) {
5091 PERROR("pthread_create clients");
5092 goto exit_client;
5093 }
5094
5095 /* Create thread to dispatch registration */
5096 ret = pthread_create(&dispatch_thread, NULL,
5097 thread_dispatch_ust_registration, (void *) NULL);
5098 if (ret != 0) {
5099 PERROR("pthread_create dispatch");
5100 goto exit_dispatch;
5101 }
5102
5103 /* Create thread to manage application registration. */
5104 ret = pthread_create(&reg_apps_thread, NULL,
5105 thread_registration_apps, (void *) NULL);
5106 if (ret != 0) {
5107 PERROR("pthread_create registration");
5108 goto exit_reg_apps;
5109 }
5110
5111 /* Create thread to manage application socket */
5112 ret = pthread_create(&apps_thread, NULL,
5113 thread_manage_apps, (void *) NULL);
5114 if (ret != 0) {
5115 PERROR("pthread_create apps");
5116 goto exit_apps;
5117 }
5118
5119 /* Create thread to manage application notify socket */
5120 ret = pthread_create(&apps_notify_thread, NULL,
5121 ust_thread_manage_notify, (void *) NULL);
5122 if (ret != 0) {
5123 PERROR("pthread_create apps");
5124 goto exit_apps_notify;
5125 }
5126
5127 /* Create JUL registration thread. */
5128 ret = pthread_create(&jul_reg_thread, NULL,
5129 jul_thread_manage_registration, (void *) NULL);
5130 if (ret != 0) {
5131 PERROR("pthread_create apps");
5132 goto exit_jul_reg;
5133 }
5134
5135 /* Don't start this thread if kernel tracing is not requested or we are not root. */
5136 if (is_root && !opt_no_kernel) {
5137 /* Create kernel thread to manage kernel event */
5138 ret = pthread_create(&kernel_thread, NULL,
5139 thread_manage_kernel, (void *) NULL);
5140 if (ret != 0) {
5141 PERROR("pthread_create kernel");
5142 goto exit_kernel;
5143 }
5144
5145 ret = pthread_join(kernel_thread, &status);
5146 if (ret != 0) {
5147 PERROR("pthread_join");
5148 goto error; /* join error, exit without cleanup */
5149 }
5150 }
5151
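/*
 * The labels below intentionally fall through: each pthread_create() failure
 * above jumps past its own thread's join, so every thread that did start is
 * joined in reverse creation order before cleanup() runs.
 */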
5152 exit_kernel:
5153 ret = pthread_join(jul_reg_thread, &status);
5154 if (ret != 0) {
5155 PERROR("pthread_join JUL");
5156 goto error; /* join error, exit without cleanup */
5157 }
5158
5159 exit_jul_reg:
5160 ret = pthread_join(apps_notify_thread, &status);
5161 if (ret != 0) {
5162 PERROR("pthread_join apps notify");
5163 goto error; /* join error, exit without cleanup */
5164 }
5165
5166 exit_apps_notify:
5167 ret = pthread_join(apps_thread, &status);
5168 if (ret != 0) {
5169 PERROR("pthread_join apps");
5170 goto error; /* join error, exit without cleanup */
5171 }
5172
5174 exit_apps:
5175 ret = pthread_join(reg_apps_thread, &status);
5176 if (ret != 0) {
5177 PERROR("pthread_join");
5178 goto error; /* join error, exit without cleanup */
5179 }
5180
5181 exit_reg_apps:
5182 ret = pthread_join(dispatch_thread, &status);
5183 if (ret != 0) {
5184 PERROR("pthread_join");
5185 goto error; /* join error, exit without cleanup */
5186 }
5187
5188 exit_dispatch:
5189 ret = pthread_join(client_thread, &status);
5190 if (ret != 0) {
5191 PERROR("pthread_join");
5192 goto error; /* join error, exit without cleanup */
5193 }
5194
5195 ret = join_consumer_thread(&kconsumer_data);
5196 if (ret != 0) {
5197 PERROR("join_consumer");
5198 goto error; /* join error, exit without cleanup */
5199 }
5200
5201 ret = join_consumer_thread(&ustconsumer32_data);
5202 if (ret != 0) {
5203 PERROR("join_consumer ust32");
5204 goto error; /* join error, exit without cleanup */
5205 }
5206
5207 ret = join_consumer_thread(&ustconsumer64_data);
5208 if (ret != 0) {
5209 PERROR("join_consumer ust64");
5210 goto error; /* join error, exit without cleanup */
5211 }
5212
5213 exit_client:
5214 ret = pthread_join(health_thread, &status);
5215 if (ret != 0) {
5216 PERROR("pthread_join health thread");
5217 goto error; /* join error, exit without cleanup */
5218 }
5219
5220 exit_health:
5221 ret = pthread_join(ht_cleanup_thread, &status);
5222 if (ret != 0) {
5223 PERROR("pthread_join ht cleanup thread");
5224 goto error; /* join error, exit without cleanup */
5225 }
5226 exit_ht_cleanup:
5227 health_app_destroy(health_sessiond);
5228 exit_health_sessiond_cleanup:
5229 exit:
5230 /*
5231 * cleanup() is called when no other thread is running.
5232 */
5233 rcu_thread_online();
5234 cleanup();
5235 rcu_thread_offline();
5236 rcu_unregister_thread();
5237 if (!ret) {
5238 exit(EXIT_SUCCESS);
5239 }
5240 error:
5241 exit(EXIT_FAILURE);
5242 }