/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/daemonize.h>
#include <common/config/config.h>
#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "health-sessiond.h"
#include "testpoint.h"
#include "ust-thread.h"
#include "agent-thread.h"
#include "load-session-thread.h"

#define CONSUMERD_FILE	"lttng-consumerd"
static const char *tracing_group_name = DEFAULT_TRACING_GROUP;
static int tracing_group_name_override;
static char *opt_pidfile;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon, opt_background;
static int opt_no_kernel;
static char *opt_load_session_path;
static pid_t ppid;		/* Parent PID for --sig-parent option */
static pid_t child_ppid;	/* Internal parent PID use with daemonize. */
static int lockfile_fd = -1;

/* Set to 1 when a SIGUSR1 signal is received. */
static int recv_child_signal;
/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
/* Command line options */
static const struct option long_options[] = {
	{ "client-sock", 1, 0, 'c' },
	{ "apps-sock", 1, 0, 'a' },
	{ "kconsumerd-cmd-sock", 1, 0, 'C' },
	{ "kconsumerd-err-sock", 1, 0, 'E' },
	{ "ustconsumerd32-cmd-sock", 1, 0, 'G' },
	{ "ustconsumerd32-err-sock", 1, 0, 'H' },
	{ "ustconsumerd64-cmd-sock", 1, 0, 'D' },
	{ "ustconsumerd64-err-sock", 1, 0, 'F' },
	{ "consumerd32-path", 1, 0, 'u' },
	{ "consumerd32-libdir", 1, 0, 'U' },
	{ "consumerd64-path", 1, 0, 't' },
	{ "consumerd64-libdir", 1, 0, 'T' },
	{ "daemonize", 0, 0, 'd' },
	{ "background", 0, 0, 'b' },
	{ "sig-parent", 0, 0, 'S' },
	{ "help", 0, 0, 'h' },
	{ "group", 1, 0, 'g' },
	{ "version", 0, 0, 'V' },
	{ "quiet", 0, 0, 'q' },
	{ "verbose", 0, 0, 'v' },
	{ "verbose-consumer", 0, 0, 'Z' },
	{ "no-kernel", 0, 0, 'N' },
	{ "pidfile", 1, 0, 'p' },
	{ "agent-tcp-port", 1, 0, 'J' },
	{ "config", 1, 0, 'f' },
	{ "load", 1, 0, 'l' },
	{ "kmod-probes", 1, 0, 'P' },
	{ "extra-kmod-probes", 1, 0, 'e' },
	{ NULL, 0, 0, 0 }
};
/* Command line options to ignore from configuration file */
static const char *config_ignore_options[] = { "help", "version", "config" };

/* Shared between threads */
static int dispatch_thread_exit;
/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* Global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };
static int ht_cleanup_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

int apps_cmd_notify_pipe[2] = { -1, -1 };
/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
static pthread_t ht_cleanup_thread;
static pthread_t agent_reg_thread;
static pthread_t load_session_thread;
/*
 * UST registration command queue. This queue is tied to a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_registration_apps and thread_dispatch_ust_registration threads
 * use this queue along with the wait/wake scheme. The thread_manage_apps
 * thread receives down the line new application sockets and monitors them
 * for any I/O error or clean close that triggers an unregistration of the
 * application.
 */
static struct ust_cmd_queue ust_cmd_queue;

/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;
int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
static int consumerd32_bin_override;
static int consumerd64_bin_override;
static int consumerd32_libdir_override;
static int consumerd64_libdir_override;

static const char *module_proc_lttng = "/proc/lttng";
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR   = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd try to talk to consumer
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;

/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;
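
/*
 * These two state variables are shared between the consumer management
 * thread and the client command path; as used below they are only accessed
 * through liburcu's uatomic_read()/uatomic_set() helpers (see
 * thread_manage_consumer() and update_ust_app()), so no additional lock is
 * taken around them.
 */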
/* Set in main() with the current page size. */
long page_size;

/* Application health monitoring */
struct health_app *health_sessiond;

/* Agent TCP port for registration. Used by the agent thread. */
unsigned int agent_tcp_port = DEFAULT_AGENT_TCP_PORT;

/* Am I root or not. */
int is_root;			/* Set to 1 if the daemon is running as root */

const char * const config_section_name = "sessiond";

/* Load session thread information to operate. */
struct load_session_thread_data *load_info;

/*
 * Whether sessiond is ready for commands/health check requests.
 * NR_LTTNG_SESSIOND_READY must match the number of calls to
 * sessiond_notify_ready().
 */
#define NR_LTTNG_SESSIOND_READY		3
int lttng_sessiond_ready = NR_LTTNG_SESSIOND_READY;
/* Notify parents that we are ready for cmd and health check */
void sessiond_notify_ready(void)
{
	if (uatomic_sub_return(&lttng_sessiond_ready, 1) == 0) {
		/*
		 * Notify parent pid that we are ready to accept commands
		 * for the client side. This ppid is the one from the
		 * external process that spawned us.
		 */
		if (opt_sig_parent) {
			kill(ppid, SIGUSR1);
		}

		/*
		 * Notify the parent of the fork() process that we are
		 * ready.
		 */
		if (opt_daemon || opt_background) {
			kill(child_ppid, SIGUSR1);
		}
	}
}
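
/*
 * In other words: lttng_sessiond_ready starts at NR_LTTNG_SESSIOND_READY and
 * every subsystem that must be operational before the daemon accepts commands
 * calls sessiond_notify_ready() exactly once; the parent process is only
 * signalled (SIGUSR1) when the counter reaches zero. Adding a new mandatory
 * subsystem therefore also means bumping NR_LTTNG_SESSIOND_READY.
 */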
static void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * Runtime env. var. overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
static int __sessiond_set_thread_pollset(struct lttng_poll_event *events,
		size_t size, int *a_pipe)
{
	int ret;

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, a_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
{
	return __sessiond_set_thread_pollset(events, size, thread_quit_pipe);
}

/*
 * Create a poll set with O_CLOEXEC and add the ht_cleanup quit pipe to the set.
 */
int sessiond_set_ht_cleanup_thread_pollset(struct lttng_poll_event *events,
		size_t size)
{
	return __sessiond_set_thread_pollset(events, size,
			ht_cleanup_quit_pipe);
}
static int __sessiond_check_thread_quit_pipe(int fd, uint32_t events,
		int a_pipe)
{
	if (fd == a_pipe && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}

/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
{
	return __sessiond_check_thread_quit_pipe(fd, events,
			thread_quit_pipe[0]);
}

/*
 * Check if the ht_cleanup thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
int sessiond_check_ht_cleanup_quit(int fd, uint32_t events)
{
	return __sessiond_check_thread_quit_pipe(fd, events,
			ht_cleanup_quit_pipe[0]);
}
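
/*
 * Typical usage by the threads below, sketched roughly:
 *
 *	ret = sessiond_set_thread_pollset(&events, 2);
 *	...
 *	ret = lttng_poll_wait(&events, -1);
 *	...
 *	if (sessiond_check_thread_quit_pipe(pollfd, revents)) {
 *		err = 0;
 *		goto exit;
 *	}
 */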
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int __init_thread_quit_pipe(int *a_pipe)
{
	int ret, i;

	ret = pipe(a_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(a_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error;
		}
	}

error:
	return ret;
}

static int init_thread_quit_pipe(void)
{
	return __init_thread_quit_pipe(thread_quit_pipe);
}

static int init_ht_cleanup_quit_pipe(void)
{
	return __init_thread_quit_pipe(ht_cleanup_quit_pipe);
}
/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}
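
/*
 * stop_threads() only needs two wake-up mechanisms: the write to
 * thread_quit_pipe[1] unblocks every thread polling on the quit pipe, while
 * the CMM_STORE_SHARED()/futex_nto1_wake() pair wakes the dispatch thread,
 * which sleeps on the command queue futex instead of the quit pipe.
 */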
/*
 * Close every consumer socket.
 */
static void close_consumer_sockets(void)
{
	int ret;

	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
}
/*
 * Generate the full lock file path using the rundir.
 *
 * Return the snprintf() return value thus a negative value is an error.
 */
static int generate_lock_file_path(char *path, size_t len)
{
	int ret;

	/* Build lockfile path from rundir. */
	ret = snprintf(path, len, "%s/" DEFAULT_LTTNG_SESSIOND_LOCKFILE, rundir);
	if (ret < 0) {
		PERROR("snprintf lockfile path");
	}

	return ret;
}
/*
 * Cleanup the session daemon's data structures.
 */
static void sessiond_cleanup(void)
{
	int ret;
	struct ltt_session *sess, *stmp;
	char path[PATH_MAX];

	DBG("Cleanup sessiond");

	/*
	 * Close the thread quit pipe. It has already done its job,
	 * since we are now called.
	 */
	utils_close_pipe(thread_quit_pipe);

	/*
	 * If opt_pidfile is undefined, the default file will be wiped when
	 * removing the rundir.
	 */
	if (opt_pidfile) {
		ret = remove(opt_pidfile);
		if (ret < 0) {
			PERROR("remove pidfile %s", opt_pidfile);
		}
	}

	DBG("Removing sessiond and consumerd content of directory %s", rundir);

	/* sessiond */
	snprintf(path, PATH_MAX, "%s/%s",
			rundir, DEFAULT_LTTNG_SESSIOND_PIDFILE);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX, "%s/%s", rundir,
			DEFAULT_LTTNG_SESSIOND_AGENTPORT_FILE);
	DBG("Removing %s", path);
	(void) unlink(path);

	/* kconsumerd */
	snprintf(path, PATH_MAX,
			DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
			rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
			DEFAULT_KCONSUMERD_PATH,
			rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/* ust consumerd 32 */
	snprintf(path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
			rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_PATH,
			rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/* ust consumerd 64 */
	snprintf(path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
			rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_PATH,
			rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL sessions */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	DBG("Closing all UST sockets");
	ust_app_clean_list();
	buffer_reg_destroy_registries();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	close_consumer_sockets();

	load_session_destroy_data(load_info);

	/*
	 * Cleanup lock file by deleting it and finally closing it which will
	 * release the file system lock.
	 */
	if (lockfile_fd >= 0) {
		char lockfile_path[PATH_MAX];

		ret = generate_lock_file_path(lockfile_path,
				sizeof(lockfile_path));
		if (ret > 0) {
			ret = remove(lockfile_path);
			if (ret < 0) {
				PERROR("remove lock file");
			}
			ret = close(lockfile_fd);
			if (ret < 0) {
				PERROR("close lock file");
			}
		}
	}

	/*
	 * We do NOT rmdir rundir because there are other processes
	 * using it, for instance lttng-relayd, which can start in
	 * parallel with this teardown.
	 */
}
/*
 * Cleanup the daemon's option data structures.
 */
static void sessiond_cleanup_options(void)
{
	DBG("Cleaning up options");

	/*
	 * If the override option is set, the pointer points to *non-const*
	 * memory, so it is safe to free it even though the variable type is
	 * declared const.
	 */
	if (tracing_group_name_override) {
		free((void *) tracing_group_name);
	}
	if (consumerd32_bin_override) {
		free((void *) consumerd32_bin);
	}
	if (consumerd64_bin_override) {
		free((void *) consumerd64_bin);
	}
	if (consumerd32_libdir_override) {
		free((void *) consumerd32_libdir);
	}
	if (consumerd64_libdir_override) {
		free((void *) consumerd64_libdir);
	}

	free(opt_load_session_path);
	free(kmod_probes_list);
	free(kmod_extra_probes_list);

	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
}
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return the total size of the buffer pointed to by buf.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	int ret, buf_size;

	buf_size = size;

	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		PERROR("zmalloc");
		ret = -ENOMEM;
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;

	return buf_size;

error:
	return ret;
}
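
/*
 * Layout of the reply built above: a struct lttcomm_lttng_msg header
 * immediately followed by "size" bytes of command-specific payload, hence
 * cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size.
 */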
/*
 * Update the kernel poll set of all channel fds available over all tracing
 * sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel streams and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel,
				&ksess->channel_list.head, list) {
			struct lttng_ht_iter iter;
			struct consumer_socket *socket;

			if (channel->fd != fd) {
				continue;
			}
			DBG("Channel found, updating kernel streams");
			ret = kernel_open_channel_stream(channel);
			if (ret < 0) {
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;

			/*
			 * Have we already sent fds to the consumer? If yes, it
			 * means that tracing is started so it is safe to send
			 * our updated stream fds.
			 */
			if (ksess->consumer_fds_sent != 1
					|| ksess->consumer == NULL) {
				ret = -1;
				goto error;
			}

			cds_lfht_for_each_entry(ksess->consumer->socks->ht,
					&iter.iter, socket, node.node) {
				pthread_mutex_lock(socket->lock);
				ret = kernel_consumer_send_channel_stream(socket,
						channel, ksess,
						session->output_traces ? 1 : 0);
				pthread_mutex_unlock(socket->lock);
				if (ret < 0) {
					goto error;
				}
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}
/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}
}
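
/*
 * Usage sketch, roughly what the dispatch thread does:
 *
 *	session_lock_list();
 *	update_ust_app(app->sock);
 *	session_unlock_list();
 */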
/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * This first step of the while is to clean this structure which could free
	 * non NULL pointers so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = sessiond_set_thread_pollset(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Poll infinite value of time */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
					"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				(void) lttng_read(kernel_poll_pipe[0],
						&tmp, 1);
				/*
				 * Ret value is useless here, if this pipe gets any actions an
				 * update is required anyway.
				 */
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
				}
				/*
				 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
				 * and unregister kernel stream at this point.
				 */
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}
/*
 * Signal the pthread condition of the consumer data that the thread is ready.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread bootstrap.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
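
/*
 * This is the signalling half of the handshake with spawn_consumer_thread():
 * that function sits in pthread_cond_(timed)wait() on data->cond until
 * consumer_thread_is_ready becomes non-zero (1 on success, negative on
 * error), which thread_manage_consumer() reports through this helper.
 */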
/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);

	health_code_update();

	/*
	 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
	 * metadata_sock. Nothing more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 3);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_entry();

	if (testpoint(sessiond_thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		/* Thread quit pipe has been closed. Killing thread. */
		ret = sessiond_check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update();
	if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		/* Connect both sockets, command and metadata. */
		consumer_data->cmd_sock =
				lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		consumer_data->metadata_fd =
				lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0
				|| consumer_data->metadata_fd < 0) {
			PERROR("consumer connect cmd socket");
			/* On error, signal condition and quit. */
			signal_consumer_condition(consumer_data, -1);
			goto error;
		}
		consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
		/* Create metadata socket lock. */
		consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
		if (consumer_data->metadata_sock.lock == NULL) {
			PERROR("zmalloc pthread mutex");
			goto error;
		}
		pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);

		signal_consumer_condition(consumer_data, 1);
		DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
		DBG("Consumer metadata socket ready (fd: %d)",
				consumer_data->metadata_fd);
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the consumerd error sock since we've established a connection */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	/* Add metadata socket that is successfully connected. */
	ret = lttng_poll_add(&events, consumer_data->metadata_fd,
			LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
	while (1) {
		health_code_update();

		/* Exit the thread because the thread quit pipe has been triggered. */
		if (should_quit) {
			/* Not a health error. */
			err = 0;
			goto exit;
		}

restart_poll:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart_poll;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/*
			 * Thread quit pipe has been triggered, flag that we should stop
			 * but continue the current loop to handle potential data from
			 * the consumer.
			 */
			should_quit = sessiond_check_thread_quit_pipe(pollfd, revents);

			if (pollfd == sock) {
				/* Event on the consumerd socket */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("consumer err socket second poll error");
					goto error;
				}
				health_code_update();
				/* Wait for any kconsumerd error */
				ret = lttcomm_recv_unix_sock(sock, &code,
						sizeof(enum lttcomm_return_code));
				if (ret <= 0) {
					ERR("consumer closed the command socket");
					goto error;
				}

				ERR("consumer return code : %s",
						lttcomm_get_readable_code(-code));

				goto exit;
			} else if (pollfd == consumer_data->metadata_fd) {
				/* UST metadata requests */
				ret = ust_consumer_metadata_request(
						&consumer_data->metadata_sock);
				if (ret < 0) {
					ERR("Handling metadata request");
					goto error;
				}
			}
			/* No need for an else branch all FDs are tested prior. */
		}
		health_code_update();
	}

exit:
error:
	/*
	 * We lock here because we are about to close the sockets and some other
	 * thread might be using them so get exclusive access which will abort all
	 * other consumer commands by other threads.
	 */
	pthread_mutex_lock(&consumer_data->lock);

	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->err_sock = -1;
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->cmd_sock = -1;
	}
	if (consumer_data->metadata_sock.fd_ptr &&
			*consumer_data->metadata_sock.fd_ptr >= 0) {
		ret = close(*consumer_data->metadata_sock.fd_ptr);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;
	pthread_mutex_unlock(&consumer_data->lock);

	/* Cleanup metadata socket mutex. */
	if (consumer_data->metadata_sock.lock) {
		pthread_mutex_destroy(consumer_data->metadata_sock.lock);
		free(consumer_data->metadata_sock.lock);
	}
	lttng_poll_clean(&events);
error_poll:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("consumer thread cleanup completed");

	return NULL;
}
/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);

	if (testpoint(sessiond_thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					int sock;
					ssize_t size_ret;

					/* Empty pipe */
					size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
					if (size_ret < sizeof(sock)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * We only monitor the error events of the socket. This
					 * thread does not handle any incoming data from UST
					 * (POLLIN).
					 */
					ret = lttng_poll_add(&events, sock,
							LPOLLERR | LPOLLHUP | LPOLLRDHUP);
					if (ret < 0) {
						goto error;
					}

					DBG("Apps with sock %d added to poll set", sock);
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
				}
			}

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
/*
 * Send a socket to a thread. This is called from the dispatch UST registration
 * thread once all sockets are set for the application.
 *
 * The sock value can be invalid, we don't really care, the thread will handle
 * it and make the necessary cleanup if so.
 *
 * On success, return 0 else a negative value being the errno message of the
 * write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	ssize_t ret;

	/*
	 * It's possible that the FD is set as invalid with -1 concurrently just
	 * before calling this function being a shutdown state of the thread.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	ret = lttng_write(fd, &sock, sizeof(sock));
	if (ret < sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the write positive ret value. */
	ret = 0;
error:
	return (int) ret;
}
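
/*
 * Both pipes fed by this helper (apps_cmd_pipe and apps_cmd_notify_pipe)
 * carry exactly one int per message, so any short write is treated as an
 * error and reported back to the caller as a negative value.
 */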
/*
 * Sanitize the wait queue of the dispatch registration thread meaning removing
 * invalid nodes from it. This is to avoid memory leaks for the case the UST
 * notify socket is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue->head, head) {
		assert(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock,
				LPOLLHUP | LPOLLERR);
		if (ret < 0) {
			goto error;
		}

		fd_added = 1;
	}

	if (!fd_added) {
		goto end;
	}

	/*
	 * Poll but don't block so we can quickly identify the faulty events and
	 * clean them afterwards from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);
	if (ret < 0) {
		goto error;
	}
	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
				&wait_queue->head, head) {
			if (pollfd == wait_node->app->sock &&
					(revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
				free(wait_node);
				break;
			}
		}
	}

	if (nb_fd > 0) {
		DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);
	}

end:
	lttng_poll_clean(&events);
	return;

error:
	lttng_poll_clean(&events);
error_create:
	ERR("Unable to sanitize wait queue");
	return;
}
/*
 * Dispatch requests from the registration thread to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret, err = -1;
	struct cds_wfcq_node *node;
	struct ust_command *ust_cmd = NULL;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
	};

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);

	if (testpoint(sessiond_thread_app_reg_dispatch)) {
		goto error_testpoint;
	}

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			struct ust_app *app = NULL;
			ust_cmd = NULL;

			/*
			 * Make sure we don't have node(s) that have hung up before receiving
			 * the notify socket. This is to clean the list in order to avoid
			 * memory leaks from notify sockets that are never seen.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
				wait_node = zmalloc(sizeof(*wait_node));
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(ust_cmd);
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create application object if socket is CMD. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg,
						ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(wait_node);
					free(ust_cmd);
					continue;
				}
				/*
				 * Add application to the wait queue so we can set the notify
				 * socket before putting this object in the global ht.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);
				wait_queue.count++;

				free(ust_cmd);
				/*
				 * We have to continue here since we don't have the notify
				 * socket and the application MUST be added to the hash table
				 * only at that moment.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and set the
				 * notify socket if found.
				 */
				cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
						&wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						wait_queue.count--;
						app = wait_node->app;
						free(wait_node);
						DBG3("UST app notify socket %d is set", ust_cmd->sock);
						break;
					}
				}

				/*
				 * With no application at this stage the received socket is
				 * basically useless so close it before we free the cmd data
				 * structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
				}
				free(ust_cmd);
			}

			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so from the register up to the
				 * registration done message, no thread can see the application
				 * and change its state.
				 */
				session_lock_list();

				/*
				 * Add application to the global hash table. This needs to be
				 * done before the update to the UST registry can locate the
				 * application.
				 */
				ust_app_add(app);

				/* Set app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				/* Send notify socket through the notify pipe. */
				ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
						app->notify_sock);
				if (ret < 0) {
					session_unlock_list();
					/*
					 * No notify thread, stop the UST tracing. However, this is
					 * not an internal error of this thread thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				/*
				 * Update newly registered application with the tracing
				 * registry info already enabled information.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about return value. Let the manage apps threads
				 * handle app unregistration upon socket close.
				 */
				(void) ust_app_register_done(app->sock);

				/*
				 * Even if the application socket has been closed, send the app
				 * to the thread and unregistration will take place at that
				 * place.
				 */
				ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
				if (ret < 0) {
					session_unlock_list();
					/*
					 * No apps. thread, stop the UST tracing. However, this is
					 * not an internal error of this thread thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				session_unlock_list();
			}
		} while (node != NULL);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
		health_poll_exit();
	}
	/* Normal exit, no error */
	err = 0;

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue.head, head) {
		cds_list_del(&wait_node->head);
		wait_queue.count--;
		free(wait_node);
	}

	DBG("Dispatch thread dying");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
error_testpoint:
	health_unregister(health_sessiond);
	return NULL;
}
/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Gets allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);

	if (testpoint(sessiond_thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
				"Execution continues but there might be problems for already\n"
				"running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set socket timeout for both receiving and sending.
					 * app_socket_timeout is in seconds, whereas
					 * lttcomm_setsockopt_rcv_timeout and
					 * lttcomm_setsockopt_snd_timeout expect msec as
					 * parameter.
					 */
					(void) lttcomm_setsockopt_rcv_timeout(sock,
							app_socket_timeout * 1000);
					(void) lttcomm_setsockopt_snd_timeout(sock,
							app_socket_timeout * 1000);

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}

					health_code_update();
					ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
					if (ret < 0) {
						free(ust_cmd);
						/* Close socket of the application. */
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}
					health_code_update();

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This apps will be part of the *system*.
					 */
					cds_wfcq_enqueue(&ust_cmd_queue.head, &ust_cmd_queue.tail, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfcq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

exit:
error:
	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	return NULL;
}
/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret, clock_ret;
	struct timespec timeout;

	/* Make sure we set the readiness flag to 0 because we are NOT ready */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret) {
		PERROR("pthread_condattr_init consumer data");
		goto error;
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret) {
		PERROR("pthread_condattr_setclock consumer data");
		goto error;
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret) {
		PERROR("pthread_cond_init consumer data");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
			consumer_data);
	if (ret) {
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for sem_timedwait absolute timeout */
	clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid to
	 * increment the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	/*
	 * The following loop COULD be skipped in some conditions so this is why we
	 * set ret to 0 in order to make sure at least one round of the loop is
	 * done.
	 */
	ret = 0;

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
	 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
	 * possible. This loop does not take any chances and works with both of
	 * them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	/* Handle spawning error. */
	if (ret != 0) {
		if (ret == ETIMEDOUT) {
			int pth_ret;

			/*
			 * Call has timed out so we kill the kconsumerd_thread and return
			 * an error.
			 */
			ERR("Condition timed out. The consumer thread was never ready.");
			pth_ret = pthread_cancel(consumer_data->thread);
			if (pth_ret < 0) {
				PERROR("pthread_cancel consumer thread");
			}
		} else {
			PERROR("pthread_cond_wait failed consumer thread");
		}
		/* Caller is expecting a negative value on failure. */
		ret = -1;
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}
/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		int ret;

		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			PERROR("Error killing consumer daemon");
			return ret;
		}
		return pthread_join(consumer_data->thread, &status);
	} else {
		return 0;
	}
}
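
/*
 * Lifecycle sketch: start_consumerd() below spawns the consumerd process and
 * then spawn_consumer_thread() to monitor it; on teardown,
 * join_consumer_thread() sends SIGTERM to the daemon and joins the
 * monitoring thread.
 */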
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		if (opt_verbose_consumer) {
			verbosity = "--verbose";
		} else if (lttng_opt_quiet) {
			verbosity = "--quiet";
		} else {
			verbosity = "";
		}

		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fallback on the 32-bit one,
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3("	1) %s", consumerd64_bin);
			DBG3("	2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
			DBG3("	3) %s", consumerd32_bin);
			if (stat(consumerd64_bin, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = consumerd64_bin;
			} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
			} else if (stat(consumerd32_bin, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = consumerd32_bin;
			} else {
				DBG("Could not find any valid consumerd executable");
				ret = -EINVAL;
				break;
			}
			DBG("Using kernel consumer at: %s", consumer_to_use);
			ret = execl(consumer_to_use,
					"lttng-consumerd", verbosity, "-k",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", tracing_group_name,
					NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			char *tmpnew = NULL;

			if (consumerd64_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd64_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					free(tmpnew);
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
			ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", tracing_group_name,
					NULL);
			if (consumerd64_libdir[0] != '\0') {
				free(tmpnew);
			}
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			char *tmpnew = NULL;

			if (consumerd32_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd32_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					free(tmpnew);
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
			ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", tracing_group_name,
					NULL);
			if (consumerd32_libdir[0] != '\0') {
				free(tmpnew);
			}
			break;
		}
		default:
			PERROR("unknown consumer type");
			exit(EXIT_FAILURE);
		}
		if (errno != 0) {
			PERROR("Consumer execl()");
		}
		/* Reaching this point, we got a failure on our execl(). */
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}
error:
	return ret;
}
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	int ret;

	/*
	 * Set the listen() state on the socket since there is a possible race
	 * between the exec() of the consumer daemon and this call if placed in the
	 * consumer thread. See bug #366 for more details.
	 */
	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

end:
	return 0;

error:
	/* Cleanup already created sockets on error. */
	if (consumer_data->err_sock >= 0) {
		int err;

		err = close(consumer_data->err_sock);
		if (err) {
			PERROR("close consumer data error socket");
		}
	}
	return ret;
}
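/*
 * Expected ordering, as described above: the error socket is put in the
 * listen() state before fork()/exec() so that the freshly spawned consumerd
 * can connect immediately; the consumer control thread created by
 * spawn_consumer_thread() then accepts on that same socket.
 */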
/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		ret = -1;
		goto error_open;
	}

	/* Validate kernel version */
	ret = kernel_validate_version(kernel_tracer_fd);
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	kernel_tracer_fd = -1;
	return LTTNG_ERR_KERN_VERSION;

error_modules:
	ret = close(kernel_tracer_fd);

error_open:
error:
	modprobe_remove_lttng_control();

	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;

	if (!is_root) {
		return LTTNG_ERR_NEED_ROOT_SESSIOND;
	} else {
		return LTTNG_ERR_KERN_NA;
	}
}
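/*
 * Initialization order sketch: load the lttng control modules, open the
 * kernel tracer interface (module_proc_lttng, typically "/proc/lttng"; the
 * exact path depends on the kernel and build), validate the tracer ABI
 * version, then load the data (probe) modules.
 */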
/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
	int ret;
	const char *dir_name;
	struct consumer_output *consumer;

	assert(session->consumer);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		DBG3("Copying tracing session consumer output in kernel session");
		/*
		 * XXX: We should audit the session creation and what this function
		 * does "extra" in order to avoid a destroy since this function is used
		 * in the domain session creation (kernel and ust) only. Same for UST
		 * domain.
		 */
		if (session->kernel_session->consumer) {
			consumer_destroy_output(session->kernel_session->consumer);
		}
		session->kernel_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->kernel_session->consumer;
		dir_name = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	case LTTNG_DOMAIN_UST:
		DBG3("Copying tracing session consumer output in UST session");
		if (session->ust_session->consumer) {
			consumer_destroy_output(session->ust_session->consumer);
		}
		session->ust_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->ust_session->consumer;
		dir_name = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	/* Append correct directory to subdir */
	strncat(consumer->subdir, dir_name,
			sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
	DBG3("Copy session consumer subdir %s", consumer->subdir);

	ret = LTTNG_OK;

error:
	return ret;
}
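/*
 * Example (illustrative): assuming DEFAULT_KERNEL_TRACE_DIR and
 * DEFAULT_UST_TRACE_DIR expand to "/kernel" and "/ust", a session whose
 * consumer subdir is "mysession-20150101-120000" ends up with
 * "mysession-20150101-120000/kernel" as its kernel domain output directory.
 */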
/*
 * Create an UST session and add it to the session ust list.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	int ret;
	struct ltt_ust_session *lus = NULL;

	assert(session->consumer);

	switch (domain->type) {
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->id);
	if (lus == NULL) {
		ret = LTTNG_ERR_UST_SESS_FAIL;
		goto error;
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	lus->output_traces = session->output_traces;
	lus->snapshot_mode = session->snapshot_mode;
	lus->live_timer_interval = session->live_timer;
	session->ust_session = lus;

	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	return LTTNG_OK;

error:
	free(lus);
	session->ust_session = NULL;
	return ret;
}
/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
	int ret;

	DBG("Creating kernel session");

	ret = kernel_create_session(session, kernel_tracer_fd);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_SESS_FAIL;
		goto error;
	}

	/* Code flow safety */
	assert(session->kernel_session);

	/* Copy session output to the newly created Kernel session */
	ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	/* Create directory(ies) on local filesystem. */
	if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(
				session->kernel_session->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, session->uid, session->gid);
		if (ret < 0) {
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error;
			}
		}
	}

	session->kernel_session->uid = session->uid;
	session->kernel_session->gid = session->gid;
	session->kernel_session->output_traces = session->output_traces;
	session->kernel_session->snapshot_mode = session->snapshot_mode;

	return LTTNG_OK;

error:
	trace_kernel_destroy_session(session->kernel_session);
	session->kernel_session = NULL;
	return ret;
}
/*
 * Count number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
	unsigned int i = 0;
	struct ltt_session *session;

	DBG("Counting number of available session for UID %d GID %d",
			uid, gid);
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}
		i++;
	}
	return i;
}
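/*
 * This count is what the LTTNG_LIST_SESSIONS handler below uses to size the
 * reply payload before cmd_list_lttng_sessions() fills it.
 */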
/*
 * Process the command requested by the lttng client within the command
 * context structure. This function makes sure that the return structure (llm)
 * is set and ready for transmission before returning.
 *
 * Return any error encountered or 0 for success.
 *
 * "sock" is only used for special-case var. len data.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
		int *sock_error)
{
	int ret = LTTNG_OK;
	int need_tracing_session = 1;
	int need_domain;

	DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);

	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	case LTTNG_CREATE_SESSION_LIVE:
	case LTTNG_DESTROY_SESSION:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_START_TRACE:
	case LTTNG_STOP_TRACE:
	case LTTNG_DATA_PENDING:
	case LTTNG_SNAPSHOT_ADD_OUTPUT:
	case LTTNG_SNAPSHOT_DEL_OUTPUT:
	case LTTNG_SNAPSHOT_LIST_OUTPUT:
	case LTTNG_SNAPSHOT_RECORD:
	case LTTNG_SAVE_SESSION:
		need_domain = 0;
		break;
	default:
		need_domain = 1;
	}

	if (opt_no_kernel && need_domain
			&& cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
		} else {
			ret = LTTNG_ERR_KERN_NA;
		}
		goto error;
	}

	/* Deny register consumer if we already have a spawned consumer. */
	if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
		pthread_mutex_lock(&kconsumer_data.pid_mutex);
		if (kconsumer_data.pid > 0) {
			ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
			pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			goto error;
		}
		pthread_mutex_unlock(&kconsumer_data.pid_mutex);
	}
	/*
	 * Check for commands that don't need to allocate a returned payload. We do
	 * this here so we don't have to make the call for no payload at each
	 * command.
	 */
	switch(cmd_ctx->lsm->cmd_type) {
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_LIST_CHANNELS:
	case LTTNG_LIST_EVENTS:
	case LTTNG_LIST_SYSCALLS:
		break;
	default:
		/* Setup lttng message with no payload */
		ret = setup_lttng_msg(cmd_ctx, 0);
		if (ret < 0) {
			/* This label does not try to unlock the session */
			goto init_setup_error;
		}
	}

	/* Commands that DO NOT need a session. */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	case LTTNG_CREATE_SESSION_LIVE:
	case LTTNG_CALIBRATE:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_SYSCALLS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	case LTTNG_SAVE_SESSION:
		need_tracing_session = 0;
		break;
	default:
		DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
		/*
		 * We keep the session list lock across _all_ commands
		 * for now, because the per-session lock does not
		 * handle teardown properly.
		 */
		session_lock_list();
		cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
		if (cmd_ctx->session == NULL) {
			ret = LTTNG_ERR_SESS_NOT_FOUND;
			goto error;
		} else {
			/* Acquire lock for the session */
			session_lock(cmd_ctx->session);
		}
		break;
	}
	/*
	 * Commands that need a valid session but should NOT create one if none
	 * exists. Instead of creating one and destroying it when the command is
	 * handled, process that right before so we save some round trip in useless
	 * code path.
	 */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_DISABLE_CHANNEL:
	case LTTNG_DISABLE_EVENT:
		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			if (!cmd_ctx->session->kernel_session) {
				ret = LTTNG_ERR_NO_CHANNEL;
				goto error;
			}
			break;
		case LTTNG_DOMAIN_JUL:
		case LTTNG_DOMAIN_LOG4J:
		case LTTNG_DOMAIN_PYTHON:
		case LTTNG_DOMAIN_UST:
			if (!cmd_ctx->session->ust_session) {
				ret = LTTNG_ERR_NO_CHANNEL;
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNKNOWN_DOMAIN;
			goto error;
		}
		break;
	default:
		break;
	}
	/*
	 * Check domain type for specific "pre-action".
	 */
	switch (cmd_ctx->lsm->domain.type) {
	case LTTNG_DOMAIN_KERNEL:
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
			goto error;
		}

		/* Kernel tracer check */
		if (kernel_tracer_fd == -1) {
			/* Basically, load kernel tracer modules */
			ret = init_kernel_tracer();
			if (ret != 0) {
				goto error;
			}
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_KERNCONSUMERD;
			goto error;
		}

		/* Need a session for kernel command */
		if (need_tracing_session) {
			if (cmd_ctx->session->kernel_session == NULL) {
				ret = create_kernel_session(cmd_ctx->session);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_SESS_FAIL;
					goto error;
				}
			}

			/* Start the kernel consumer daemon */
			pthread_mutex_lock(&kconsumer_data.pid_mutex);
			if (kconsumer_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
				ret = start_consumerd(&kconsumer_data);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
					goto error;
				}
				uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			}

			/*
			 * The consumer was just spawned so we need to add the socket to
			 * the consumer output of the session if it exists.
			 */
			ret = consumer_create_socket(&kconsumer_data,
					cmd_ctx->session->kernel_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
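	/*
	 * Summary of the kernel "pre-action" above: the first kernel-domain
	 * command lazily loads the tracer modules, creates the kernel session,
	 * spawns the kernel consumerd once, and registers its socket with the
	 * session's consumer output.
	 */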
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	case LTTNG_DOMAIN_UST:
	{
		if (!ust_app_supported()) {
			ret = LTTNG_ERR_NO_UST;
			goto error;
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_USTCONSUMERD;
			goto error;
		}

		if (need_tracing_session) {
			/* Create UST session if none exist. */
			if (cmd_ctx->session->ust_session == NULL) {
				ret = create_ust_session(cmd_ctx->session,
						&cmd_ctx->lsm->domain);
				if (ret != LTTNG_OK) {
					goto error;
				}
			}

			/* Start the UST consumer daemons */
			/* 64-bit */
			pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
			if (consumerd64_bin[0] != '\0' &&
					ustconsumer64_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
				ret = start_consumerd(&ustconsumer64_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
					uatomic_set(&ust_consumerd64_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 64 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer64_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}

			/* 32-bit */
			pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
			if (consumerd32_bin[0] != '\0' &&
					ustconsumer32_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
				ret = start_consumerd(&ustconsumer32_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
					uatomic_set(&ust_consumerd32_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 32 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer32_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		break;
	}
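	/*
	 * Both UST consumer daemons may be started here: whichever of the 32-bit
	 * and 64-bit binaries is configured gets spawned, and its command socket
	 * is cached in ust_consumerd32_fd/ust_consumerd64_fd so later commands
	 * can reach applications of either bitness.
	 */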
	/* Validate consumer daemon state when start/stop trace command */
	if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
			cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_JUL:
		case LTTNG_DOMAIN_LOG4J:
		case LTTNG_DOMAIN_PYTHON:
		case LTTNG_DOMAIN_UST:
			if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_USTCONSUMERD;
				goto error;
			}
			break;
		case LTTNG_DOMAIN_KERNEL:
			if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_KERNCONSUMERD;
				goto error;
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Check that the UID or GID match that of the tracing session.
	 * The root user can interact with all sessions.
	 */
	if (need_tracing_session) {
		if (!session_access_ok(cmd_ctx->session,
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
			ret = LTTNG_ERR_EPERM;
			goto error;
		}
	}

	/*
	 * Send relayd information to consumer as soon as we have a domain and a
	 * session.
	 */
	if (cmd_ctx->session && need_domain) {
		/*
		 * Setup relayd if not done yet. If the relayd information was already
		 * sent to the consumer, this call will gracefully return.
		 */
		ret = cmd_setup_relayd(cmd_ctx->session);
		if (ret != LTTNG_OK) {
			goto error;
		}
	}
	/* Process by command type */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_ADD_CONTEXT:
	{
		ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.context.channel_name,
				&cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_DISABLE_CHANNEL:
	{
		ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name);
		break;
	}
	case LTTNG_DISABLE_EVENT:
	{
		/* FIXME: passing packed structure to non-packed pointer */
		/* TODO: handle filter */
		ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name,
				&cmd_ctx->lsm->u.disable.event);
		break;
	}
	case LTTNG_ENABLE_CHANNEL:
	{
		ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
				&cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_ENABLE_EVENT:
	{
		struct lttng_event_exclusion *exclusion = NULL;
		struct lttng_filter_bytecode *bytecode = NULL;
		char *filter_expression = NULL;

		/* Handle exclusion events and receive it from the client. */
		if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
			size_t count = cmd_ctx->lsm->u.enable.exclusion_count;

			exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
					(count * LTTNG_SYMBOL_NAME_LEN));
			if (!exclusion) {
				ret = LTTNG_ERR_EXCLUSION_NOMEM;
				goto error;
			}

			DBG("Receiving var len exclusion event list from client ...");
			exclusion->count = count;
			ret = lttcomm_recv_unix_sock(sock, exclusion->names,
					count * LTTNG_SYMBOL_NAME_LEN);
			if (ret <= 0) {
				DBG("Nothing recv() from client var len data... continuing");
				free(exclusion);
				ret = LTTNG_ERR_EXCLUSION_INVAL;
				goto error;
			}
		}

		/* Get filter expression from client. */
		if (cmd_ctx->lsm->u.enable.expression_len > 0) {
			size_t expression_len =
					cmd_ctx->lsm->u.enable.expression_len;

			if (expression_len > LTTNG_FILTER_MAX_LEN) {
				ret = LTTNG_ERR_FILTER_INVAL;
				free(exclusion);
				goto error;
			}

			filter_expression = zmalloc(expression_len);
			if (!filter_expression) {
				free(exclusion);
				ret = LTTNG_ERR_FILTER_NOMEM;
				goto error;
			}

			/* Receive var. len. data */
			DBG("Receiving var len filter's expression from client ...");
			ret = lttcomm_recv_unix_sock(sock, filter_expression,
					expression_len);
			if (ret <= 0) {
				DBG("Nothing recv() from client var len data... continuing");
				free(filter_expression);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_INVAL;
				goto error;
			}
		}

		/* Handle filter and get bytecode from client. */
		if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
			size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;

			if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
				ret = LTTNG_ERR_FILTER_INVAL;
				free(filter_expression);
				free(exclusion);
				goto error;
			}

			bytecode = zmalloc(bytecode_len);
			if (!bytecode) {
				free(filter_expression);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_NOMEM;
				goto error;
			}

			/* Receive var. len. data */
			DBG("Receiving var len filter's bytecode from client ...");
			ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
			if (ret <= 0) {
				DBG("Nothing recv() from client var len data... continuing");
				free(filter_expression);
				free(bytecode);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_INVAL;
				goto error;
			}

			if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
				free(filter_expression);
				free(bytecode);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_INVAL;
				goto error;
			}
		}

		ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event,
				filter_expression, bytecode, exclusion,
				kernel_poll_pipe[1]);
		break;
	}
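	/*
	 * Wire format note for LTTNG_ENABLE_EVENT: after the fixed-size lsm
	 * header, the variable-length parts are received on the same socket in
	 * this order: exclusion names (exclusion_count * LTTNG_SYMBOL_NAME_LEN
	 * bytes), then the filter expression string, then the filter bytecode.
	 */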
	case LTTNG_LIST_TRACEPOINTS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		session_lock_list();
		nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
		session_unlock_list();
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				sizeof(struct lttng_event) * nb_events);

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	{
		struct lttng_event_field *fields;
		ssize_t nb_fields;

		session_lock_list();
		nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
				&fields);
		session_unlock_list();
		if (nb_fields < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_fields;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx,
				sizeof(struct lttng_event_field) * nb_fields);
		if (ret < 0) {
			free(fields);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, fields,
				sizeof(struct lttng_event_field) * nb_fields);

		free(fields);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SYSCALLS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		nb_events = cmd_list_syscalls(&events);
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				sizeof(struct lttng_event) * nb_events);

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SET_CONSUMER_URI:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri == 0) {
			ret = LTTNG_ERR_INVALID;
			goto error;
		}

		uris = zmalloc(len);
		if (uris == NULL) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		/* Receive variable len data */
		DBG("Receiving %zu URI(s) from client ...", nb_uri);
		ret = lttcomm_recv_unix_sock(sock, uris, len);
		if (ret <= 0) {
			DBG("No URIs received from client... continuing");
			free(uris);
			ret = LTTNG_ERR_SESSION_FAIL;
			goto error;
		}

		ret = cmd_set_consumer_uri(cmd_ctx->session, nb_uri, uris);
		free(uris);
		if (ret != LTTNG_OK) {
			goto error;
		}

		break;
	}
	case LTTNG_START_TRACE:
	{
		ret = cmd_start_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_STOP_TRACE:
	{
		ret = cmd_stop_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_CREATE_SESSION:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				free(uris);
				ret = LTTNG_ERR_SESSION_FAIL;
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				free(uris);
				ret = LTTNG_ERR_SESSION_FAIL;
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
				&cmd_ctx->creds, 0);

		free(uris);

		break;
	}
	case LTTNG_DESTROY_SESSION:
	{
		ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);

		/* Set session to NULL so we do not unlock it after free. */
		cmd_ctx->session = NULL;
		break;
	}
	case LTTNG_LIST_DOMAINS:
	{
		ssize_t nb_dom;
		struct lttng_domain *domains = NULL;

		nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
		if (nb_dom < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_dom;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
		if (ret < 0) {
			free(domains);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, domains,
				nb_dom * sizeof(struct lttng_domain));

		free(domains);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_CHANNELS:
	{
		ssize_t nb_chan;
		struct lttng_channel *channels = NULL;

		nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, &channels);
		if (nb_chan < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_chan;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
		if (ret < 0) {
			free(channels);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, channels,
				nb_chan * sizeof(struct lttng_channel));

		free(channels);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_EVENTS:
	{
		ssize_t nb_event;
		struct lttng_event *events = NULL;

		nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				cmd_ctx->lsm->u.list.channel_name, &events);
		if (nb_event < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_event;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				nb_event * sizeof(struct lttng_event));

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SESSIONS:
	{
		unsigned int nr_sessions;

		session_lock_list();
		nr_sessions = lttng_sessions_count(
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
		if (ret < 0) {
			session_unlock_list();
			goto setup_error;
		}

		/* Filled the session array */
		cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		session_unlock_list();

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_CALIBRATE:
	{
		ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
				&cmd_ctx->lsm->u.calibrate);
		break;
	}
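	/*
	 * All the LTTNG_LIST_* handlers above follow the same pattern:
	 * setup_lttng_msg() sizes the llm payload, the array is memcpy'd into
	 * cmd_ctx->llm->payload, and the client side presumably derives the
	 * element count from the payload size.
	 */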
	case LTTNG_REGISTER_CONSUMER:
	{
		struct consumer_data *cdata;

		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			cdata = &kconsumer_data;
			break;
		default:
			ret = LTTNG_ERR_UND;
			goto error;
		}

		ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.reg.path, cdata);
		break;
	}
	case LTTNG_DATA_PENDING:
	{
		ret = cmd_data_pending(cmd_ctx->session);
		break;
	}
	case LTTNG_SNAPSHOT_ADD_OUTPUT:
	{
		struct lttcomm_lttng_output_id reply;

		ret = cmd_snapshot_add_output(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
		if (ret != LTTNG_OK) {
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
		if (ret < 0) {
			goto setup_error;
		}

		/* Copy output list into message payload */
		memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));
		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_DEL_OUTPUT:
	{
		ret = cmd_snapshot_del_output(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_output.output);
		break;
	}
	case LTTNG_SNAPSHOT_LIST_OUTPUT:
	{
		ssize_t nb_output;
		struct lttng_snapshot_output *outputs = NULL;

		nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
		if (nb_output < 0) {
			ret = -nb_output;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx,
				nb_output * sizeof(struct lttng_snapshot_output));
		if (ret < 0) {
			free(outputs);
			goto setup_error;
		}

		/* Copy output list into message payload */
		memcpy(cmd_ctx->llm->payload, outputs,
				nb_output * sizeof(struct lttng_snapshot_output));

		free(outputs);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_RECORD:
	{
		ret = cmd_snapshot_record(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_record.output,
				cmd_ctx->lsm->u.snapshot_record.wait);
		break;
	}
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				free(uris);
				ret = LTTNG_ERR_SESSION_FAIL;
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				free(uris);
				ret = LTTNG_ERR_SESSION_FAIL;
				goto error;
			}
		}

		ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
				nb_uri, &cmd_ctx->creds);

		free(uris);

		break;
	}
	case LTTNG_CREATE_SESSION_LIVE:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				free(uris);
				ret = LTTNG_ERR_SESSION_FAIL;
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				free(uris);
				ret = LTTNG_ERR_SESSION_FAIL;
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
				nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);

		free(uris);

		break;
	}
	case LTTNG_SAVE_SESSION:
	{
		ret = cmd_save_sessions(&cmd_ctx->lsm->u.save_session.attr,
				&cmd_ctx->creds);
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		break;
	}

error:
	if (cmd_ctx->llm == NULL) {
		DBG("Missing llm structure. Allocating one.");
		if (setup_lttng_msg(cmd_ctx, 0) < 0) {
			goto setup_error;
		}
	}
	/* Set return code */
	cmd_ctx->llm->ret_code = ret;

setup_error:
	if (cmd_ctx->session) {
		session_unlock(cmd_ctx->session);
	}
	if (need_tracing_session) {
		session_unlock_list();
	}

init_setup_error:
	return ret;
}
/*
 * Thread managing health check socket.
 */
static void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/* We might hit an error path before this is created. */
	lttng_poll_init(&events);

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(health_unix_sock_path);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		ret = -1;
		goto error;
	}

	if (is_root) {
		/* lttng health client socket path permissions */
		ret = chown(health_unix_sock_path, 0,
				utils_get_group_id(tracing_group_name));
		if (ret < 0) {
			ERR("Unable to set group on %s", health_unix_sock_path);
			ret = -1;
			goto error;
		}

		ret = chmod(health_unix_sock_path,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s", health_unix_sock_path);
			ret = -1;
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	sessiond_notify_ready();

	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			new_sock = -1;
			continue;
		}

		rcu_thread_online();

		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
			/*
			 * health_check_state returns 0 if health is bad.
			 */
			if (!health_check_state(health_sessiond, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		DBG2("Health check return value %" PRIx64, reply.ret_code);

		ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
		new_sock = -1;
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(health_unix_sock_path);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);

	rcu_unregister_thread();
	return NULL;
}
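/*
 * Reply semantics of the health thread above: reply.ret_code is a bitmask;
 * bit i is set when health_check_state() reported component i as unhealthy,
 * so a reply of 0 means every registered thread passed its liveness check.
 */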
/*
 * This thread manages all client requests using the unix client socket for
 * communication.
 */
static void *thread_manage_clients(void *data)
{
	int sock = -1, ret, i, pollfd, err = -1;
	int sock_error;
	uint32_t revents, nb_fd;
	struct command_ctx *cmd_ctx = NULL;
	struct lttng_poll_event events;

	DBG("[thread] Manage client started");

	rcu_register_thread();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);

	health_code_update();

	ret = lttcomm_listen_unix_sock(client_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	sessiond_notify_ready();
	ret = sem_post(&load_info->message_thread_ready);
	if (ret) {
		PERROR("sem_post message_thread_ready");
		goto error;
	}

	/* This testpoint is after we signal readiness to the parent. */
	if (testpoint(sessiond_thread_manage_clients)) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_clients_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Accepting client command ...");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == client_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Client socket poll error");
					goto error;
				}
			}
		}

		DBG("Wait for client response");

		health_code_update();

		sock = lttcomm_accept_unix_sock(client_sock);
		if (sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(sock);

		/* Set socket option for credentials retrieval */
		ret = lttcomm_setsockopt_creds_unix_sock(sock);
		if (ret < 0) {
			goto error;
		}

		/* Allocate context command to process the client request */
		cmd_ctx = zmalloc(sizeof(struct command_ctx));
		if (cmd_ctx == NULL) {
			PERROR("zmalloc cmd_ctx");
			goto error;
		}

		/* Allocate data buffer for reception */
		cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
		if (cmd_ctx->lsm == NULL) {
			PERROR("zmalloc cmd_ctx->lsm");
			goto error;
		}

		cmd_ctx->llm = NULL;
		cmd_ctx->session = NULL;

		health_code_update();

		/*
		 * Data is received from the lttng client. The struct
		 * lttcomm_session_msg (lsm) contains the command and data request of
		 * the client.
		 */
		DBG("Receiving data from client ...");
		ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
				sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(sock);
			if (ret) {
				PERROR("close");
			}
			sock = -1;
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		// TODO: Validate cmd_ctx including sanity check for
		// security purpose.

		rcu_thread_online();
		/*
		 * This function dispatches the work to the kernel or userspace tracer
		 * libs and fills the lttcomm_lttng_msg data structure with all the
		 * information needed by the client. The command context struct
		 * contains everything this function may need.
		 */
		ret = process_client_msg(cmd_ctx, sock, &sock_error);
		rcu_thread_offline();
		if (ret < 0) {
			if (sock_error) {
				ret = close(sock);
				if (ret) {
					PERROR("close");
				}
				sock = -1;
			}
			/*
			 * TODO: Inform client somehow of the fatal error. At
			 * this point, ret < 0 means that a zmalloc failed
			 * (ENOMEM). Error detected but still accept
			 * command, unless a socket error has been
			 * detected.
			 */
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		DBG("Sending response (size: %d, retcode: %s)",
				cmd_ctx->lttng_msg_size,
				lttng_strerror(-cmd_ctx->llm->ret_code));
		ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
		if (ret < 0) {
			ERR("Failed to send data back to client");
		}

		/* End of transmission */
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		sock = -1;

		clean_command_ctx(&cmd_ctx);

		health_code_update();
	}

exit:
error:
	lttng_poll_clean(&events);
	clean_command_ctx(&cmd_ctx);

error_listen:
error_create_poll:
	unlink(client_unix_sock_path);
	if (client_sock >= 0) {
		ret = close(client_sock);
		if (ret) {
			PERROR("close");
		}
	}

	if (err) {
		ERR("Health error occurred in %s", __func__);
	}

	health_unregister(health_sessiond);

	DBG("Client thread dying");

	rcu_unregister_thread();

	/*
	 * Since we are creating the consumer threads, we own them, so we need
	 * to join them before our thread exits.
	 */
	ret = join_consumer_thread(&kconsumer_data);
	if (ret) {
		errno = ret;
		PERROR("join_consumer");
	}
	ret = join_consumer_thread(&ustconsumer32_data);
	if (ret) {
		errno = ret;
		PERROR("join_consumer ust32");
	}
	ret = join_consumer_thread(&ustconsumer64_data);
	if (ret) {
		errno = ret;
		PERROR("join_consumer ust64");
	}
	return NULL;
}
/*
 * usage function on stderr
 */
static void usage(void)
{
	fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
	fprintf(stderr, "  -h, --help                         Display this usage.\n");
	fprintf(stderr, "  -c, --client-sock PATH             Specify path for the client unix socket\n");
	fprintf(stderr, "  -a, --apps-sock PATH               Specify path for apps unix socket\n");
	fprintf(stderr, "      --kconsumerd-err-sock PATH     Specify path for the kernel consumer error socket\n");
	fprintf(stderr, "      --kconsumerd-cmd-sock PATH     Specify path for the kernel consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
	fprintf(stderr, "      --consumerd32-path PATH        Specify path for the 32-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd32-libdir PATH      Specify path for the 32-bit UST consumer daemon libraries\n");
	fprintf(stderr, "      --consumerd64-path PATH        Specify path for the 64-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd64-libdir PATH      Specify path for the 64-bit UST consumer daemon libraries\n");
	fprintf(stderr, "  -d, --daemonize                    Start as a daemon.\n");
	fprintf(stderr, "  -b, --background                   Start as a daemon, keeping console open.\n");
	fprintf(stderr, "  -g, --group NAME                   Specify the tracing group name. (default: tracing)\n");
	fprintf(stderr, "  -V, --version                      Show version number.\n");
	fprintf(stderr, "  -S, --sig-parent                   Send SIGUSR1 to parent pid to notify readiness.\n");
	fprintf(stderr, "  -q, --quiet                        No output at all.\n");
	fprintf(stderr, "  -v, --verbose                      Verbose mode. Activate DBG() macro.\n");
	fprintf(stderr, "  -p, --pidfile FILE                 Write a pid to FILE name overriding the default value.\n");
	fprintf(stderr, "      --verbose-consumer             Verbose mode for consumer. Activate DBG() macro.\n");
	fprintf(stderr, "      --no-kernel                    Disable kernel tracer\n");
	fprintf(stderr, "      --agent-tcp-port               Agent registration TCP port\n");
	fprintf(stderr, "  -f, --config PATH                  Load daemon configuration file\n");
	fprintf(stderr, "  -l, --load PATH                    Load session configuration\n");
	fprintf(stderr, "      --kmod-probes                  Specify kernel module probes to load\n");
	fprintf(stderr, "      --extra-kmod-probes            Specify extra kernel module probes to load\n");
}
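/*
 * Illustrative invocation using only flags documented above (values are
 * examples, not defaults):
 *
 *   lttng-sessiond --daemonize --group=tracing -vvv \
 *           --pidfile /tmp/lttng-sessiond.pid
 */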
/*
 * Take an option from the getopt output and set it in the right variable to be
 * used later by the daemon.
 *
 * Return 0 on success else a negative value.
 */
static int set_option(int opt, const char *arg, const char *optname)
{
	int ret = 0;

	if (arg && arg[0] == '\0') {
		/*
		 * This only happens if the value is read from the daemon config
		 * file. This means the option requires an argument and the
		 * configuration file contains a line such as:
		 * my_option =
		 */
		ret = -EINVAL;
		goto end;
	}

	switch (opt) {
	case 0:
		fprintf(stderr, "option %s", optname);
		if (arg) {
			fprintf(stderr, " with arg %s\n", arg);
		}
		break;
	case 'c':
		snprintf(client_unix_sock_path, PATH_MAX, "%s", arg);
		break;
	case 'a':
		snprintf(apps_unix_sock_path, PATH_MAX, "%s", arg);
		break;
	case 'g':
		/*
		 * If the override option is set, the pointer points to a
		 * *non* const thus freeing it even though the variable type is
		 * set to const.
		 */
		if (tracing_group_name_override) {
			free((void *) tracing_group_name);
		}
		tracing_group_name = strdup(arg);
		if (!tracing_group_name) {
			PERROR("strdup");
			ret = -ENOMEM;
		}
		tracing_group_name_override = 1;
		break;
	case 'V':
		fprintf(stdout, "%s\n", VERSION);
		exit(EXIT_SUCCESS);
	case 'E':	/* --kconsumerd-err-sock (short values as per long_options) */
		snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", arg);
		break;
	case 'C':	/* --kconsumerd-cmd-sock */
		snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", arg);
		break;
	case 'F':	/* --ustconsumerd64-err-sock */
		snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", arg);
		break;
	case 'D':	/* --ustconsumerd64-cmd-sock */
		snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", arg);
		break;
	case 'H':	/* --ustconsumerd32-err-sock */
		snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", arg);
		break;
	case 'G':	/* --ustconsumerd32-cmd-sock */
		snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", arg);
		break;
	case 'q':
		lttng_opt_quiet = 1;
		break;
	case 'v':
		/* Verbose level can increase using multiple -v */
		if (arg) {
			/* Value obtained from config file */
			lttng_opt_verbose = config_parse_value(arg);
		} else {
			/* -v used on command line */
			lttng_opt_verbose++;
		}
		/* Clamp value to [0, 3] */
		lttng_opt_verbose = lttng_opt_verbose < 0 ? 0 :
			(lttng_opt_verbose <= 3 ? lttng_opt_verbose : 3);
		break;
	case 'Z':	/* --verbose-consumer */
		if (arg) {
			opt_verbose_consumer = config_parse_value(arg);
		} else {
			opt_verbose_consumer += 1;
		}
		break;
	case 'u':	/* --consumerd32-path */
		if (consumerd32_bin_override) {
			free((void *) consumerd32_bin);
		}
		consumerd32_bin = strdup(arg);
		if (!consumerd32_bin) {
			PERROR("strdup");
			ret = -ENOMEM;
		}
		consumerd32_bin_override = 1;
		break;
	case 'U':	/* --consumerd32-libdir */
		if (consumerd32_libdir_override) {
			free((void *) consumerd32_libdir);
		}
		consumerd32_libdir = strdup(arg);
		if (!consumerd32_libdir) {
			PERROR("strdup");
			ret = -ENOMEM;
		}
		consumerd32_libdir_override = 1;
		break;
	case 't':	/* --consumerd64-path */
		if (consumerd64_bin_override) {
			free((void *) consumerd64_bin);
		}
		consumerd64_bin = strdup(arg);
		if (!consumerd64_bin) {
			PERROR("strdup");
			ret = -ENOMEM;
		}
		consumerd64_bin_override = 1;
		break;
	case 'T':	/* --consumerd64-libdir */
		if (consumerd64_libdir_override) {
			free((void *) consumerd64_libdir);
		}
		consumerd64_libdir = strdup(arg);
		if (!consumerd64_libdir) {
			PERROR("strdup");
			ret = -ENOMEM;
		}
		consumerd64_libdir_override = 1;
		break;
	case 'p':
		opt_pidfile = strdup(arg);
		if (!opt_pidfile) {
			PERROR("strdup");
			ret = -ENOMEM;
		}
		break;
	case 'J': /* Agent TCP port. */
	{
		unsigned long v;

		errno = 0;
		v = strtoul(arg, NULL, 0);
		if (errno != 0 || !isdigit(arg[0])) {
			ERR("Wrong value in --agent-tcp-port parameter: %s", arg);
			ret = -1;
			goto end;
		}
		if (v == 0 || v >= 65535) {
			ERR("Port overflow in --agent-tcp-port parameter: %s", arg);
			ret = -1;
			goto end;
		}
		agent_tcp_port = (uint32_t) v;
		DBG3("Agent TCP port set to non default: %u", agent_tcp_port);
		break;
	}
	case 'l':
		free(opt_load_session_path);
		opt_load_session_path = strdup(arg);
		if (!opt_load_session_path) {
			PERROR("strdup");
			ret = -ENOMEM;
		}
		break;
	case 'P': /* probe modules list */
		free(kmod_probes_list);
		kmod_probes_list = strdup(arg);
		if (!kmod_probes_list) {
			PERROR("strdup");
			ret = -ENOMEM;
		}
		break;
	case 'e':	/* --extra-kmod-probes */
		free(kmod_extra_probes_list);
		kmod_extra_probes_list = strdup(arg);
		if (!kmod_extra_probes_list) {
			PERROR("strdup");
			ret = -ENOMEM;
		}
		break;
	case 'f':
		/* This is handled in set_options() thus silent break. */
		break;
	default:
		/* Unknown option or other error.
		 * Error is printed by getopt, just return */
		ret = -1;
	}

end:
	if (ret == -EINVAL) {
		const char *opt_name = "unknown";
		int i;

		for (i = 0; i < sizeof(long_options) / sizeof(struct option);
				i++) {
			if (opt == long_options[i].val) {
				opt_name = long_options[i].name;
				break;
			}
		}

		WARN("Invalid argument provided for option \"%s\", using default value.",
				opt_name);
	}

	return ret;
}
/*
 * config_entry_handler_cb used to handle options read from a config file.
 * See the config_entry_handler_cb comment in common/config/config.h for the
 * return value conventions.
 */
static int config_entry_handler(const struct config_entry *entry, void *unused)
{
	int ret = 0, i;

	if (!entry || !entry->name || !entry->value) {
		ret = -EINVAL;
		goto end;
	}

	/* Check if the option is to be ignored */
	for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
		if (!strcmp(entry->name, config_ignore_options[i])) {
			goto end;
		}
	}

	for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1;
			i++) {
		/* Ignore if not fully matched. */
		if (strcmp(entry->name, long_options[i].name)) {
			continue;
		}

		/*
		 * If the option takes no argument on the command line, we have to
		 * check if the value is "true". We support non-zero numeric values,
		 * true, on and yes.
		 */
		if (!long_options[i].has_arg) {
			ret = config_parse_value(entry->value);
			if (ret <= 0) {
				if (ret) {
					WARN("Invalid configuration value \"%s\" for option %s",
							entry->value, entry->name);
				}
				/* False, skip boolean config option. */
				goto end;
			}
		}

		ret = set_option(long_options[i].val, entry->value, entry->name);
		goto end;
	}

	WARN("Unrecognized option \"%s\" in daemon configuration file.", entry->name);

end:
	return ret;
}
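/*
 * Example daemon configuration entries handled by this callback (option names
 * follow long_options; the values are illustrative):
 *
 *   daemonize=yes
 *   verbose=3
 *
 * For options that take no argument, config_parse_value() accepts non-zero
 * numbers and the usual truthy spellings such as "true".
 */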
/*
 * daemon configuration loading and argument parsing
 */
static int set_options(int argc, char **argv)
{
	int ret = 0, c = 0, option_index = 0;
	int orig_optopt = optopt, orig_optind = optind;
	char *optstring;
	const char *config_path = NULL;

	optstring = utils_generate_optstring(long_options,
			sizeof(long_options) / sizeof(struct option));
	if (!optstring) {
		ret = -ENOMEM;
		goto end;
	}

	/* Check for the --config option */
	while ((c = getopt_long(argc, argv, optstring, long_options,
					&option_index)) != -1) {
		if (c == '?') {
			ret = -EINVAL;
			goto end;
		} else if (c != 'f') {
			/* if not equal to --config option. */
			continue;
		}

		config_path = utils_expand_path(optarg);
		if (!config_path) {
			ERR("Failed to resolve path: %s", optarg);
		}
	}

	ret = config_get_section_entries(config_path, config_section_name,
			config_entry_handler, NULL);
	if (ret) {
		if (ret > 0) {
			ERR("Invalid configuration option at line %i", ret);
			ret = -1;
		}
		goto end;
	}

	/* Reset getopt's global state */
	optopt = orig_optopt;
	optind = orig_optind;
	while (1) {
		c = getopt_long(argc, argv, optstring, long_options, &option_index);
		if (c == -1) {
			break;
		}

		ret = set_option(c, optarg, long_options[option_index].name);
		if (ret < 0) {
			break;
		}
	}

end:
	free(optstring);
	return ret;
}
/*
 * Creates the two sockets needed by the daemon.
 *	apps_sock - The communication socket for all UST apps.
 *	client_sock - The communication socket of the cli tool (lttng).
 */
static int init_daemon_socket(void)
{
	int ret = 0;
	mode_t old_umask;

	old_umask = umask(0);

	/* Create client tool unix socket */
	client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
	if (client_sock < 0) {
		ERR("Create unix sock failed: %s", client_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(client_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", client_sock);
	}

	/* File permission MUST be 660 */
	ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", client_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	/* Create the application unix socket */
	apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
	if (apps_sock < 0) {
		ERR("Create unix sock failed: %s", apps_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(apps_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", apps_sock);
	}

	/* File permission MUST be 666 */
	ret = chmod(apps_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", apps_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	DBG3("Session daemon client socket %d and application socket %d created",
			client_sock, apps_sock);

end:
	umask(old_umask);
	return ret;
}
/*
 * Check if the global socket is available, and if a daemon is answering at the
 * other side. If yes, error is returned.
 */
static int check_existing_daemon(void)
{
	/* Is there anybody out there ? */
	if (lttng_session_daemon_alive()) {
		return -EEXIST;
	}

	return 0;
}
/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	gid_t gid;

	gid = utils_get_group_id(tracing_group_name);

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
		PERROR("chown");
	}

	/*
	 * Ensure all applications and tracing group can search the run
	 * dir. Allow everyone to read the directory, since it does not
	 * buy us anything to hide its content.
	 */
	ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
	if (ret < 0) {
		ERR("Unable to set permissions on %s", rundir);
		PERROR("chmod");
	}

	/* lttng client socket path */
	ret = chown(client_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", client_unix_sock_path);
		PERROR("chown");
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 32-bit ustconsumer compat32 error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
		PERROR("chown");
	}

	DBG("All permissions are set");

	return ret;
}
/*
 * Create the lttng run directory needed for all global sockets and pipes.
 */
static int create_lttng_rundir(const char *rundir)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", rundir);

	ret = mkdir(rundir, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", rundir);
			goto error;
		} else {
			ret = 0;
		}
	}

error:
	return ret;
}
/*
 * Setup sockets and directory needed by the consumerd communication with the
 * session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data,
		const char *rundir)
{
	int ret;
	char path[PATH_MAX];

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
		break;
	case LTTNG_CONSUMER64_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
		break;
	case LTTNG_CONSUMER32_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
	if (ret < 0) {
		if (errno != EEXIST) {
			PERROR("mkdir");
			ERR("Failed to create %s", path);
			goto error;
		}
		ret = 0;
	}

	ret = chown(path, 0, utils_get_group_id(tracing_group_name));
	if (ret < 0) {
		ERR("Unable to set group on %s", path);
		PERROR("chown");
		goto error;
	}

	/* Create the consumerd error unix socket */
	consumer_data->err_sock =
		lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	ret = utils_set_fd_cloexec(consumer_data->err_sock);
	if (ret < 0) {
		PERROR("utils_set_fd_cloexec");
		/* continue anyway */
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
		PERROR("chmod");
		goto error;
	}

error:
	return ret;
}
/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGPIPE:
		DBG("SIGPIPE caught");
		return;
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	case SIGUSR1:
		CMM_STORE_SHARED(recv_child_signal, 1);
		break;
	default:
		break;
	}
}
/*
 * Setup signal handler for :
 *	SIGINT, SIGTERM, SIGPIPE, SIGUSR1
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_handler = sighandler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");

	return ret;
}
/*
 * Set open files limit to unlimited. This daemon can open a large number of
 * file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("failed to set open files limit");
	}
}
/*
 * Write pidfile using the rundir and opt_pidfile.
 */
static int write_pidfile(void)
{
	int ret;
	char pidfile_path[PATH_MAX];

	assert(rundir);

	if (opt_pidfile) {
		strncpy(pidfile_path, opt_pidfile, sizeof(pidfile_path));
	} else {
		/* Build pidfile path from rundir and opt_pidfile. */
		ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
				DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
		if (ret < 0) {
			PERROR("snprintf pidfile path");
			goto error;
		}
	}

	/*
	 * Create pid file in rundir.
	 */
	ret = utils_create_pid_file(getpid(), pidfile_path);
error:
	return ret;
}
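/*
 * Example (illustrative): when running as root with the default rundir
 * (DEFAULT_LTTNG_RUNDIR, typically "/var/run/lttng"), the resulting file is
 * "<rundir>/" DEFAULT_LTTNG_SESSIOND_PIDFILE unless -p/--pidfile overrides it
 * with an explicit path.
 */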
/*
 * Create lockfile using the rundir and return its fd.
 */
static int create_lockfile(void)
{
	int ret;
	char lockfile_path[PATH_MAX];

	ret = generate_lock_file_path(lockfile_path, sizeof(lockfile_path));
	if (ret < 0) {
		goto error;
	}

	ret = utils_create_lock_file(lockfile_path);
error:
	return ret;
}
/*
 * Write agent TCP port using the rundir.
 */
static int write_agent_port(void)
{
	int ret;
	char path[PATH_MAX];

	assert(rundir);

	ret = snprintf(path, sizeof(path), "%s/"
			DEFAULT_LTTNG_SESSIOND_AGENTPORT_FILE, rundir);
	if (ret < 0) {
		PERROR("snprintf agent port path");
		goto error;
	}

	/*
	 * Create TCP agent port file in rundir.
	 */
	ret = utils_create_pid_file(agent_tcp_port, path);
error:
	return ret;
}
5122 int main(int argc
, char **argv
)
5124 int ret
= 0, retval
= 0;
5126 const char *home_path
, *env_app_timeout
;
5128 init_kernel_workarounds();
5130 rcu_register_thread();
5132 if (set_signal_handler()) {
5134 goto exit_set_signal_handler
;
5137 setup_consumerd_path();
5139 page_size
= sysconf(_SC_PAGESIZE
);
5140 if (page_size
< 0) {
5141 PERROR("sysconf _SC_PAGESIZE");
5142 page_size
= LONG_MAX
;
5143 WARN("Fallback page size to %ld", page_size
);
5147 * Parse arguments and load the daemon configuration file.
5149 * We have an exit_options exit path to free memory reserved by
5150 * set_options. This is needed because the rest of sessiond_cleanup()
5151 * depends on ht_cleanup_thread, which depends on lttng_daemonize, which
5152 * depends on set_options.
5155 if (set_options(argc
, argv
)) {
5161 if (opt_daemon
|| opt_background
) {
5164 ret
= lttng_daemonize(&child_ppid
, &recv_child_signal
,
5172 * We are in the child. Make sure all other file descriptors are
5173 * closed, in case we are called with more opened file
5174 * descriptors than the standard ones.
5176 for (i
= 3; i
< sysconf(_SC_OPEN_MAX
); i
++) {
5182 * Starting from here, we can create threads. This needs to be after
5183 * lttng_daemonize due to RCU.
5187 * Initialize the health check subsystem. This call should set the
5188 * appropriate time values.
5190 health_sessiond
= health_app_create(NR_HEALTH_SESSIOND_TYPES
);
5191 if (!health_sessiond
) {
5192 PERROR("health_app_create error");
5194 goto exit_health_sessiond_cleanup
;
5197 if (init_ht_cleanup_quit_pipe()) {
5199 goto exit_ht_cleanup_quit_pipe
;
5202 /* Setup the thread ht_cleanup communication pipe. */
5203 if (utils_create_pipe_cloexec(ht_cleanup_pipe
)) {
5205 goto exit_ht_cleanup_pipe
;
5208 /* Set up max poll set size */
5209 if (lttng_poll_set_max_size()) {
5211 goto exit_set_max_size
;
5214 /* Create thread to clean up RCU hash tables */
5215 ret
= pthread_create(&ht_cleanup_thread
, NULL
,
5216 thread_ht_cleanup
, (void *) NULL
);
5219 PERROR("pthread_create ht_cleanup");
5221 goto exit_ht_cleanup
;
5224 /* Create thread quit pipe */
5225 if (init_thread_quit_pipe()) {
5227 goto exit_init_data
;
5230 /* Check if daemon is UID = 0 */
5231 is_root
= !getuid();
5234 rundir
= strdup(DEFAULT_LTTNG_RUNDIR
);
5237 goto exit_init_data
;
5240 /* Create global run dir with root access */
5241 if (create_lttng_rundir(rundir
)) {
5243 goto exit_init_data
;
5246 if (strlen(apps_unix_sock_path
) == 0) {
5247 ret
= snprintf(apps_unix_sock_path
, PATH_MAX
,
5248 DEFAULT_GLOBAL_APPS_UNIX_SOCK
);
5251 goto exit_init_data
;
5255 if (strlen(client_unix_sock_path
) == 0) {
5256 ret
= snprintf(client_unix_sock_path
, PATH_MAX
,
5257 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK
);
5260 goto exit_init_data
;
5264 /* Set global SHM for ust */
5265 if (strlen(wait_shm_path
) == 0) {
5266 ret
= snprintf(wait_shm_path
, PATH_MAX
,
5267 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH
);
5270 goto exit_init_data
;
5274 if (strlen(health_unix_sock_path
) == 0) {
5275 ret
= snprintf(health_unix_sock_path
,
5276 sizeof(health_unix_sock_path
),
5277 DEFAULT_GLOBAL_HEALTH_UNIX_SOCK
);
5280 goto exit_init_data
;
5284 /* Setup kernel consumerd path */
5285 ret
= snprintf(kconsumer_data
.err_unix_sock_path
, PATH_MAX
,
5286 DEFAULT_KCONSUMERD_ERR_SOCK_PATH
, rundir
);
5289 goto exit_init_data
;
5291 ret
= snprintf(kconsumer_data
.cmd_unix_sock_path
, PATH_MAX
,
5292 DEFAULT_KCONSUMERD_CMD_SOCK_PATH
, rundir
);
5295 goto exit_init_data
;
5298 DBG2("Kernel consumer err path: %s",
5299 kconsumer_data
.err_unix_sock_path
);
5300 DBG2("Kernel consumer cmd path: %s",
5301 kconsumer_data
.cmd_unix_sock_path
);
	} else {
		home_path = utils_get_home_dir();
		if (home_path == NULL) {
			/* TODO: Add --socket PATH option */
			ERR("Can't get HOME directory for sockets creation.");
			retval = -1;
			goto exit_init_data;
		}

		/*
		 * Create rundir from home path. This will create something like
		 * $HOME/.lttng
		 */
		ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
		if (ret < 0) {
			retval = -1;
			goto exit_init_data;
		}

		if (create_lttng_rundir(rundir)) {
			retval = -1;
			goto exit_init_data;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			ret = snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_APPS_UNIX_SOCK,
					home_path);
			if (ret < 0) {
				retval = -1;
				goto exit_init_data;
			}
		}

		/* Set the cli tool unix socket path */
		if (strlen(client_unix_sock_path) == 0) {
			ret = snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_CLIENT_UNIX_SOCK,
					home_path);
			if (ret < 0) {
				retval = -1;
				goto exit_init_data;
			}
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			ret = snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_HOME_APPS_WAIT_SHM_PATH,
					getuid());
			if (ret < 0) {
				retval = -1;
				goto exit_init_data;
			}
		}

		/* Set health check Unix path */
		if (strlen(health_unix_sock_path) == 0) {
			ret = snprintf(health_unix_sock_path,
					sizeof(health_unix_sock_path),
					DEFAULT_HOME_HEALTH_UNIX_SOCK,
					home_path);
			if (ret < 0) {
				retval = -1;
				goto exit_init_data;
			}
		}
	}
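	/*
	 * At this point rundir is either the system-wide run directory
	 * (DEFAULT_LTTNG_RUNDIR, typically /var/run/lttng) when running as
	 * root, or a per-user directory derived from $HOME; the consumer
	 * sockets, pid file and lock file created below all live under it.
	 */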
	lockfile_fd = create_lockfile();
	if (lockfile_fd < 0) {
		retval = -1;
		goto exit_init_data;
	}
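	/*
	 * The lock file descriptor is deliberately kept open for the whole
	 * lifetime of the process: it holds an exclusive lock that prevents a
	 * second session daemon from operating on the same run directory, and
	 * complements the socket probe done by check_existing_daemon() below.
	 */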
	/* Set consumer initial state */
	kernel_consumerd_state = CONSUMER_STOPPED;
	ust_consumerd_state = CONSUMER_STOPPED;

	DBG("Client socket path %s", client_unix_sock_path);
	DBG("Application socket path %s", apps_unix_sock_path);
	DBG("Application wait path %s", wait_shm_path);
	DBG("LTTng run directory path: %s", rundir);
	/* 32-bit consumerd path setup */
	ret = snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
	if (ret < 0) {
		PERROR("snprintf 32-bit consumer error socket path");
		retval = -1;
		goto exit_init_data;
	}
	ret = snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);
	if (ret < 0) {
		PERROR("snprintf 32-bit consumer command socket path");
		retval = -1;
		goto exit_init_data;
	}

	DBG2("UST consumer 32-bit err path: %s",
			ustconsumer32_data.err_unix_sock_path);
	DBG2("UST consumer 32-bit cmd path: %s",
			ustconsumer32_data.cmd_unix_sock_path);

	/* 64-bit consumerd path setup */
	ret = snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
	if (ret < 0) {
		PERROR("snprintf 64-bit consumer error socket path");
		retval = -1;
		goto exit_init_data;
	}
	ret = snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);
	if (ret < 0) {
		PERROR("snprintf 64-bit consumer command socket path");
		retval = -1;
		goto exit_init_data;
	}

	DBG2("UST consumer 64-bit err path: %s",
			ustconsumer64_data.err_unix_sock_path);
	DBG2("UST consumer 64-bit cmd path: %s",
			ustconsumer64_data.cmd_unix_sock_path);
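	/*
	 * Two separate UST consumer daemons are configured because 32-bit and
	 * 64-bit instrumented applications each need a consumerd built for
	 * their own ABI; each instance gets its own error and command sockets
	 * under rundir.
	 */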
	/*
	 * See if a daemon already exists.
	 */
	if (check_existing_daemon()) {
		ERR("Already running daemon.");
		/*
		 * Do not go through the regular exit/cleanup path because a
		 * daemon is already running and we must not clean up its
		 * resources.
		 */
		retval = -1;
		goto exit_init_data;
	}
	/*
	 * Init UST app hash table. Alloc the hash table before this point
	 * since cleanup() can get called after it.
	 */
	if (ust_app_ht_alloc()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Initialize agent domain subsystem. */
	if (agent_setup()) {
		/* ENOMEM at this point. */
		retval = -1;
		goto exit_init_data;
	}
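	/*
	 * The agent subsystem backs the agent tracing domains (such as
	 * java.util.logging and log4j): instrumented runtimes register over a
	 * local TCP socket handled by the agent registration thread created
	 * further down, and write_agent_port() below records the port that
	 * thread will listen on.
	 */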
	/*
	 * These actions must be executed as root. We do that *after* setting
	 * up the socket paths because we MUST make the check for another
	 * daemon using those paths *before* trying to set the kernel consumer
	 * sockets and init the kernel tracer.
	 */
	if (is_root) {
		if (set_consumer_sockets(&kconsumer_data, rundir)) {
			retval = -1;
			goto exit_init_data;
		}

		/* Setup kernel tracer */
		if (!opt_no_kernel) {
			init_kernel_tracer();
			if (kernel_tracer_fd >= 0) {
				ret = syscall_init_table();
				if (ret < 0) {
					ERR("Unable to populate syscall table. "
						"Syscall tracing won't work "
						"for this session daemon.");
				}
			}
		}

		/* Set ulimit for open files */
		set_ulimit();
	}
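	/*
	 * Note that init_kernel_tracer() does not abort startup on failure:
	 * if the LTTng kernel modules are unavailable, kernel_tracer_fd stays
	 * negative and only user-space tracing is offered, which is why the
	 * syscall table is populated only when the tracer fd is valid.
	 */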
	/* init lttng_fd tracking must be done after set_ulimit. */
	lttng_fd_init();
	if (set_consumer_sockets(&ustconsumer64_data, rundir)) {
		retval = -1;
		goto exit_init_data;
	}

	if (set_consumer_sockets(&ustconsumer32_data, rundir)) {
		retval = -1;
		goto exit_init_data;
	}
	/* Set up the needed Unix sockets. */
	if (init_daemon_socket()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Set credentials to socket */
	if (is_root && set_permissions(rundir)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (opt_sig_parent) {
		ppid = getppid();
	}
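	/*
	 * With -S/--sig-parent, the recorded ppid is sent a SIGUSR1 once the
	 * daemon is ready to accept commands; the lttng command-line client
	 * relies on this when it spawns a session daemon on demand.
	 */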
	/* Setup the kernel pipe for waking up the kernel thread */
	if (is_root && !opt_no_kernel) {
		if (utils_create_pipe_cloexec(kernel_poll_pipe)) {
			retval = -1;
			goto exit_init_data;
		}
	}

	/* Setup the thread apps communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_pipe)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Setup the thread apps notify communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_notify_pipe)) {
		retval = -1;
		goto exit_init_data;
	}
	/* Initialize global buffer per UID and PID registry. */
	buffer_reg_init_uid_registry();
	buffer_reg_init_pid_registry();

	/* Init UST command queue. */
	cds_wfcq_init(&ust_cmd_queue.head, &ust_cmd_queue.tail);
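	/*
	 * ust_cmd_queue is a wait-free concurrent queue (urcu wfcqueue): the
	 * application registration thread enqueues new registrations and the
	 * dispatch thread dequeues them, sleeping on the queue's futex when
	 * it is empty.
	 */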
	/*
	 * Get the session list pointer. This pointer MUST NOT be freed; the
	 * list is statically declared in session.c.
	 */
	session_list_ptr = session_get_list();
	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		app_socket_timeout = atoi(env_app_timeout);
	} else {
		app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}
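	/*
	 * Example (assuming DEFAULT_APP_SOCKET_TIMEOUT_ENV expands to
	 * "LTTNG_APP_SOCKET_TIMEOUT"):
	 *
	 *   LTTNG_APP_SOCKET_TIMEOUT=10 lttng-sessiond
	 *
	 * The value is parsed with atoi(), so a non-numeric string silently
	 * becomes 0.
	 */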
	ret = write_pidfile();
	if (ret) {
		ERR("Error in write_pidfile");
		retval = -1;
		goto exit_init_data;
	}
	ret = write_agent_port();
	if (ret) {
		ERR("Error in write_agent_port");
		retval = -1;
		goto exit_init_data;
	}
	/* Initialize communication library */
	lttcomm_init();
	/* Initialize TCP timeout values */
	lttcomm_inet_init();

	if (load_session_init_data(&load_info) < 0) {
		retval = -1;
		goto exit_init_data;
	}
	load_info->path = opt_load_session_path;
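	/*
	 * load_info carries the optional --load path; the session-loading
	 * thread created last consumes it to restore saved sessions once the
	 * daemon is otherwise fully started.
	 */

	/*
	 * From here on, every pthread_create() failure jumps to the label
	 * placed right after that thread's pthread_join() below, so that
	 * exactly the threads that were started get joined during teardown.
	 */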
	/* Create health-check thread */
	ret = pthread_create(&health_thread, NULL,
			thread_manage_health, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create health");
		retval = -1;
		goto exit_health;
	}
	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, NULL,
			thread_manage_clients, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create clients");
		retval = -1;
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, NULL,
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create dispatch");
		retval = -1;
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, NULL,
			thread_registration_apps, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create registration");
		retval = -1;
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, NULL,
			thread_manage_apps, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create apps");
		retval = -1;
		goto exit_apps;
	}
	/* Create thread to manage application notify socket */
	ret = pthread_create(&apps_notify_thread, NULL,
			ust_thread_manage_notify, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create notify");
		retval = -1;
		goto exit_apps_notify;
	}

	/* Create agent registration thread. */
	ret = pthread_create(&agent_reg_thread, NULL,
			agent_thread_manage_registration, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create agent");
		retval = -1;
		goto exit_agent_reg;
	}

	/* Don't start this thread if kernel tracing is not requested or we are not root. */
	if (is_root && !opt_no_kernel) {
		/* Create kernel thread to manage kernel event */
		ret = pthread_create(&kernel_thread, NULL,
				thread_manage_kernel, (void *) NULL);
		if (ret) {
			errno = ret;
			PERROR("pthread_create kernel");
			retval = -1;
			goto exit_kernel;
		}
	}

	/* Create session loading thread. */
	ret = pthread_create(&load_session_thread, NULL, thread_load_session,
			load_info);
	if (ret) {
		errno = ret;
		PERROR("pthread_create load_session_thread");
		retval = -1;
		goto exit_load_session;
	}
	/*
	 * This is where we start awaiting program completion (e.g. through a
	 * signal that asks the threads to tear down).
	 */

	ret = pthread_join(load_session_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join load_session_thread");
		retval = -1;
	}
exit_load_session:

	if (is_root && !opt_no_kernel) {
		ret = pthread_join(kernel_thread, &status);
		if (ret) {
			errno = ret;
			PERROR("pthread_join kernel");
			retval = -1;
		}
	}
exit_kernel:

	ret = pthread_join(agent_reg_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join agent");
		retval = -1;
	}
exit_agent_reg:

	ret = pthread_join(apps_notify_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join apps notify");
		retval = -1;
	}
exit_apps_notify:

	ret = pthread_join(apps_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join apps");
		retval = -1;
	}
exit_apps:

	ret = pthread_join(reg_apps_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join registration");
		retval = -1;
	}
exit_reg_apps:

	ret = pthread_join(dispatch_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join dispatch");
		retval = -1;
	}
exit_dispatch:

	ret = pthread_join(client_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join clients");
		retval = -1;
	}
exit_client:

	ret = pthread_join(health_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join health thread");
		retval = -1;
	}
exit_health:
exit_init_data:
	/*
	 * sessiond_cleanup() is called when no other thread is running, except
	 * the ht_cleanup thread, which is needed to destroy the hash tables.
	 */
	rcu_thread_online();
	sessiond_cleanup();
	rcu_thread_offline();
	rcu_unregister_thread();
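	/*
	 * sessiond_cleanup() walks RCU-protected structures (sessions and the
	 * application hash tables), so this thread is put back online for the
	 * call, then offlined and unregistered before the process exits.
	 */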
	ret = notify_thread_pipe(ht_cleanup_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on ht_cleanup quit pipe");
	}

	ret = pthread_join(ht_cleanup_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join ht cleanup thread");
	}
exit_ht_cleanup:
exit_set_max_size:

	utils_close_pipe(ht_cleanup_pipe);
exit_ht_cleanup_pipe:

	/*
	 * Close the ht_cleanup quit pipe.
	 */
	utils_close_pipe(ht_cleanup_quit_pipe);
exit_ht_cleanup_quit_pipe:

	health_app_destroy(health_sessiond);
exit_health_sessiond_cleanup:

exit_options:
	sessiond_cleanup_options();

exit_set_signal_handler: