/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/daemonize.h>
#include <common/config/config.h>

#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "health-sessiond.h"
#include "testpoint.h"
#include "ust-thread.h"
#include "jul-thread.h"
#define CONSUMERD_FILE	"lttng-consumerd"

static const char *tracing_group_name = DEFAULT_TRACING_GROUP;
static int tracing_group_name_override;
static char *opt_pidfile;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon, opt_background;
static int opt_no_kernel;
static pid_t ppid;		/* Parent PID for --sig-parent option */
static pid_t child_ppid;	/* Internal parent PID use with daemonize. */

/* Set to 1 when a SIGUSR1 signal is received. */
static int recv_child_signal;
/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
/* Command line options */
static const struct option long_options[] = {
	{ "client-sock", 1, 0, 'c' },
	{ "apps-sock", 1, 0, 'a' },
	{ "kconsumerd-cmd-sock", 1, 0, 'C' },
	{ "kconsumerd-err-sock", 1, 0, 'E' },
	{ "ustconsumerd32-cmd-sock", 1, 0, 'G' },
	{ "ustconsumerd32-err-sock", 1, 0, 'H' },
	{ "ustconsumerd64-cmd-sock", 1, 0, 'D' },
	{ "ustconsumerd64-err-sock", 1, 0, 'F' },
	{ "consumerd32-path", 1, 0, 'u' },
	{ "consumerd32-libdir", 1, 0, 'U' },
	{ "consumerd64-path", 1, 0, 't' },
	{ "consumerd64-libdir", 1, 0, 'T' },
	{ "daemonize", 0, 0, 'd' },
	{ "background", 0, 0, 'b' },
	{ "sig-parent", 0, 0, 'S' },
	{ "help", 0, 0, 'h' },
	{ "group", 1, 0, 'g' },
	{ "version", 0, 0, 'V' },
	{ "quiet", 0, 0, 'q' },
	{ "verbose", 0, 0, 'v' },
	{ "verbose-consumer", 0, 0, 'Z' },
	{ "no-kernel", 0, 0, 'N' },
	{ "pidfile", 1, 0, 'p' },
	{ "jul-tcp-port", 1, 0, 'J' },
	{ "config", 1, 0, 'f' },
};

/* Command line options to ignore from configuration file */
static const char *config_ignore_options[] = { "help", "version", "config" };
/* Shared between threads */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

int apps_cmd_notify_pipe[2] = { -1, -1 };
/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
static pthread_t ht_cleanup_thread;
static pthread_t jul_reg_thread;

/*
 * UST registration command queue. This queue is tied with a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_manage_apps and thread_dispatch_ust_registration interact with
 * this queue and the wait/wake scheme.
 */
static struct ust_cmd_queue ust_cmd_queue;
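
/*
 * How the queue and its futex are used in this file (illustrative sketch of
 * the N-wakers / 1-waiter scheme; see thread_dispatch_ust_registration() and
 * thread_registration_apps() for the real code):
 *
 *   waiter:  futex_nto1_prepare(&ust_cmd_queue.futex);
 *            node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
 *            ... drain the queue until it is empty ...
 *            futex_nto1_wait(&ust_cmd_queue.futex);
 *
 *   waker:   cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
 *            futex_nto1_wake(&ust_cmd_queue.futex);
 */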
/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;

int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
static int consumerd32_bin_override;
static int consumerd64_bin_override;
static int consumerd32_libdir_override;
static int consumerd64_libdir_override;

static const char *module_proc_lttng = "/proc/lttng";
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR   = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd try to talk to consumer
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
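
/*
 * Readers and writers of these flags go through the urcu uatomic helpers: for
 * instance, update_ust_app() bails out when
 * uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR, while
 * thread_manage_consumer() publishes failures with
 * uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR) or
 * uatomic_set(&ust_consumerd_state, CONSUMER_ERROR).
 */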
/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;

/* Set in main() with the current page size. */
long page_size;

/* Application health monitoring */
struct health_app *health_sessiond;

/* JUL TCP port for registration. Used by the JUL thread. */
unsigned int jul_tcp_port = DEFAULT_JUL_TCP_PORT;

/* Am I root or not. */
int is_root;			/* Set to 1 if the daemon is running as root */

const char * const config_section_name = "sessiond";

/*
 * Whether sessiond is ready for commands/health check requests.
 * NR_LTTNG_SESSIOND_READY must match the number of calls to
 * lttng_sessiond_notify_ready().
 */
#define NR_LTTNG_SESSIOND_READY		2
int lttng_sessiond_ready = NR_LTTNG_SESSIOND_READY;
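
/*
 * lttng_sessiond_ready starts at NR_LTTNG_SESSIOND_READY; each subsystem that
 * must be operational before clients are served calls
 * lttng_sessiond_notify_ready() exactly once. The function below decrements
 * the counter with uatomic_sub_return() and, only when it reaches zero,
 * signals the parent process with SIGUSR1 (ppid for --sig-parent, child_ppid
 * for --daemonize/--background).
 */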
/* Notify parents that we are ready for cmd and health check */
void lttng_sessiond_notify_ready(void)
{
	if (uatomic_sub_return(&lttng_sessiond_ready, 1) == 0) {
		/*
		 * Notify parent pid that we are ready to accept command
		 * for client side. This ppid is the one from the
		 * external process that spawned us.
		 */
		if (opt_sig_parent) {
			kill(ppid, SIGUSR1);
		}

		/*
		 * Notify the parent of the fork() process that we are
		 * ready.
		 */
		if (opt_daemon || opt_background) {
			kill(child_ppid, SIGUSR1);
		}
	}
}
void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * runtime env. var. overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
{
	int ret;

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}
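
/*
 * Typical usage of the two helpers above by the thread functions in this file
 * (illustrative sketch):
 *
 *   ret = sessiond_set_thread_pollset(&events, nb_fds);
 *   ... lttng_poll_add() the thread's own FDs ...
 *   for (;;) {
 *           ret = lttng_poll_wait(&events, -1);
 *           for (i = 0; i < nb_fd; i++) {
 *                   revents = LTTNG_POLL_GETEV(&events, i);
 *                   pollfd = LTTNG_POLL_GETFD(&events, i);
 *                   if (sessiond_check_thread_quit_pipe(pollfd, revents))
 *                           goto exit;
 *                   ... handle the thread's own events ...
 *           }
 *   }
 */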
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	int ret, i;

	ret = pipe(thread_quit_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}
/*
 * Close every consumer sockets.
 */
static void close_consumer_sockets(void)
{
	int ret;

	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
}
507 static void cleanup(void)
510 struct ltt_session
*sess
, *stmp
;
516 * Close the thread quit pipe. It has already done its job,
517 * since we are now called.
519 utils_close_pipe(thread_quit_pipe
);
522 * If opt_pidfile is undefined, the default file will be wiped when
523 * removing the rundir.
526 ret
= remove(opt_pidfile
);
528 PERROR("remove pidfile %s", opt_pidfile
);
532 DBG("Removing sessiond and consumerd content of directory %s", rundir
);
535 snprintf(path
, PATH_MAX
,
537 rundir
, DEFAULT_LTTNG_SESSIOND_PIDFILE
);
538 DBG("Removing %s", path
);
541 snprintf(path
, PATH_MAX
, "%s/%s", rundir
,
542 DEFAULT_LTTNG_SESSIOND_JULPORT_FILE
);
543 DBG("Removing %s", path
);
547 snprintf(path
, PATH_MAX
,
548 DEFAULT_KCONSUMERD_ERR_SOCK_PATH
,
550 DBG("Removing %s", path
);
553 snprintf(path
, PATH_MAX
,
554 DEFAULT_KCONSUMERD_PATH
,
556 DBG("Removing directory %s", path
);
559 /* ust consumerd 32 */
560 snprintf(path
, PATH_MAX
,
561 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH
,
563 DBG("Removing %s", path
);
566 snprintf(path
, PATH_MAX
,
567 DEFAULT_USTCONSUMERD32_PATH
,
569 DBG("Removing directory %s", path
);
572 /* ust consumerd 64 */
573 snprintf(path
, PATH_MAX
,
574 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH
,
576 DBG("Removing %s", path
);
579 snprintf(path
, PATH_MAX
,
580 DEFAULT_USTCONSUMERD64_PATH
,
582 DBG("Removing directory %s", path
);
586 * We do NOT rmdir rundir because there are other processes
587 * using it, for instance lttng-relayd, which can start in
588 * parallel with this teardown.
593 DBG("Cleaning up all sessions");
595 /* Destroy session list mutex */
596 if (session_list_ptr
!= NULL
) {
597 pthread_mutex_destroy(&session_list_ptr
->lock
);
599 /* Cleanup ALL session */
600 cds_list_for_each_entry_safe(sess
, stmp
,
601 &session_list_ptr
->head
, list
) {
602 cmd_destroy_session(sess
, kernel_poll_pipe
[1]);
606 DBG("Closing all UST sockets");
607 ust_app_clean_list();
608 buffer_reg_destroy_registries();
610 if (is_root
&& !opt_no_kernel
) {
611 DBG2("Closing kernel fd");
612 if (kernel_tracer_fd
>= 0) {
613 ret
= close(kernel_tracer_fd
);
618 DBG("Unloading kernel modules");
619 modprobe_remove_lttng_all();
622 close_consumer_sockets();
625 * If the override option is set, the pointer points to a *non* const thus
626 * freeing it even though the variable type is set to const.
628 if (tracing_group_name_override
) {
629 free((void *) tracing_group_name
);
631 if (consumerd32_bin_override
) {
632 free((void *) consumerd32_bin
);
634 if (consumerd64_bin_override
) {
635 free((void *) consumerd64_bin
);
637 if (consumerd32_libdir_override
) {
638 free((void *) consumerd32_libdir
);
640 if (consumerd64_libdir_override
) {
641 free((void *) consumerd64_libdir
);
649 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
650 "Matthew, BEET driven development works!%c[%dm",
651 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
714 * Setup the outgoing data buffer for the response (llm) by allocating the
715 * right amount of memory and copying the original information from the lsm
718 * Return total size of the buffer pointed by buf.
720 static int setup_lttng_msg(struct command_ctx
*cmd_ctx
, size_t size
)
726 cmd_ctx
->llm
= zmalloc(sizeof(struct lttcomm_lttng_msg
) + buf_size
);
727 if (cmd_ctx
->llm
== NULL
) {
733 /* Copy common data */
734 cmd_ctx
->llm
->cmd_type
= cmd_ctx
->lsm
->cmd_type
;
735 cmd_ctx
->llm
->pid
= cmd_ctx
->lsm
->domain
.attr
.pid
;
737 cmd_ctx
->llm
->data_size
= size
;
738 cmd_ctx
->lttng_msg_size
= sizeof(struct lttcomm_lttng_msg
) + buf_size
;
747 * Update the kernel poll set of all channel fd available over all tracing
748 * session. Add the wakeup pipe at the end of the set.
750 static int update_kernel_poll(struct lttng_poll_event
*events
)
753 struct ltt_session
*session
;
754 struct ltt_kernel_channel
*channel
;
756 DBG("Updating kernel poll set");
759 cds_list_for_each_entry(session
, &session_list_ptr
->head
, list
) {
760 session_lock(session
);
761 if (session
->kernel_session
== NULL
) {
762 session_unlock(session
);
766 cds_list_for_each_entry(channel
,
767 &session
->kernel_session
->channel_list
.head
, list
) {
768 /* Add channel fd to the kernel poll set */
769 ret
= lttng_poll_add(events
, channel
->fd
, LPOLLIN
| LPOLLRDNORM
);
771 session_unlock(session
);
774 DBG("Channel fd %d added to kernel set", channel
->fd
);
776 session_unlock(session
);
778 session_unlock_list();
783 session_unlock_list();
788 * Find the channel fd from 'fd' over all tracing session. When found, check
789 * for new channel stream and send those stream fds to the kernel consumer.
791 * Useful for CPU hotplug feature.
793 static int update_kernel_stream(struct consumer_data
*consumer_data
, int fd
)
796 struct ltt_session
*session
;
797 struct ltt_kernel_session
*ksess
;
798 struct ltt_kernel_channel
*channel
;
800 DBG("Updating kernel streams for channel fd %d", fd
);
803 cds_list_for_each_entry(session
, &session_list_ptr
->head
, list
) {
804 session_lock(session
);
805 if (session
->kernel_session
== NULL
) {
806 session_unlock(session
);
809 ksess
= session
->kernel_session
;
811 cds_list_for_each_entry(channel
, &ksess
->channel_list
.head
, list
) {
812 if (channel
->fd
== fd
) {
813 DBG("Channel found, updating kernel streams");
814 ret
= kernel_open_channel_stream(channel
);
818 /* Update the stream global counter */
819 ksess
->stream_count_global
+= ret
;
822 * Have we already sent fds to the consumer? If yes, it means
823 * that tracing is started so it is safe to send our updated
826 if (ksess
->consumer_fds_sent
== 1 && ksess
->consumer
!= NULL
) {
827 struct lttng_ht_iter iter
;
828 struct consumer_socket
*socket
;
831 cds_lfht_for_each_entry(ksess
->consumer
->socks
->ht
,
832 &iter
.iter
, socket
, node
.node
) {
833 pthread_mutex_lock(socket
->lock
);
834 ret
= kernel_consumer_send_channel_stream(socket
,
836 session
->output_traces
? 1 : 0);
837 pthread_mutex_unlock(socket
->lock
);
848 session_unlock(session
);
850 session_unlock_list();
854 session_unlock(session
);
855 session_unlock_list();
/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}
}
884 * This thread manage event coming from the kernel.
886 * Features supported in this thread:
889 static void *thread_manage_kernel(void *data
)
891 int ret
, i
, pollfd
, update_poll_flag
= 1, err
= -1;
892 uint32_t revents
, nb_fd
;
894 struct lttng_poll_event events
;
896 DBG("[thread] Thread manage kernel started");
898 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_KERNEL
);
901 * This first step of the while is to clean this structure which could free
902 * non NULL pointers so initialize it before the loop.
904 lttng_poll_init(&events
);
906 if (testpoint(sessiond_thread_manage_kernel
)) {
907 goto error_testpoint
;
910 health_code_update();
912 if (testpoint(sessiond_thread_manage_kernel_before_loop
)) {
913 goto error_testpoint
;
917 health_code_update();
919 if (update_poll_flag
== 1) {
920 /* Clean events object. We are about to populate it again. */
921 lttng_poll_clean(&events
);
923 ret
= sessiond_set_thread_pollset(&events
, 2);
925 goto error_poll_create
;
928 ret
= lttng_poll_add(&events
, kernel_poll_pipe
[0], LPOLLIN
);
933 /* This will add the available kernel channel if any. */
934 ret
= update_kernel_poll(&events
);
938 update_poll_flag
= 0;
941 DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events
));
943 /* Poll infinite value of time */
946 ret
= lttng_poll_wait(&events
, -1);
950 * Restart interrupted system call.
952 if (errno
== EINTR
) {
956 } else if (ret
== 0) {
957 /* Should not happen since timeout is infinite */
958 ERR("Return value of poll is 0 with an infinite timeout.\n"
959 "This should not have happened! Continuing...");
965 for (i
= 0; i
< nb_fd
; i
++) {
966 /* Fetch once the poll data */
967 revents
= LTTNG_POLL_GETEV(&events
, i
);
968 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
970 health_code_update();
972 /* Thread quit pipe has been closed. Killing thread. */
973 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
979 /* Check for data on kernel pipe */
980 if (pollfd
== kernel_poll_pipe
[0] && (revents
& LPOLLIN
)) {
981 (void) lttng_read(kernel_poll_pipe
[0],
984 * Ret value is useless here, if this pipe gets any actions an
985 * update is required anyway.
987 update_poll_flag
= 1;
991 * New CPU detected by the kernel. Adding kernel stream to
992 * kernel session and updating the kernel consumer
994 if (revents
& LPOLLIN
) {
995 ret
= update_kernel_stream(&kconsumer_data
, pollfd
);
1001 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
1002 * and unregister kernel stream at this point.
1011 lttng_poll_clean(&events
);
1014 utils_close_pipe(kernel_poll_pipe
);
1015 kernel_poll_pipe
[0] = kernel_poll_pipe
[1] = -1;
1018 ERR("Health error occurred in %s", __func__
);
1019 WARN("Kernel thread died unexpectedly. "
1020 "Kernel tracing can continue but CPU hotplug is disabled.");
1022 health_unregister(health_sessiond
);
1023 DBG("Kernel thread dying");
/*
 * Signal the pthread condition of the consumer data so that any thread
 * waiting on it is woken up.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the
	 * waiter's job to correctly interpret this condition variable associated
	 * to the consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread startup.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
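
/*
 * The matching waiter is spawn_consumer_thread(): it locks data->cond_mutex,
 * arms a CLOCK_MONOTONIC timeout of DEFAULT_SEM_WAIT_TIMEOUT seconds, then
 * loops on pthread_cond_wait()/pthread_cond_timedwait() until
 * consumer_thread_is_ready changes or ETIMEDOUT is returned.
 */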
1051 * This thread manage the consumer error sent back to the session daemon.
1053 static void *thread_manage_consumer(void *data
)
1055 int sock
= -1, i
, ret
, pollfd
, err
= -1;
1056 uint32_t revents
, nb_fd
;
1057 enum lttcomm_return_code code
;
1058 struct lttng_poll_event events
;
1059 struct consumer_data
*consumer_data
= data
;
1061 DBG("[thread] Manage consumer started");
1063 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_CONSUMER
);
1065 health_code_update();
1068 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
1069 * metadata_sock. Nothing more will be added to this poll set.
1071 ret
= sessiond_set_thread_pollset(&events
, 3);
1077 * The error socket here is already in a listening state which was done
1078 * just before spawning this thread to avoid a race between the consumer
1079 * daemon exec trying to connect and the listen() call.
1081 ret
= lttng_poll_add(&events
, consumer_data
->err_sock
, LPOLLIN
| LPOLLRDHUP
);
1086 health_code_update();
1088 /* Infinite blocking call, waiting for transmission */
1090 health_poll_entry();
1092 if (testpoint(sessiond_thread_manage_consumer
)) {
1096 ret
= lttng_poll_wait(&events
, -1);
1100 * Restart interrupted system call.
1102 if (errno
== EINTR
) {
1110 for (i
= 0; i
< nb_fd
; i
++) {
1111 /* Fetch once the poll data */
1112 revents
= LTTNG_POLL_GETEV(&events
, i
);
1113 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1115 health_code_update();
1117 /* Thread quit pipe has been closed. Killing thread. */
1118 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
1124 /* Event on the registration socket */
1125 if (pollfd
== consumer_data
->err_sock
) {
1126 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1127 ERR("consumer err socket poll error");
1133 sock
= lttcomm_accept_unix_sock(consumer_data
->err_sock
);
1139 * Set the CLOEXEC flag. Return code is useless because either way, the
1142 (void) utils_set_fd_cloexec(sock
);
1144 health_code_update();
1146 DBG2("Receiving code from consumer err_sock");
1148 /* Getting status code from kconsumerd */
1149 ret
= lttcomm_recv_unix_sock(sock
, &code
,
1150 sizeof(enum lttcomm_return_code
));
1155 health_code_update();
1156 if (code
== LTTCOMM_CONSUMERD_COMMAND_SOCK_READY
) {
1157 /* Connect both socket, command and metadata. */
1158 consumer_data
->cmd_sock
=
1159 lttcomm_connect_unix_sock(consumer_data
->cmd_unix_sock_path
);
1160 consumer_data
->metadata_fd
=
1161 lttcomm_connect_unix_sock(consumer_data
->cmd_unix_sock_path
);
1162 if (consumer_data
->cmd_sock
< 0
1163 || consumer_data
->metadata_fd
< 0) {
1164 PERROR("consumer connect cmd socket");
1165 /* On error, signal condition and quit. */
1166 signal_consumer_condition(consumer_data
, -1);
1169 consumer_data
->metadata_sock
.fd_ptr
= &consumer_data
->metadata_fd
;
1170 /* Create metadata socket lock. */
1171 consumer_data
->metadata_sock
.lock
= zmalloc(sizeof(pthread_mutex_t
));
1172 if (consumer_data
->metadata_sock
.lock
== NULL
) {
1173 PERROR("zmalloc pthread mutex");
1177 pthread_mutex_init(consumer_data
->metadata_sock
.lock
, NULL
);
1179 signal_consumer_condition(consumer_data
, 1);
1180 DBG("Consumer command socket ready (fd: %d", consumer_data
->cmd_sock
);
1181 DBG("Consumer metadata socket ready (fd: %d)",
1182 consumer_data
->metadata_fd
);
1184 ERR("consumer error when waiting for SOCK_READY : %s",
1185 lttcomm_get_readable_code(-code
));
1189 /* Remove the consumerd error sock since we've established a connexion */
1190 ret
= lttng_poll_del(&events
, consumer_data
->err_sock
);
1195 /* Add new accepted error socket. */
1196 ret
= lttng_poll_add(&events
, sock
, LPOLLIN
| LPOLLRDHUP
);
1201 /* Add metadata socket that is successfully connected. */
1202 ret
= lttng_poll_add(&events
, consumer_data
->metadata_fd
,
1203 LPOLLIN
| LPOLLRDHUP
);
1208 health_code_update();
1210 /* Infinite blocking call, waiting for transmission */
1213 health_poll_entry();
1214 ret
= lttng_poll_wait(&events
, -1);
1218 * Restart interrupted system call.
1220 if (errno
== EINTR
) {
1228 for (i
= 0; i
< nb_fd
; i
++) {
1229 /* Fetch once the poll data */
1230 revents
= LTTNG_POLL_GETEV(&events
, i
);
1231 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1233 health_code_update();
1235 /* Thread quit pipe has been closed. Killing thread. */
1236 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
1242 if (pollfd
== sock
) {
1243 /* Event on the consumerd socket */
1244 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1245 ERR("consumer err socket second poll error");
1248 health_code_update();
1249 /* Wait for any kconsumerd error */
1250 ret
= lttcomm_recv_unix_sock(sock
, &code
,
1251 sizeof(enum lttcomm_return_code
));
1253 ERR("consumer closed the command socket");
1257 ERR("consumer return code : %s",
1258 lttcomm_get_readable_code(-code
));
1261 } else if (pollfd
== consumer_data
->metadata_fd
) {
1262 /* UST metadata requests */
1263 ret
= ust_consumer_metadata_request(
1264 &consumer_data
->metadata_sock
);
1266 ERR("Handling metadata request");
1271 ERR("Unknown pollfd");
1275 health_code_update();
1281 * We lock here because we are about to close the sockets and some other
1282 * thread might be using them so get exclusive access which will abort all
1283 * other consumer command by other threads.
1285 pthread_mutex_lock(&consumer_data
->lock
);
1287 /* Immediately set the consumerd state to stopped */
1288 if (consumer_data
->type
== LTTNG_CONSUMER_KERNEL
) {
1289 uatomic_set(&kernel_consumerd_state
, CONSUMER_ERROR
);
1290 } else if (consumer_data
->type
== LTTNG_CONSUMER64_UST
||
1291 consumer_data
->type
== LTTNG_CONSUMER32_UST
) {
1292 uatomic_set(&ust_consumerd_state
, CONSUMER_ERROR
);
1294 /* Code flow error... */
1298 if (consumer_data
->err_sock
>= 0) {
1299 ret
= close(consumer_data
->err_sock
);
1303 consumer_data
->err_sock
= -1;
1305 if (consumer_data
->cmd_sock
>= 0) {
1306 ret
= close(consumer_data
->cmd_sock
);
1310 consumer_data
->cmd_sock
= -1;
1312 if (consumer_data
->metadata_sock
.fd_ptr
&&
1313 *consumer_data
->metadata_sock
.fd_ptr
>= 0) {
1314 ret
= close(*consumer_data
->metadata_sock
.fd_ptr
);
1326 unlink(consumer_data
->err_unix_sock_path
);
1327 unlink(consumer_data
->cmd_unix_sock_path
);
1328 consumer_data
->pid
= 0;
1329 pthread_mutex_unlock(&consumer_data
->lock
);
1331 /* Cleanup metadata socket mutex. */
1332 if (consumer_data
->metadata_sock
.lock
) {
1333 pthread_mutex_destroy(consumer_data
->metadata_sock
.lock
);
1334 free(consumer_data
->metadata_sock
.lock
);
1336 lttng_poll_clean(&events
);
1340 ERR("Health error occurred in %s", __func__
);
1342 health_unregister(health_sessiond
);
1343 DBG("consumer thread cleanup completed");
1349 * This thread manage application communication.
1351 static void *thread_manage_apps(void *data
)
1353 int i
, ret
, pollfd
, err
= -1;
1355 uint32_t revents
, nb_fd
;
1356 struct lttng_poll_event events
;
1358 DBG("[thread] Manage application started");
1360 rcu_register_thread();
1361 rcu_thread_online();
1363 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_APP_MANAGE
);
1365 if (testpoint(sessiond_thread_manage_apps
)) {
1366 goto error_testpoint
;
1369 health_code_update();
1371 ret
= sessiond_set_thread_pollset(&events
, 2);
1373 goto error_poll_create
;
1376 ret
= lttng_poll_add(&events
, apps_cmd_pipe
[0], LPOLLIN
| LPOLLRDHUP
);
1381 if (testpoint(sessiond_thread_manage_apps_before_loop
)) {
1385 health_code_update();
1388 DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events
));
1390 /* Inifinite blocking call, waiting for transmission */
1392 health_poll_entry();
1393 ret
= lttng_poll_wait(&events
, -1);
1397 * Restart interrupted system call.
1399 if (errno
== EINTR
) {
1407 for (i
= 0; i
< nb_fd
; i
++) {
1408 /* Fetch once the poll data */
1409 revents
= LTTNG_POLL_GETEV(&events
, i
);
1410 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1412 health_code_update();
1414 /* Thread quit pipe has been closed. Killing thread. */
1415 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
1421 /* Inspect the apps cmd pipe */
1422 if (pollfd
== apps_cmd_pipe
[0]) {
1423 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1424 ERR("Apps command pipe error");
1426 } else if (revents
& LPOLLIN
) {
1430 size_ret
= lttng_read(apps_cmd_pipe
[0], &sock
, sizeof(sock
));
1431 if (size_ret
< sizeof(sock
)) {
1432 PERROR("read apps cmd pipe");
1436 health_code_update();
1439 * We only monitor the error events of the socket. This
1440 * thread does not handle any incoming data from UST
1443 ret
= lttng_poll_add(&events
, sock
,
1444 LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
);
1449 DBG("Apps with sock %d added to poll set", sock
);
1453 * At this point, we know that a registered application made
1454 * the event at poll_wait.
1456 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1457 /* Removing from the poll set */
1458 ret
= lttng_poll_del(&events
, pollfd
);
1463 /* Socket closed on remote end. */
1464 ust_app_unregister(pollfd
);
1468 health_code_update();
1474 lttng_poll_clean(&events
);
1477 utils_close_pipe(apps_cmd_pipe
);
1478 apps_cmd_pipe
[0] = apps_cmd_pipe
[1] = -1;
1481 * We don't clean the UST app hash table here since already registered
1482 * applications can still be controlled so let them be until the session
1483 * daemon dies or the applications stop.
1488 ERR("Health error occurred in %s", __func__
);
1490 health_unregister(health_sessiond
);
1491 DBG("Application communication apps thread cleanup complete");
1492 rcu_thread_offline();
1493 rcu_unregister_thread();
/*
 * Send a socket to a thread. This is called from the dispatch UST
 * registration thread once all sockets are set for the application.
 *
 * The sock value can be invalid, we don't really care, the thread will handle
 * it and make the necessary cleanup if so.
 *
 * On success, return 0 else a negative value being the errno message of the
 * write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	ssize_t ret;

	/*
	 * It's possible that the FD is set as invalid with -1 concurrently just
	 * before calling this function being a shutdown state of the thread.
	 */

	ret = lttng_write(fd, &sock, sizeof(sock));
	if (ret < sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		return -errno;
	}

	/* All good. Don't send back the write positive ret value. */
	return 0;
}
1536 * Sanitize the wait queue of the dispatch registration thread meaning removing
1537 * invalid nodes from it. This is to avoid memory leaks for the case the UST
1538 * notify socket is never received.
1540 static void sanitize_wait_queue(struct ust_reg_wait_queue
*wait_queue
)
1542 int ret
, nb_fd
= 0, i
;
1543 unsigned int fd_added
= 0;
1544 struct lttng_poll_event events
;
1545 struct ust_reg_wait_node
*wait_node
= NULL
, *tmp_wait_node
;
1549 lttng_poll_init(&events
);
1551 /* Just skip everything for an empty queue. */
1552 if (!wait_queue
->count
) {
1556 ret
= lttng_poll_create(&events
, wait_queue
->count
, LTTNG_CLOEXEC
);
1561 cds_list_for_each_entry_safe(wait_node
, tmp_wait_node
,
1562 &wait_queue
->head
, head
) {
1563 assert(wait_node
->app
);
1564 ret
= lttng_poll_add(&events
, wait_node
->app
->sock
,
1565 LPOLLHUP
| LPOLLERR
);
1578 * Poll but don't block so we can quickly identify the faulty events and
1579 * clean them afterwards from the wait queue.
1581 ret
= lttng_poll_wait(&events
, 0);
1587 for (i
= 0; i
< nb_fd
; i
++) {
1588 /* Get faulty FD. */
1589 uint32_t revents
= LTTNG_POLL_GETEV(&events
, i
);
1590 int pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1592 cds_list_for_each_entry_safe(wait_node
, tmp_wait_node
,
1593 &wait_queue
->head
, head
) {
1594 if (pollfd
== wait_node
->app
->sock
&&
1595 (revents
& (LPOLLHUP
| LPOLLERR
))) {
1596 cds_list_del(&wait_node
->head
);
1597 wait_queue
->count
--;
1598 ust_app_destroy(wait_node
->app
);
1606 DBG("Wait queue sanitized, %d node were cleaned up", nb_fd
);
1610 lttng_poll_clean(&events
);
1614 lttng_poll_clean(&events
);
1616 ERR("Unable to sanitize wait queue");
1621 * Dispatch request from the registration threads to the application
1622 * communication thread.
1624 static void *thread_dispatch_ust_registration(void *data
)
1627 struct cds_wfq_node
*node
;
1628 struct ust_command
*ust_cmd
= NULL
;
1629 struct ust_reg_wait_node
*wait_node
= NULL
, *tmp_wait_node
;
1630 struct ust_reg_wait_queue wait_queue
= {
1634 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH
);
1636 if (testpoint(sessiond_thread_app_reg_dispatch
)) {
1637 goto error_testpoint
;
1640 health_code_update();
1642 CDS_INIT_LIST_HEAD(&wait_queue
.head
);
1644 DBG("[thread] Dispatch UST command started");
1646 while (!CMM_LOAD_SHARED(dispatch_thread_exit
)) {
1647 health_code_update();
1649 /* Atomically prepare the queue futex */
1650 futex_nto1_prepare(&ust_cmd_queue
.futex
);
1653 struct ust_app
*app
= NULL
;
1657 * Make sure we don't have node(s) that have hung up before receiving
1658 * the notify socket. This is to clean the list in order to avoid
1659 * memory leaks from notify socket that are never seen.
1661 sanitize_wait_queue(&wait_queue
);
1663 health_code_update();
1664 /* Dequeue command for registration */
1665 node
= cds_wfq_dequeue_blocking(&ust_cmd_queue
.queue
);
1667 DBG("Woken up but nothing in the UST command queue");
1668 /* Continue thread execution */
1672 ust_cmd
= caa_container_of(node
, struct ust_command
, node
);
1674 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1675 " gid:%d sock:%d name:%s (version %d.%d)",
1676 ust_cmd
->reg_msg
.pid
, ust_cmd
->reg_msg
.ppid
,
1677 ust_cmd
->reg_msg
.uid
, ust_cmd
->reg_msg
.gid
,
1678 ust_cmd
->sock
, ust_cmd
->reg_msg
.name
,
1679 ust_cmd
->reg_msg
.major
, ust_cmd
->reg_msg
.minor
);
1681 if (ust_cmd
->reg_msg
.type
== USTCTL_SOCKET_CMD
) {
1682 wait_node
= zmalloc(sizeof(*wait_node
));
1684 PERROR("zmalloc wait_node dispatch");
1685 ret
= close(ust_cmd
->sock
);
1687 PERROR("close ust sock dispatch %d", ust_cmd
->sock
);
1689 lttng_fd_put(LTTNG_FD_APPS
, 1);
1693 CDS_INIT_LIST_HEAD(&wait_node
->head
);
1695 /* Create application object if socket is CMD. */
1696 wait_node
->app
= ust_app_create(&ust_cmd
->reg_msg
,
1698 if (!wait_node
->app
) {
1699 ret
= close(ust_cmd
->sock
);
1701 PERROR("close ust sock dispatch %d", ust_cmd
->sock
);
1703 lttng_fd_put(LTTNG_FD_APPS
, 1);
1709 * Add application to the wait queue so we can set the notify
1710 * socket before putting this object in the global ht.
1712 cds_list_add(&wait_node
->head
, &wait_queue
.head
);
1717 * We have to continue here since we don't have the notify
1718 * socket and the application MUST be added to the hash table
1719 * only at that moment.
1724 * Look for the application in the local wait queue and set the
1725 * notify socket if found.
1727 cds_list_for_each_entry_safe(wait_node
, tmp_wait_node
,
1728 &wait_queue
.head
, head
) {
1729 health_code_update();
1730 if (wait_node
->app
->pid
== ust_cmd
->reg_msg
.pid
) {
1731 wait_node
->app
->notify_sock
= ust_cmd
->sock
;
1732 cds_list_del(&wait_node
->head
);
1734 app
= wait_node
->app
;
1736 DBG3("UST app notify socket %d is set", ust_cmd
->sock
);
1742 * With no application at this stage the received socket is
1743 * basically useless so close it before we free the cmd data
1744 * structure for good.
1747 ret
= close(ust_cmd
->sock
);
1749 PERROR("close ust sock dispatch %d", ust_cmd
->sock
);
1751 lttng_fd_put(LTTNG_FD_APPS
, 1);
1758 * @session_lock_list
1760 * Lock the global session list so from the register up to the
1761 * registration done message, no thread can see the application
1762 * and change its state.
1764 session_lock_list();
1768 * Add application to the global hash table. This needs to be
1769 * done before the update to the UST registry can locate the
1774 /* Set app version. This call will print an error if needed. */
1775 (void) ust_app_version(app
);
1777 /* Send notify socket through the notify pipe. */
1778 ret
= send_socket_to_thread(apps_cmd_notify_pipe
[1],
1782 session_unlock_list();
1784 * No notify thread, stop the UST tracing. However, this is
1785 * not an internal error of the this thread thus setting
1786 * the health error code to a normal exit.
1793 * Update newly registered application with the tracing
1794 * registry info already enabled information.
1796 update_ust_app(app
->sock
);
1799 * Don't care about return value. Let the manage apps threads
1800 * handle app unregistration upon socket close.
1802 (void) ust_app_register_done(app
->sock
);
1805 * Even if the application socket has been closed, send the app
1806 * to the thread and unregistration will take place at that
1809 ret
= send_socket_to_thread(apps_cmd_pipe
[1], app
->sock
);
1812 session_unlock_list();
1814 * No apps. thread, stop the UST tracing. However, this is
1815 * not an internal error of the this thread thus setting
1816 * the health error code to a normal exit.
1823 session_unlock_list();
1825 } while (node
!= NULL
);
1827 health_poll_entry();
1828 /* Futex wait on queue. Blocking call on futex() */
1829 futex_nto1_wait(&ust_cmd_queue
.futex
);
1832 /* Normal exit, no error */
1836 /* Clean up wait queue. */
1837 cds_list_for_each_entry_safe(wait_node
, tmp_wait_node
,
1838 &wait_queue
.head
, head
) {
1839 cds_list_del(&wait_node
->head
);
1845 DBG("Dispatch thread dying");
1848 ERR("Health error occurred in %s", __func__
);
1850 health_unregister(health_sessiond
);
1855 * This thread manage application registration.
1857 static void *thread_registration_apps(void *data
)
1859 int sock
= -1, i
, ret
, pollfd
, err
= -1;
1860 uint32_t revents
, nb_fd
;
1861 struct lttng_poll_event events
;
1863 * Get allocated in this thread, enqueued to a global queue, dequeued and
1864 * freed in the manage apps thread.
1866 struct ust_command
*ust_cmd
= NULL
;
1868 DBG("[thread] Manage application registration started");
1870 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_APP_REG
);
1872 if (testpoint(sessiond_thread_registration_apps
)) {
1873 goto error_testpoint
;
1876 ret
= lttcomm_listen_unix_sock(apps_sock
);
1882 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1883 * more will be added to this poll set.
1885 ret
= sessiond_set_thread_pollset(&events
, 2);
1887 goto error_create_poll
;
1890 /* Add the application registration socket */
1891 ret
= lttng_poll_add(&events
, apps_sock
, LPOLLIN
| LPOLLRDHUP
);
1893 goto error_poll_add
;
1896 /* Notify all applications to register */
1897 ret
= notify_ust_apps(1);
1899 ERR("Failed to notify applications or create the wait shared memory.\n"
1900 "Execution continues but there might be problem for already\n"
1901 "running applications that wishes to register.");
1905 DBG("Accepting application registration");
1907 /* Inifinite blocking call, waiting for transmission */
1909 health_poll_entry();
1910 ret
= lttng_poll_wait(&events
, -1);
1914 * Restart interrupted system call.
1916 if (errno
== EINTR
) {
1924 for (i
= 0; i
< nb_fd
; i
++) {
1925 health_code_update();
1927 /* Fetch once the poll data */
1928 revents
= LTTNG_POLL_GETEV(&events
, i
);
1929 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1931 /* Thread quit pipe has been closed. Killing thread. */
1932 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
1938 /* Event on the registration socket */
1939 if (pollfd
== apps_sock
) {
1940 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1941 ERR("Register apps socket poll error");
1943 } else if (revents
& LPOLLIN
) {
1944 sock
= lttcomm_accept_unix_sock(apps_sock
);
1950 * Set socket timeout for both receiving and ending.
1951 * app_socket_timeout is in seconds, whereas
1952 * lttcomm_setsockopt_rcv_timeout and
1953 * lttcomm_setsockopt_snd_timeout expect msec as
1956 (void) lttcomm_setsockopt_rcv_timeout(sock
,
1957 app_socket_timeout
* 1000);
1958 (void) lttcomm_setsockopt_snd_timeout(sock
,
1959 app_socket_timeout
* 1000);
1962 * Set the CLOEXEC flag. Return code is useless because
1963 * either way, the show must go on.
1965 (void) utils_set_fd_cloexec(sock
);
1967 /* Create UST registration command for enqueuing */
1968 ust_cmd
= zmalloc(sizeof(struct ust_command
));
1969 if (ust_cmd
== NULL
) {
1970 PERROR("ust command zmalloc");
1975 * Using message-based transmissions to ensure we don't
1976 * have to deal with partially received messages.
1978 ret
= lttng_fd_get(LTTNG_FD_APPS
, 1);
1980 ERR("Exhausted file descriptors allowed for applications.");
1990 health_code_update();
1991 ret
= ust_app_recv_registration(sock
, &ust_cmd
->reg_msg
);
1994 /* Close socket of the application. */
1999 lttng_fd_put(LTTNG_FD_APPS
, 1);
2003 health_code_update();
2005 ust_cmd
->sock
= sock
;
2008 DBG("UST registration received with pid:%d ppid:%d uid:%d"
2009 " gid:%d sock:%d name:%s (version %d.%d)",
2010 ust_cmd
->reg_msg
.pid
, ust_cmd
->reg_msg
.ppid
,
2011 ust_cmd
->reg_msg
.uid
, ust_cmd
->reg_msg
.gid
,
2012 ust_cmd
->sock
, ust_cmd
->reg_msg
.name
,
2013 ust_cmd
->reg_msg
.major
, ust_cmd
->reg_msg
.minor
);
2016 * Lock free enqueue the registration request. The red pill
2017 * has been taken! This apps will be part of the *system*.
2019 cds_wfq_enqueue(&ust_cmd_queue
.queue
, &ust_cmd
->node
);
2022 * Wake the registration queue futex. Implicit memory
2023 * barrier with the exchange in cds_wfq_enqueue.
2025 futex_nto1_wake(&ust_cmd_queue
.futex
);
2033 /* Notify that the registration thread is gone */
2036 if (apps_sock
>= 0) {
2037 ret
= close(apps_sock
);
2047 lttng_fd_put(LTTNG_FD_APPS
, 1);
2049 unlink(apps_unix_sock_path
);
2052 lttng_poll_clean(&events
);
2056 DBG("UST Registration thread cleanup complete");
2059 ERR("Health error occurred in %s", __func__
);
2061 health_unregister(health_sessiond
);
2067 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
2068 * exec or it will fails.
2070 static int spawn_consumer_thread(struct consumer_data
*consumer_data
)
2073 struct timespec timeout
;
2075 /* Make sure we set the readiness flag to 0 because we are NOT ready */
2076 consumer_data
->consumer_thread_is_ready
= 0;
2078 /* Setup pthread condition */
2079 ret
= pthread_condattr_init(&consumer_data
->condattr
);
2082 PERROR("pthread_condattr_init consumer data");
2087 * Set the monotonic clock in order to make sure we DO NOT jump in time
2088 * between the clock_gettime() call and the timedwait call. See bug #324
2089 * for a more details and how we noticed it.
2091 ret
= pthread_condattr_setclock(&consumer_data
->condattr
, CLOCK_MONOTONIC
);
2094 PERROR("pthread_condattr_setclock consumer data");
2098 ret
= pthread_cond_init(&consumer_data
->cond
, &consumer_data
->condattr
);
2101 PERROR("pthread_cond_init consumer data");
2105 ret
= pthread_create(&consumer_data
->thread
, NULL
, thread_manage_consumer
,
2108 PERROR("pthread_create consumer");
2113 /* We are about to wait on a pthread condition */
2114 pthread_mutex_lock(&consumer_data
->cond_mutex
);
2116 /* Get time for sem_timedwait absolute timeout */
2117 clock_ret
= clock_gettime(CLOCK_MONOTONIC
, &timeout
);
2119 * Set the timeout for the condition timed wait even if the clock gettime
2120 * call fails since we might loop on that call and we want to avoid to
2121 * increment the timeout too many times.
2123 timeout
.tv_sec
+= DEFAULT_SEM_WAIT_TIMEOUT
;
2126 * The following loop COULD be skipped in some conditions so this is why we
2127 * set ret to 0 in order to make sure at least one round of the loop is
2133 * Loop until the condition is reached or when a timeout is reached. Note
2134 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
2135 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
2136 * possible. This loop does not take any chances and works with both of
2139 while (!consumer_data
->consumer_thread_is_ready
&& ret
!= ETIMEDOUT
) {
2140 if (clock_ret
< 0) {
2141 PERROR("clock_gettime spawn consumer");
2142 /* Infinite wait for the consumerd thread to be ready */
2143 ret
= pthread_cond_wait(&consumer_data
->cond
,
2144 &consumer_data
->cond_mutex
);
2146 ret
= pthread_cond_timedwait(&consumer_data
->cond
,
2147 &consumer_data
->cond_mutex
, &timeout
);
2151 /* Release the pthread condition */
2152 pthread_mutex_unlock(&consumer_data
->cond_mutex
);
2156 if (ret
== ETIMEDOUT
) {
2160 * Call has timed out so we kill the kconsumerd_thread and return
2163 ERR("Condition timed out. The consumer thread was never ready."
2165 pth_ret
= pthread_cancel(consumer_data
->thread
);
2167 PERROR("pthread_cancel consumer thread");
2170 PERROR("pthread_cond_wait failed consumer thread");
2172 /* Caller is expecting a negative value on failure. */
2177 pthread_mutex_lock(&consumer_data
->pid_mutex
);
2178 if (consumer_data
->pid
== 0) {
2179 ERR("Consumerd did not start");
2180 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
2183 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		int ret;

		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			ERR("Error killing consumer daemon");
			return ret;
		}
	}
	return pthread_join(consumer_data->thread, &status);
}
2213 * Fork and exec a consumer daemon (consumerd).
2215 * Return pid if successful else -1.
2217 static pid_t
spawn_consumerd(struct consumer_data
*consumer_data
)
2221 const char *consumer_to_use
;
2222 const char *verbosity
;
2225 DBG("Spawning consumerd");
2232 if (opt_verbose_consumer
) {
2233 verbosity
= "--verbose";
2235 verbosity
= "--quiet";
2237 switch (consumer_data
->type
) {
2238 case LTTNG_CONSUMER_KERNEL
:
2240 * Find out which consumerd to execute. We will first try the
2241 * 64-bit path, then the sessiond's installation directory, and
2242 * fallback on the 32-bit one,
2244 DBG3("Looking for a kernel consumer at these locations:");
2245 DBG3(" 1) %s", consumerd64_bin
);
2246 DBG3(" 2) %s/%s", INSTALL_BIN_PATH
, CONSUMERD_FILE
);
2247 DBG3(" 3) %s", consumerd32_bin
);
2248 if (stat(consumerd64_bin
, &st
) == 0) {
2249 DBG3("Found location #1");
2250 consumer_to_use
= consumerd64_bin
;
2251 } else if (stat(INSTALL_BIN_PATH
"/" CONSUMERD_FILE
, &st
) == 0) {
2252 DBG3("Found location #2");
2253 consumer_to_use
= INSTALL_BIN_PATH
"/" CONSUMERD_FILE
;
2254 } else if (stat(consumerd32_bin
, &st
) == 0) {
2255 DBG3("Found location #3");
2256 consumer_to_use
= consumerd32_bin
;
2258 DBG("Could not find any valid consumerd executable");
2262 DBG("Using kernel consumer at: %s", consumer_to_use
);
2263 ret
= execl(consumer_to_use
,
2264 "lttng-consumerd", verbosity
, "-k",
2265 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
2266 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
2267 "--group", tracing_group_name
,
2270 case LTTNG_CONSUMER64_UST
:
2272 char *tmpnew
= NULL
;
2274 if (consumerd64_libdir
[0] != '\0') {
2278 tmp
= getenv("LD_LIBRARY_PATH");
2282 tmplen
= strlen("LD_LIBRARY_PATH=")
2283 + strlen(consumerd64_libdir
) + 1 /* : */ + strlen(tmp
);
2284 tmpnew
= zmalloc(tmplen
+ 1 /* \0 */);
2289 strcpy(tmpnew
, "LD_LIBRARY_PATH=");
2290 strcat(tmpnew
, consumerd64_libdir
);
2291 if (tmp
[0] != '\0') {
2292 strcat(tmpnew
, ":");
2293 strcat(tmpnew
, tmp
);
2295 ret
= putenv(tmpnew
);
2302 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin
);
2303 ret
= execl(consumerd64_bin
, "lttng-consumerd", verbosity
, "-u",
2304 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
2305 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
2306 "--group", tracing_group_name
,
2308 if (consumerd64_libdir
[0] != '\0') {
2313 case LTTNG_CONSUMER32_UST
:
2315 char *tmpnew
= NULL
;
2317 if (consumerd32_libdir
[0] != '\0') {
2321 tmp
= getenv("LD_LIBRARY_PATH");
2325 tmplen
= strlen("LD_LIBRARY_PATH=")
2326 + strlen(consumerd32_libdir
) + 1 /* : */ + strlen(tmp
);
2327 tmpnew
= zmalloc(tmplen
+ 1 /* \0 */);
2332 strcpy(tmpnew
, "LD_LIBRARY_PATH=");
2333 strcat(tmpnew
, consumerd32_libdir
);
2334 if (tmp
[0] != '\0') {
2335 strcat(tmpnew
, ":");
2336 strcat(tmpnew
, tmp
);
2338 ret
= putenv(tmpnew
);
2345 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin
);
2346 ret
= execl(consumerd32_bin
, "lttng-consumerd", verbosity
, "-u",
2347 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
2348 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
2349 "--group", tracing_group_name
,
2351 if (consumerd32_libdir
[0] != '\0') {
2357 PERROR("unknown consumer type");
2361 PERROR("Consumer execl()");
2363 /* Reaching this point, we got a failure on our execl(). */
2365 } else if (pid
> 0) {
2368 PERROR("start consumer fork");
2376 * Spawn the consumerd daemon and session daemon thread.
2378 static int start_consumerd(struct consumer_data
*consumer_data
)
2383 * Set the listen() state on the socket since there is a possible race
2384 * between the exec() of the consumer daemon and this call if place in the
2385 * consumer thread. See bug #366 for more details.
2387 ret
= lttcomm_listen_unix_sock(consumer_data
->err_sock
);
2392 pthread_mutex_lock(&consumer_data
->pid_mutex
);
2393 if (consumer_data
->pid
!= 0) {
2394 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
2398 ret
= spawn_consumerd(consumer_data
);
2400 ERR("Spawning consumerd failed");
2401 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
2405 /* Setting up the consumer_data pid */
2406 consumer_data
->pid
= ret
;
2407 DBG2("Consumer pid %d", consumer_data
->pid
);
2408 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
2410 DBG2("Spawning consumer control thread");
2411 ret
= spawn_consumer_thread(consumer_data
);
2413 ERR("Fatal error spawning consumer control thread");
2421 /* Cleanup already created sockets on error. */
2422 if (consumer_data
->err_sock
>= 0) {
2425 err
= close(consumer_data
->err_sock
);
2427 PERROR("close consumer data error socket");
2434 * Setup necessary data for kernel tracer action.
2436 static int init_kernel_tracer(void)
2440 /* Modprobe lttng kernel modules */
2441 ret
= modprobe_lttng_control();
2446 /* Open debugfs lttng */
2447 kernel_tracer_fd
= open(module_proc_lttng
, O_RDWR
);
2448 if (kernel_tracer_fd
< 0) {
2449 DBG("Failed to open %s", module_proc_lttng
);
2454 /* Validate kernel version */
2455 ret
= kernel_validate_version(kernel_tracer_fd
);
2460 ret
= modprobe_lttng_data();
2465 DBG("Kernel tracer fd %d", kernel_tracer_fd
);
2469 modprobe_remove_lttng_control();
2470 ret
= close(kernel_tracer_fd
);
2474 kernel_tracer_fd
= -1;
2475 return LTTNG_ERR_KERN_VERSION
;
2478 ret
= close(kernel_tracer_fd
);
2484 modprobe_remove_lttng_control();
2487 WARN("No kernel tracer available");
2488 kernel_tracer_fd
= -1;
2490 return LTTNG_ERR_NEED_ROOT_SESSIOND
;
2492 return LTTNG_ERR_KERN_NA
;
2498 * Copy consumer output from the tracing session to the domain session. The
2499 * function also applies the right modification on a per domain basis for the
2500 * trace files destination directory.
2502 * Should *NOT* be called with RCU read-side lock held.
2504 static int copy_session_consumer(int domain
, struct ltt_session
*session
)
2507 const char *dir_name
;
2508 struct consumer_output
*consumer
;
2511 assert(session
->consumer
);
2514 case LTTNG_DOMAIN_KERNEL
:
2515 DBG3("Copying tracing session consumer output in kernel session");
2517 * XXX: We should audit the session creation and what this function
2518 * does "extra" in order to avoid a destroy since this function is used
2519 * in the domain session creation (kernel and ust) only. Same for UST
2522 if (session
->kernel_session
->consumer
) {
2523 consumer_destroy_output(session
->kernel_session
->consumer
);
2525 session
->kernel_session
->consumer
=
2526 consumer_copy_output(session
->consumer
);
2527 /* Ease our life a bit for the next part */
2528 consumer
= session
->kernel_session
->consumer
;
2529 dir_name
= DEFAULT_KERNEL_TRACE_DIR
;
2531 case LTTNG_DOMAIN_JUL
:
2532 case LTTNG_DOMAIN_UST
:
2533 DBG3("Copying tracing session consumer output in UST session");
2534 if (session
->ust_session
->consumer
) {
2535 consumer_destroy_output(session
->ust_session
->consumer
);
2537 session
->ust_session
->consumer
=
2538 consumer_copy_output(session
->consumer
);
2539 /* Ease our life a bit for the next part */
2540 consumer
= session
->ust_session
->consumer
;
2541 dir_name
= DEFAULT_UST_TRACE_DIR
;
2544 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
2548 /* Append correct directory to subdir */
2549 strncat(consumer
->subdir
, dir_name
,
2550 sizeof(consumer
->subdir
) - strlen(consumer
->subdir
) - 1);
2551 DBG3("Copy session consumer subdir %s", consumer
->subdir
);
/*
 * Create an UST session and add it to the session ust list.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int create_ust_session(struct ltt_session *session,
        struct lttng_domain *domain)
{
    int ret;
    struct ltt_ust_session *lus = NULL;

    assert(session->consumer);

    switch (domain->type) {
    case LTTNG_DOMAIN_JUL:
    case LTTNG_DOMAIN_UST:
        break;
    default:
        ERR("Unknown UST domain on create session %d", domain->type);
        ret = LTTNG_ERR_UNKNOWN_DOMAIN;
        goto error;
    }

    DBG("Creating UST session");

    lus = trace_ust_create_session(session->id);
    if (lus == NULL) {
        ret = LTTNG_ERR_UST_SESS_FAIL;
        goto error;
    }

    lus->uid = session->uid;
    lus->gid = session->gid;
    lus->output_traces = session->output_traces;
    lus->snapshot_mode = session->snapshot_mode;
    lus->live_timer_interval = session->live_timer;
    session->ust_session = lus;

    /* Copy session output to the newly created UST session */
    ret = copy_session_consumer(domain->type, session);
    if (ret != LTTNG_OK) {
        goto error;
    }

    return LTTNG_OK;

error:
    session->ust_session = NULL;
    return ret;
}
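/*
 * On any failure above the new UST session is dropped and
 * session->ust_session is reset to NULL, so a later retry of the command
 * sees the parent session in the same state as before the attempt.
 */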
/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
    int ret;

    DBG("Creating kernel session");

    ret = kernel_create_session(session, kernel_tracer_fd);
    if (ret < 0) {
        ret = LTTNG_ERR_KERN_SESS_FAIL;
        goto error;
    }

    /* Code flow safety */
    assert(session->kernel_session);

    /* Copy session output to the newly created Kernel session */
    ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
    if (ret != LTTNG_OK) {
        goto error;
    }

    /* Create directory(ies) on local filesystem. */
    if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
            strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
        ret = run_as_mkdir_recursive(
                session->kernel_session->consumer->dst.trace_path,
                S_IRWXU | S_IRWXG, session->uid, session->gid);
        if (ret < 0) {
            if (ret != -EEXIST) {
                ERR("Trace directory creation error");
                goto error;
            }
        }
    }

    session->kernel_session->uid = session->uid;
    session->kernel_session->gid = session->gid;
    session->kernel_session->output_traces = session->output_traces;
    session->kernel_session->snapshot_mode = session->snapshot_mode;

    return LTTNG_OK;

error:
    trace_kernel_destroy_session(session->kernel_session);
    session->kernel_session = NULL;
    return ret;
}
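/*
 * As with create_ust_session(), a failure destroys the freshly created
 * kernel session and clears session->kernel_session so the parent session
 * object stays consistent for the client.
 */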
/*
 * Count the number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
    unsigned int i = 0;
    struct ltt_session *session;

    DBG("Counting number of available session for UID %d GID %d",
            uid, gid);
    cds_list_for_each_entry(session, &session_list_ptr->head, list) {
        /*
         * Only list the sessions the user can control.
         */
        if (!session_access_ok(session, uid, gid)) {
            continue;
        }
        i++;
    }

    return i;
}
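/*
 * Only sessions for which session_access_ok() grants the given uid/gid are
 * counted above; per the access rules applied further down, root can
 * interact with every session while other users only see their own.
 */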
/*
 * Process the command requested by the lttng client within the command
 * context structure. This function makes sure that the return structure (llm)
 * is set and ready for transmission before returning.
 *
 * Return any error encountered or 0 for success.
 *
 * "sock" is only used for special-case var. len data.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
2697 static int process_client_msg(struct command_ctx
*cmd_ctx
, int sock
,
2701 int need_tracing_session
= 1;
2704 DBG("Processing client command %d", cmd_ctx
->lsm
->cmd_type
);
2708 switch (cmd_ctx
->lsm
->cmd_type
) {
2709 case LTTNG_CREATE_SESSION
:
2710 case LTTNG_CREATE_SESSION_SNAPSHOT
:
2711 case LTTNG_CREATE_SESSION_LIVE
:
2712 case LTTNG_DESTROY_SESSION
:
2713 case LTTNG_LIST_SESSIONS
:
2714 case LTTNG_LIST_DOMAINS
:
2715 case LTTNG_START_TRACE
:
2716 case LTTNG_STOP_TRACE
:
2717 case LTTNG_DATA_PENDING
:
2718 case LTTNG_SNAPSHOT_ADD_OUTPUT
:
2719 case LTTNG_SNAPSHOT_DEL_OUTPUT
:
2720 case LTTNG_SNAPSHOT_LIST_OUTPUT
:
2721 case LTTNG_SNAPSHOT_RECORD
:
2722 case LTTNG_SAVE_SESSION
:
2729 if (opt_no_kernel
&& need_domain
2730 && cmd_ctx
->lsm
->domain
.type
== LTTNG_DOMAIN_KERNEL
) {
2732 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
2734 ret
= LTTNG_ERR_KERN_NA
;
2739 /* Deny register consumer if we already have a spawned consumer. */
2740 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_REGISTER_CONSUMER
) {
2741 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
2742 if (kconsumer_data
.pid
> 0) {
2743 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
2744 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2747 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
     * Check for commands that don't need to allocate a returned payload. We do
     * this here so we don't have to make the call for no payload at each
     * command.
2755 switch(cmd_ctx
->lsm
->cmd_type
) {
2756 case LTTNG_LIST_SESSIONS
:
2757 case LTTNG_LIST_TRACEPOINTS
:
2758 case LTTNG_LIST_TRACEPOINT_FIELDS
:
2759 case LTTNG_LIST_DOMAINS
:
2760 case LTTNG_LIST_CHANNELS
:
2761 case LTTNG_LIST_EVENTS
:
2764 /* Setup lttng message with no payload */
2765 ret
= setup_lttng_msg(cmd_ctx
, 0);
2767 /* This label does not try to unlock the session */
2768 goto init_setup_error
;
2772 /* Commands that DO NOT need a session. */
2773 switch (cmd_ctx
->lsm
->cmd_type
) {
2774 case LTTNG_CREATE_SESSION
:
2775 case LTTNG_CREATE_SESSION_SNAPSHOT
:
2776 case LTTNG_CREATE_SESSION_LIVE
:
2777 case LTTNG_CALIBRATE
:
2778 case LTTNG_LIST_SESSIONS
:
2779 case LTTNG_LIST_TRACEPOINTS
:
2780 case LTTNG_LIST_TRACEPOINT_FIELDS
:
2781 case LTTNG_SAVE_SESSION
:
2782 need_tracing_session
= 0;
2785 DBG("Getting session %s by name", cmd_ctx
->lsm
->session
.name
);
2787 * We keep the session list lock across _all_ commands
2788 * for now, because the per-session lock does not
2789 * handle teardown properly.
2791 session_lock_list();
2792 cmd_ctx
->session
= session_find_by_name(cmd_ctx
->lsm
->session
.name
);
2793 if (cmd_ctx
->session
== NULL
) {
2794 ret
= LTTNG_ERR_SESS_NOT_FOUND
;
2797 /* Acquire lock for the session */
2798 session_lock(cmd_ctx
->session
);
2808 * Check domain type for specific "pre-action".
2810 switch (cmd_ctx
->lsm
->domain
.type
) {
2811 case LTTNG_DOMAIN_KERNEL
:
2813 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
2817 /* Kernel tracer check */
2818 if (kernel_tracer_fd
== -1) {
2819 /* Basically, load kernel tracer modules */
2820 ret
= init_kernel_tracer();
2826 /* Consumer is in an ERROR state. Report back to client */
2827 if (uatomic_read(&kernel_consumerd_state
) == CONSUMER_ERROR
) {
2828 ret
= LTTNG_ERR_NO_KERNCONSUMERD
;
2832 /* Need a session for kernel command */
2833 if (need_tracing_session
) {
2834 if (cmd_ctx
->session
->kernel_session
== NULL
) {
2835 ret
= create_kernel_session(cmd_ctx
->session
);
2837 ret
= LTTNG_ERR_KERN_SESS_FAIL
;
2842 /* Start the kernel consumer daemon */
2843 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
2844 if (kconsumer_data
.pid
== 0 &&
2845 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
) {
2846 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2847 ret
= start_consumerd(&kconsumer_data
);
2849 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
2852 uatomic_set(&kernel_consumerd_state
, CONSUMER_STARTED
);
2854 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
             * The consumer was just spawned so we need to add the socket to
             * the consumer output of the session if it exists.
2861 ret
= consumer_create_socket(&kconsumer_data
,
2862 cmd_ctx
->session
->kernel_session
->consumer
);
2869 case LTTNG_DOMAIN_JUL
:
2870 case LTTNG_DOMAIN_UST
:
2872 if (!ust_app_supported()) {
2873 ret
= LTTNG_ERR_NO_UST
;
2876 /* Consumer is in an ERROR state. Report back to client */
2877 if (uatomic_read(&ust_consumerd_state
) == CONSUMER_ERROR
) {
2878 ret
= LTTNG_ERR_NO_USTCONSUMERD
;
2882 if (need_tracing_session
) {
2883 /* Create UST session if none exist. */
2884 if (cmd_ctx
->session
->ust_session
== NULL
) {
2885 ret
= create_ust_session(cmd_ctx
->session
,
2886 &cmd_ctx
->lsm
->domain
);
2887 if (ret
!= LTTNG_OK
) {
2892 /* Start the UST consumer daemons */
2894 pthread_mutex_lock(&ustconsumer64_data
.pid_mutex
);
2895 if (consumerd64_bin
[0] != '\0' &&
2896 ustconsumer64_data
.pid
== 0 &&
2897 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
) {
2898 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
2899 ret
= start_consumerd(&ustconsumer64_data
);
2901 ret
= LTTNG_ERR_UST_CONSUMER64_FAIL
;
2902 uatomic_set(&ust_consumerd64_fd
, -EINVAL
);
2906 uatomic_set(&ust_consumerd64_fd
, ustconsumer64_data
.cmd_sock
);
2907 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
2909 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
2913 * Setup socket for consumer 64 bit. No need for atomic access
2914 * since it was set above and can ONLY be set in this thread.
2916 ret
= consumer_create_socket(&ustconsumer64_data
,
2917 cmd_ctx
->session
->ust_session
->consumer
);
2923 if (consumerd32_bin
[0] != '\0' &&
2924 ustconsumer32_data
.pid
== 0 &&
2925 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
) {
2926 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
2927 ret
= start_consumerd(&ustconsumer32_data
);
2929 ret
= LTTNG_ERR_UST_CONSUMER32_FAIL
;
2930 uatomic_set(&ust_consumerd32_fd
, -EINVAL
);
2934 uatomic_set(&ust_consumerd32_fd
, ustconsumer32_data
.cmd_sock
);
2935 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
2937 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
             * Setup socket for consumer 32 bit. No need for atomic access
             * since it was set above and can ONLY be set in this thread.
2944 ret
= consumer_create_socket(&ustconsumer32_data
,
2945 cmd_ctx
->session
->ust_session
->consumer
);
2957 /* Validate consumer daemon state when start/stop trace command */
2958 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_START_TRACE
||
2959 cmd_ctx
->lsm
->cmd_type
== LTTNG_STOP_TRACE
) {
2960 switch (cmd_ctx
->lsm
->domain
.type
) {
2961 case LTTNG_DOMAIN_JUL
:
2962 case LTTNG_DOMAIN_UST
:
2963 if (uatomic_read(&ust_consumerd_state
) != CONSUMER_STARTED
) {
2964 ret
= LTTNG_ERR_NO_USTCONSUMERD
;
2968 case LTTNG_DOMAIN_KERNEL
:
2969 if (uatomic_read(&kernel_consumerd_state
) != CONSUMER_STARTED
) {
2970 ret
= LTTNG_ERR_NO_KERNCONSUMERD
;
2978 * Check that the UID or GID match that of the tracing session.
2979 * The root user can interact with all sessions.
2981 if (need_tracing_session
) {
2982 if (!session_access_ok(cmd_ctx
->session
,
2983 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
2984 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
))) {
2985 ret
= LTTNG_ERR_EPERM
;
2991 * Send relayd information to consumer as soon as we have a domain and a
2994 if (cmd_ctx
->session
&& need_domain
) {
2996 * Setup relayd if not done yet. If the relayd information was already
2997 * sent to the consumer, this call will gracefully return.
2999 ret
= cmd_setup_relayd(cmd_ctx
->session
);
3000 if (ret
!= LTTNG_OK
) {
3005 /* Process by command type */
3006 switch (cmd_ctx
->lsm
->cmd_type
) {
3007 case LTTNG_ADD_CONTEXT
:
3009 ret
= cmd_add_context(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3010 cmd_ctx
->lsm
->u
.context
.channel_name
,
3011 &cmd_ctx
->lsm
->u
.context
.ctx
, kernel_poll_pipe
[1]);
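        /*
         * kernel_poll_pipe[1] passed above is the write end of the pipe
         * created in main(); commands touching kernel channels appear to use
         * it to wake the kernel management thread so it re-polls the updated
         * streams.
         */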
3014 case LTTNG_DISABLE_CHANNEL
:
3016 ret
= cmd_disable_channel(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3017 cmd_ctx
->lsm
->u
.disable
.channel_name
);
3020 case LTTNG_DISABLE_EVENT
:
3022 ret
= cmd_disable_event(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3023 cmd_ctx
->lsm
->u
.disable
.channel_name
,
3024 cmd_ctx
->lsm
->u
.disable
.name
);
3027 case LTTNG_DISABLE_ALL_EVENT
:
3029 DBG("Disabling all events");
3031 ret
= cmd_disable_event_all(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3032 cmd_ctx
->lsm
->u
.disable
.channel_name
);
3035 case LTTNG_ENABLE_CHANNEL
:
3037 ret
= cmd_enable_channel(cmd_ctx
->session
, &cmd_ctx
->lsm
->domain
,
3038 &cmd_ctx
->lsm
->u
.channel
.chan
, kernel_poll_pipe
[1]);
3041 case LTTNG_ENABLE_EVENT
:
3043 struct lttng_event_exclusion
*exclusion
= NULL
;
3044 struct lttng_filter_bytecode
*bytecode
= NULL
;
3045 char *filter_expression
= NULL
;
3047 /* Handle exclusion events and receive it from the client. */
3048 if (cmd_ctx
->lsm
->u
.enable
.exclusion_count
> 0) {
3049 size_t count
= cmd_ctx
->lsm
->u
.enable
.exclusion_count
;
3051 exclusion
= zmalloc(sizeof(struct lttng_event_exclusion
) +
3052 (count
* LTTNG_SYMBOL_NAME_LEN
));
3054 ret
= LTTNG_ERR_EXCLUSION_NOMEM
;
3058 DBG("Receiving var len exclusion event list from client ...");
3059 exclusion
->count
= count
;
3060 ret
= lttcomm_recv_unix_sock(sock
, exclusion
->names
,
3061 count
* LTTNG_SYMBOL_NAME_LEN
);
3063 DBG("Nothing recv() from client var len data... continuing");
3066 ret
= LTTNG_ERR_EXCLUSION_INVAL
;
3071 /* Get filter expression from client. */
3072 if (cmd_ctx
->lsm
->u
.enable
.expression_len
> 0) {
3073 size_t expression_len
=
3074 cmd_ctx
->lsm
->u
.enable
.expression_len
;
3076 if (expression_len
> LTTNG_FILTER_MAX_LEN
) {
3077 ret
= LTTNG_ERR_FILTER_INVAL
;
3082 filter_expression
= zmalloc(expression_len
);
3083 if (!filter_expression
) {
3085 ret
= LTTNG_ERR_FILTER_NOMEM
;
3089 /* Receive var. len. data */
3090 DBG("Receiving var len filter's expression from client ...");
3091 ret
= lttcomm_recv_unix_sock(sock
, filter_expression
,
                DBG("Nothing recv() from client var len data... continuing");
3096 free(filter_expression
);
3098 ret
= LTTNG_ERR_FILTER_INVAL
;
3103 /* Handle filter and get bytecode from client. */
3104 if (cmd_ctx
->lsm
->u
.enable
.bytecode_len
> 0) {
3105 size_t bytecode_len
= cmd_ctx
->lsm
->u
.enable
.bytecode_len
;
3107 if (bytecode_len
> LTTNG_FILTER_MAX_LEN
) {
3108 ret
= LTTNG_ERR_FILTER_INVAL
;
3113 bytecode
= zmalloc(bytecode_len
);
3116 ret
= LTTNG_ERR_FILTER_NOMEM
;
3120 /* Receive var. len. data */
3121 DBG("Receiving var len filter's bytecode from client ...");
3122 ret
= lttcomm_recv_unix_sock(sock
, bytecode
, bytecode_len
);
                DBG("Nothing recv() from client var len data... continuing");
3128 ret
= LTTNG_ERR_FILTER_INVAL
;
3132 if ((bytecode
->len
+ sizeof(*bytecode
)) != bytecode_len
) {
3135 ret
= LTTNG_ERR_FILTER_INVAL
;
3140 ret
= cmd_enable_event(cmd_ctx
->session
, &cmd_ctx
->lsm
->domain
,
3141 cmd_ctx
->lsm
->u
.enable
.channel_name
,
3142 &cmd_ctx
->lsm
->u
.enable
.event
,
3143 filter_expression
, bytecode
, exclusion
,
3144 kernel_poll_pipe
[1]);
3147 case LTTNG_ENABLE_ALL_EVENT
:
3149 DBG("Enabling all events");
3151 ret
= cmd_enable_event_all(cmd_ctx
->session
, &cmd_ctx
->lsm
->domain
,
3152 cmd_ctx
->lsm
->u
.enable
.channel_name
,
3153 cmd_ctx
->lsm
->u
.enable
.event
.type
, NULL
, NULL
,
3154 kernel_poll_pipe
[1]);
3157 case LTTNG_LIST_TRACEPOINTS
:
3159 struct lttng_event
*events
;
3162 nb_events
= cmd_list_tracepoints(cmd_ctx
->lsm
->domain
.type
, &events
);
3163 if (nb_events
< 0) {
3164 /* Return value is a negative lttng_error_code. */
3170 * Setup lttng message with payload size set to the event list size in
3171 * bytes and then copy list into the llm payload.
3173 ret
= setup_lttng_msg(cmd_ctx
, sizeof(struct lttng_event
) * nb_events
);
3179 /* Copy event list into message payload */
3180 memcpy(cmd_ctx
->llm
->payload
, events
,
3181 sizeof(struct lttng_event
) * nb_events
);
3188 case LTTNG_LIST_TRACEPOINT_FIELDS
:
3190 struct lttng_event_field
*fields
;
3193 nb_fields
= cmd_list_tracepoint_fields(cmd_ctx
->lsm
->domain
.type
,
3195 if (nb_fields
< 0) {
3196 /* Return value is a negative lttng_error_code. */
3202 * Setup lttng message with payload size set to the event list size in
3203 * bytes and then copy list into the llm payload.
3205 ret
= setup_lttng_msg(cmd_ctx
,
3206 sizeof(struct lttng_event_field
) * nb_fields
);
3212 /* Copy event list into message payload */
3213 memcpy(cmd_ctx
->llm
->payload
, fields
,
3214 sizeof(struct lttng_event_field
) * nb_fields
);
3221 case LTTNG_SET_CONSUMER_URI
:
3224 struct lttng_uri
*uris
;
3226 nb_uri
= cmd_ctx
->lsm
->u
.uri
.size
;
3227 len
= nb_uri
* sizeof(struct lttng_uri
);
3230 ret
= LTTNG_ERR_INVALID
;
3234 uris
= zmalloc(len
);
3236 ret
= LTTNG_ERR_FATAL
;
3240 /* Receive variable len data */
3241 DBG("Receiving %zu URI(s) from client ...", nb_uri
);
3242 ret
= lttcomm_recv_unix_sock(sock
, uris
, len
);
3244 DBG("No URIs received from client... continuing");
3246 ret
= LTTNG_ERR_SESSION_FAIL
;
3251 ret
= cmd_set_consumer_uri(cmd_ctx
->lsm
->domain
.type
, cmd_ctx
->session
,
3253 if (ret
!= LTTNG_OK
) {
         * XXX: 0 means that this URI should be applied on the session. Should
         * be a DOMAIN enum.
3262 if (cmd_ctx
->lsm
->domain
.type
== 0) {
3263 /* Add the URI for the UST session if a consumer is present. */
3264 if (cmd_ctx
->session
->ust_session
&&
3265 cmd_ctx
->session
->ust_session
->consumer
) {
3266 ret
= cmd_set_consumer_uri(LTTNG_DOMAIN_UST
, cmd_ctx
->session
,
3268 } else if (cmd_ctx
->session
->kernel_session
&&
3269 cmd_ctx
->session
->kernel_session
->consumer
) {
3270 ret
= cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL
,
3271 cmd_ctx
->session
, nb_uri
, uris
);
3279 case LTTNG_START_TRACE
:
3281 ret
= cmd_start_trace(cmd_ctx
->session
);
3284 case LTTNG_STOP_TRACE
:
3286 ret
= cmd_stop_trace(cmd_ctx
->session
);
3289 case LTTNG_CREATE_SESSION
:
3292 struct lttng_uri
*uris
= NULL
;
3294 nb_uri
= cmd_ctx
->lsm
->u
.uri
.size
;
3295 len
= nb_uri
* sizeof(struct lttng_uri
);
3298 uris
= zmalloc(len
);
3300 ret
= LTTNG_ERR_FATAL
;
3304 /* Receive variable len data */
3305 DBG("Waiting for %zu URIs from client ...", nb_uri
);
3306 ret
= lttcomm_recv_unix_sock(sock
, uris
, len
);
3308 DBG("No URIs received from client... continuing");
3310 ret
= LTTNG_ERR_SESSION_FAIL
;
3315 if (nb_uri
== 1 && uris
[0].dtype
!= LTTNG_DST_PATH
) {
3316 DBG("Creating session with ONE network URI is a bad call");
3317 ret
= LTTNG_ERR_SESSION_FAIL
;
3323 ret
= cmd_create_session_uri(cmd_ctx
->lsm
->session
.name
, uris
, nb_uri
,
3324 &cmd_ctx
->creds
, 0);
3330 case LTTNG_DESTROY_SESSION
:
3332 ret
= cmd_destroy_session(cmd_ctx
->session
, kernel_poll_pipe
[1]);
3334 /* Set session to NULL so we do not unlock it after free. */
3335 cmd_ctx
->session
= NULL
;
3338 case LTTNG_LIST_DOMAINS
:
3341 struct lttng_domain
*domains
;
3343 nb_dom
= cmd_list_domains(cmd_ctx
->session
, &domains
);
3345 /* Return value is a negative lttng_error_code. */
3350 ret
= setup_lttng_msg(cmd_ctx
, nb_dom
* sizeof(struct lttng_domain
));
3356 /* Copy event list into message payload */
3357 memcpy(cmd_ctx
->llm
->payload
, domains
,
3358 nb_dom
* sizeof(struct lttng_domain
));
3365 case LTTNG_LIST_CHANNELS
:
3368 struct lttng_channel
*channels
;
3370 nb_chan
= cmd_list_channels(cmd_ctx
->lsm
->domain
.type
,
3371 cmd_ctx
->session
, &channels
);
3373 /* Return value is a negative lttng_error_code. */
3378 ret
= setup_lttng_msg(cmd_ctx
, nb_chan
* sizeof(struct lttng_channel
));
3384 /* Copy event list into message payload */
3385 memcpy(cmd_ctx
->llm
->payload
, channels
,
3386 nb_chan
* sizeof(struct lttng_channel
));
3393 case LTTNG_LIST_EVENTS
:
3396 struct lttng_event
*events
= NULL
;
3398 nb_event
= cmd_list_events(cmd_ctx
->lsm
->domain
.type
, cmd_ctx
->session
,
3399 cmd_ctx
->lsm
->u
.list
.channel_name
, &events
);
3401 /* Return value is a negative lttng_error_code. */
3406 ret
= setup_lttng_msg(cmd_ctx
, nb_event
* sizeof(struct lttng_event
));
3412 /* Copy event list into message payload */
3413 memcpy(cmd_ctx
->llm
->payload
, events
,
3414 nb_event
* sizeof(struct lttng_event
));
3421 case LTTNG_LIST_SESSIONS
:
3423 unsigned int nr_sessions
;
3425 session_lock_list();
3426 nr_sessions
= lttng_sessions_count(
3427 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
3428 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
));
3430 ret
= setup_lttng_msg(cmd_ctx
, sizeof(struct lttng_session
) * nr_sessions
);
3432 session_unlock_list();
        /* Fill the session array */
3437 cmd_list_lttng_sessions((struct lttng_session
*)(cmd_ctx
->llm
->payload
),
3438 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
3439 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
));
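        /*
         * The payload was sized by setup_lttng_msg() from the count obtained
         * under the same session list lock, so filling it here cannot overrun
         * even if sessions are created or destroyed afterwards.
         */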
3441 session_unlock_list();
3446 case LTTNG_CALIBRATE
:
3448 ret
= cmd_calibrate(cmd_ctx
->lsm
->domain
.type
,
3449 &cmd_ctx
->lsm
->u
.calibrate
);
3452 case LTTNG_REGISTER_CONSUMER
:
3454 struct consumer_data
*cdata
;
3456 switch (cmd_ctx
->lsm
->domain
.type
) {
3457 case LTTNG_DOMAIN_KERNEL
:
3458 cdata
= &kconsumer_data
;
3461 ret
= LTTNG_ERR_UND
;
3465 ret
= cmd_register_consumer(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3466 cmd_ctx
->lsm
->u
.reg
.path
, cdata
);
3469 case LTTNG_DATA_PENDING
:
3471 ret
= cmd_data_pending(cmd_ctx
->session
);
3474 case LTTNG_SNAPSHOT_ADD_OUTPUT
:
3476 struct lttcomm_lttng_output_id reply
;
3478 ret
= cmd_snapshot_add_output(cmd_ctx
->session
,
3479 &cmd_ctx
->lsm
->u
.snapshot_output
.output
, &reply
.id
);
3480 if (ret
!= LTTNG_OK
) {
3484 ret
= setup_lttng_msg(cmd_ctx
, sizeof(reply
));
3489 /* Copy output list into message payload */
3490 memcpy(cmd_ctx
->llm
->payload
, &reply
, sizeof(reply
));
3494 case LTTNG_SNAPSHOT_DEL_OUTPUT
:
3496 ret
= cmd_snapshot_del_output(cmd_ctx
->session
,
3497 &cmd_ctx
->lsm
->u
.snapshot_output
.output
);
3500 case LTTNG_SNAPSHOT_LIST_OUTPUT
:
3503 struct lttng_snapshot_output
*outputs
= NULL
;
3505 nb_output
= cmd_snapshot_list_outputs(cmd_ctx
->session
, &outputs
);
3506 if (nb_output
< 0) {
3511 ret
= setup_lttng_msg(cmd_ctx
,
3512 nb_output
* sizeof(struct lttng_snapshot_output
));
3519 /* Copy output list into message payload */
3520 memcpy(cmd_ctx
->llm
->payload
, outputs
,
3521 nb_output
* sizeof(struct lttng_snapshot_output
));
3528 case LTTNG_SNAPSHOT_RECORD
:
3530 ret
= cmd_snapshot_record(cmd_ctx
->session
,
3531 &cmd_ctx
->lsm
->u
.snapshot_record
.output
,
3532 cmd_ctx
->lsm
->u
.snapshot_record
.wait
);
3535 case LTTNG_CREATE_SESSION_SNAPSHOT
:
3538 struct lttng_uri
*uris
= NULL
;
3540 nb_uri
= cmd_ctx
->lsm
->u
.uri
.size
;
3541 len
= nb_uri
* sizeof(struct lttng_uri
);
3544 uris
= zmalloc(len
);
3546 ret
= LTTNG_ERR_FATAL
;
3550 /* Receive variable len data */
3551 DBG("Waiting for %zu URIs from client ...", nb_uri
);
3552 ret
= lttcomm_recv_unix_sock(sock
, uris
, len
);
3554 DBG("No URIs received from client... continuing");
3556 ret
= LTTNG_ERR_SESSION_FAIL
;
3561 if (nb_uri
== 1 && uris
[0].dtype
!= LTTNG_DST_PATH
) {
3562 DBG("Creating session with ONE network URI is a bad call");
3563 ret
= LTTNG_ERR_SESSION_FAIL
;
3569 ret
= cmd_create_session_snapshot(cmd_ctx
->lsm
->session
.name
, uris
,
3570 nb_uri
, &cmd_ctx
->creds
);
3574 case LTTNG_CREATE_SESSION_LIVE
:
3577 struct lttng_uri
*uris
= NULL
;
3579 nb_uri
= cmd_ctx
->lsm
->u
.uri
.size
;
3580 len
= nb_uri
* sizeof(struct lttng_uri
);
3583 uris
= zmalloc(len
);
3585 ret
= LTTNG_ERR_FATAL
;
3589 /* Receive variable len data */
3590 DBG("Waiting for %zu URIs from client ...", nb_uri
);
3591 ret
= lttcomm_recv_unix_sock(sock
, uris
, len
);
3593 DBG("No URIs received from client... continuing");
3595 ret
= LTTNG_ERR_SESSION_FAIL
;
3600 if (nb_uri
== 1 && uris
[0].dtype
!= LTTNG_DST_PATH
) {
3601 DBG("Creating session with ONE network URI is a bad call");
3602 ret
= LTTNG_ERR_SESSION_FAIL
;
3608 ret
= cmd_create_session_uri(cmd_ctx
->lsm
->session
.name
, uris
,
3609 nb_uri
, &cmd_ctx
->creds
, cmd_ctx
->lsm
->u
.session_live
.timer_interval
);
3613 case LTTNG_SAVE_SESSION
:
3615 ret
= cmd_save_sessions(&cmd_ctx
->lsm
->u
.save_session
.attr
,
3620 ret
= LTTNG_ERR_UND
;
3625 if (cmd_ctx
->llm
== NULL
) {
3626 DBG("Missing llm structure. Allocating one.");
3627 if (setup_lttng_msg(cmd_ctx
, 0) < 0) {
3631 /* Set return code */
3632 cmd_ctx
->llm
->ret_code
= ret
;
3634 if (cmd_ctx
->session
) {
3635 session_unlock(cmd_ctx
->session
);
3637 if (need_tracing_session
) {
3638 session_unlock_list();
3645 * Thread managing health check socket.
3647 static void *thread_manage_health(void *data
)
3649 int sock
= -1, new_sock
= -1, ret
, i
, pollfd
, err
= -1;
3650 uint32_t revents
, nb_fd
;
3651 struct lttng_poll_event events
;
3652 struct health_comm_msg msg
;
3653 struct health_comm_reply reply
;
3655 DBG("[thread] Manage health check started");
3657 rcu_register_thread();
3659 /* We might hit an error path before this is created. */
3660 lttng_poll_init(&events
);
3662 /* Create unix socket */
3663 sock
= lttcomm_create_unix_sock(health_unix_sock_path
);
3665 ERR("Unable to create health check Unix socket");
3671 /* lttng health client socket path permissions */
3672 ret
= chown(health_unix_sock_path
, 0,
3673 utils_get_group_id(tracing_group_name
));
3675 ERR("Unable to set group on %s", health_unix_sock_path
);
3681 ret
= chmod(health_unix_sock_path
,
3682 S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IWGRP
);
3684 ERR("Unable to set permissions on %s", health_unix_sock_path
);
3692 * Set the CLOEXEC flag. Return code is useless because either way, the
3695 (void) utils_set_fd_cloexec(sock
);
3697 ret
= lttcomm_listen_unix_sock(sock
);
     * Pass 2 as size here for the thread quit pipe and the health socket. Nothing
     * more will be added to this poll set.
3706 ret
= sessiond_set_thread_pollset(&events
, 2);
3711 /* Add the application registration socket */
3712 ret
= lttng_poll_add(&events
, sock
, LPOLLIN
| LPOLLPRI
);
3717 lttng_sessiond_notify_ready();
3720 DBG("Health check ready");
        /* Infinite blocking call, waiting for transmission */
3724 ret
= lttng_poll_wait(&events
, -1);
3727 * Restart interrupted system call.
3729 if (errno
== EINTR
) {
3737 for (i
= 0; i
< nb_fd
; i
++) {
3738 /* Fetch once the poll data */
3739 revents
= LTTNG_POLL_GETEV(&events
, i
);
3740 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
3742 /* Thread quit pipe has been closed. Killing thread. */
3743 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
3749 /* Event on the registration socket */
3750 if (pollfd
== sock
) {
3751 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
3752 ERR("Health socket poll error");
3758 new_sock
= lttcomm_accept_unix_sock(sock
);
3764 * Set the CLOEXEC flag. Return code is useless because either way, the
3767 (void) utils_set_fd_cloexec(new_sock
);
3769 DBG("Receiving data from client for health...");
3770 ret
= lttcomm_recv_unix_sock(new_sock
, (void *)&msg
, sizeof(msg
));
3772 DBG("Nothing recv() from client... continuing");
3773 ret
= close(new_sock
);
3781 rcu_thread_online();
3783 memset(&reply
, 0, sizeof(reply
));
3784 for (i
= 0; i
< NR_HEALTH_SESSIOND_TYPES
; i
++) {
3786 * health_check_state returns 0 if health is
3789 if (!health_check_state(health_sessiond
, i
)) {
3790 reply
.ret_code
|= 1ULL << i
;
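            /*
             * reply.ret_code is a bitmask: bit i appears to be set when thread
             * type i reports a bad health state, letting the client decode
             * which sessiond components are failing from a single 64-bit
             * value.
             */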
3794 DBG2("Health check return value %" PRIx64
, reply
.ret_code
);
3796 ret
= send_unix_sock(new_sock
, (void *) &reply
, sizeof(reply
));
3798 ERR("Failed to send health data back to client");
3801 /* End of transmission */
3802 ret
= close(new_sock
);
3812 ERR("Health error occurred in %s", __func__
);
3814 DBG("Health check thread dying");
3815 unlink(health_unix_sock_path
);
3823 lttng_poll_clean(&events
);
3825 rcu_unregister_thread();
 * This thread manages all client requests using the unix client socket for
3833 static void *thread_manage_clients(void *data
)
3835 int sock
= -1, ret
, i
, pollfd
, err
= -1;
3837 uint32_t revents
, nb_fd
;
3838 struct command_ctx
*cmd_ctx
= NULL
;
3839 struct lttng_poll_event events
;
3841 DBG("[thread] Manage client started");
3843 rcu_register_thread();
3845 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_CMD
);
3847 health_code_update();
3849 ret
= lttcomm_listen_unix_sock(client_sock
);
3855 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3856 * more will be added to this poll set.
3858 ret
= sessiond_set_thread_pollset(&events
, 2);
3860 goto error_create_poll
;
3863 /* Add the application registration socket */
3864 ret
= lttng_poll_add(&events
, client_sock
, LPOLLIN
| LPOLLPRI
);
3869 lttng_sessiond_notify_ready();
3871 /* This testpoint is after we signal readiness to the parent. */
3872 if (testpoint(sessiond_thread_manage_clients
)) {
3876 if (testpoint(sessiond_thread_manage_clients_before_loop
)) {
3880 health_code_update();
3883 DBG("Accepting client command ...");
        /* Infinite blocking call, waiting for transmission */
3887 health_poll_entry();
3888 ret
= lttng_poll_wait(&events
, -1);
3892 * Restart interrupted system call.
3894 if (errno
== EINTR
) {
3902 for (i
= 0; i
< nb_fd
; i
++) {
3903 /* Fetch once the poll data */
3904 revents
= LTTNG_POLL_GETEV(&events
, i
);
3905 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
3907 health_code_update();
3909 /* Thread quit pipe has been closed. Killing thread. */
3910 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
3916 /* Event on the registration socket */
3917 if (pollfd
== client_sock
) {
3918 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
3919 ERR("Client socket poll error");
3925 DBG("Wait for client response");
3927 health_code_update();
3929 sock
= lttcomm_accept_unix_sock(client_sock
);
3935 * Set the CLOEXEC flag. Return code is useless because either way, the
3938 (void) utils_set_fd_cloexec(sock
);
3940 /* Set socket option for credentials retrieval */
3941 ret
= lttcomm_setsockopt_creds_unix_sock(sock
);
3946 /* Allocate context command to process the client request */
3947 cmd_ctx
= zmalloc(sizeof(struct command_ctx
));
3948 if (cmd_ctx
== NULL
) {
3949 PERROR("zmalloc cmd_ctx");
3953 /* Allocate data buffer for reception */
3954 cmd_ctx
->lsm
= zmalloc(sizeof(struct lttcomm_session_msg
));
3955 if (cmd_ctx
->lsm
== NULL
) {
3956 PERROR("zmalloc cmd_ctx->lsm");
3960 cmd_ctx
->llm
= NULL
;
3961 cmd_ctx
->session
= NULL
;
3963 health_code_update();
             * Data is received from the lttng client. The struct
             * lttcomm_session_msg (lsm) contains the command and data request of
             * the client.
3970 DBG("Receiving data from client ...");
3971 ret
= lttcomm_recv_creds_unix_sock(sock
, cmd_ctx
->lsm
,
3972 sizeof(struct lttcomm_session_msg
), &cmd_ctx
->creds
);
3974 DBG("Nothing recv() from client... continuing");
3980 clean_command_ctx(&cmd_ctx
);
3984 health_code_update();
3986 // TODO: Validate cmd_ctx including sanity check for
3987 // security purpose.
3989 rcu_thread_online();
             * This function dispatches the work to the kernel or userspace tracer
             * libs and fills the lttcomm_lttng_msg data structure with all the
             * needed information for the client. The command context struct
             * contains everything this function needs.
3996 ret
= process_client_msg(cmd_ctx
, sock
, &sock_error
);
3997 rcu_thread_offline();
4005 * TODO: Inform client somehow of the fatal error. At
4006 * this point, ret < 0 means that a zmalloc failed
4007 * (ENOMEM). Error detected but still accept
4008 * command, unless a socket error has been
4011 clean_command_ctx(&cmd_ctx
);
4015 health_code_update();
4017 DBG("Sending response (size: %d, retcode: %s)",
4018 cmd_ctx
->lttng_msg_size
,
4019 lttng_strerror(-cmd_ctx
->llm
->ret_code
));
4020 ret
= send_unix_sock(sock
, cmd_ctx
->llm
, cmd_ctx
->lttng_msg_size
);
4022 ERR("Failed to send data back to client");
4025 /* End of transmission */
4032 clean_command_ctx(&cmd_ctx
);
4034 health_code_update();
4046 lttng_poll_clean(&events
);
4047 clean_command_ctx(&cmd_ctx
);
4051 unlink(client_unix_sock_path
);
4052 if (client_sock
>= 0) {
4053 ret
= close(client_sock
);
4061 ERR("Health error occurred in %s", __func__
);
4064 health_unregister(health_sessiond
);
4066 DBG("Client thread dying");
4068 rcu_unregister_thread();
/*
 * usage function on stderr
 */
static void usage(void)
{
    fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
    fprintf(stderr, " -h, --help Display this usage.\n");
    fprintf(stderr, " -c, --client-sock PATH Specify path for the client unix socket\n");
    fprintf(stderr, " -a, --apps-sock PATH Specify path for apps unix socket\n");
    fprintf(stderr, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
    fprintf(stderr, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
    fprintf(stderr, " --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
    fprintf(stderr, " --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
    fprintf(stderr, " --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
    fprintf(stderr, " --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
    fprintf(stderr, " --consumerd32-path PATH Specify path for the 32-bit UST consumer daemon binary\n");
    fprintf(stderr, " --consumerd32-libdir PATH Specify path for the 32-bit UST consumer daemon libraries\n");
    fprintf(stderr, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
    fprintf(stderr, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
    fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
    fprintf(stderr, " -b, --background Start as a daemon, keeping console open.\n");
    fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
    fprintf(stderr, " -V, --version Show version number.\n");
    fprintf(stderr, " -S, --sig-parent Send SIGUSR1 to parent pid to notify readiness.\n");
    fprintf(stderr, " -q, --quiet No output at all.\n");
    fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
    fprintf(stderr, " -p, --pidfile FILE Write a pid to FILE name overriding the default value.\n");
    fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
    fprintf(stderr, " --no-kernel Disable kernel tracer\n");
    fprintf(stderr, " --jul-tcp-port JUL application registration TCP port\n");
    fprintf(stderr, " -f --config Load daemon configuration file\n");
}
4107 * Take an option from the getopt output and set it in the right variable to be
4110 * Return 0 on success else a negative value.
4112 static int set_option(int opt
, const char *arg
, const char *optname
)
4118 fprintf(stderr
, "option %s", optname
);
4120 fprintf(stderr
, " with arg %s\n", arg
);
4124 snprintf(client_unix_sock_path
, PATH_MAX
, "%s", arg
);
4127 snprintf(apps_unix_sock_path
, PATH_MAX
, "%s", arg
);
4136 tracing_group_name
= strdup(arg
);
4142 fprintf(stdout
, "%s\n", VERSION
);
4148 snprintf(kconsumer_data
.err_unix_sock_path
, PATH_MAX
, "%s", arg
);
4151 snprintf(kconsumer_data
.cmd_unix_sock_path
, PATH_MAX
, "%s", arg
);
4154 snprintf(ustconsumer64_data
.err_unix_sock_path
, PATH_MAX
, "%s", arg
);
4157 snprintf(ustconsumer64_data
.cmd_unix_sock_path
, PATH_MAX
, "%s", arg
);
4160 snprintf(ustconsumer32_data
.err_unix_sock_path
, PATH_MAX
, "%s", arg
);
4163 snprintf(ustconsumer32_data
.cmd_unix_sock_path
, PATH_MAX
, "%s", arg
);
4169 lttng_opt_quiet
= 1;
4172 /* Verbose level can increase using multiple -v */
4174 lttng_opt_verbose
= config_parse_value(arg
);
4176 lttng_opt_verbose
+= 1;
4181 opt_verbose_consumer
= config_parse_value(arg
);
4183 opt_verbose_consumer
+= 1;
4187 consumerd32_bin
= strdup(arg
);
4188 consumerd32_bin_override
= 1;
4191 consumerd32_libdir
= strdup(arg
);
4192 consumerd32_libdir_override
= 1;
4195 consumerd64_bin
= strdup(arg
);
4196 consumerd64_bin_override
= 1;
4199 consumerd64_libdir
= strdup(arg
);
4200 consumerd64_libdir_override
= 1;
4203 opt_pidfile
= strdup(arg
);
4205 case 'J': /* JUL TCP port. */
4210 v
= strtoul(arg
, NULL
, 0);
4211 if (errno
!= 0 || !isdigit(arg
[0])) {
4212 ERR("Wrong value in --jul-tcp-port parameter: %s", arg
);
4215 if (v
== 0 || v
>= 65535) {
4216 ERR("Port overflow in --jul-tcp-port parameter: %s", arg
);
4219 jul_tcp_port
= (uint32_t) v
;
4220 DBG3("JUL TCP port set to non default: %u", jul_tcp_port
);
4224 /* Unknown option or other error.
4225 * Error is printed by getopt, just return */
4233 * config_entry_handler_cb used to handle options read from a config file.
4234 * See config_entry_handler_cb comment in common/config/config.h for the
4235 * return value conventions.
4237 static int config_entry_handler(const struct config_entry
*entry
, void *unused
)
4241 if (!entry
|| !entry
->name
|| !entry
->value
) {
4246 /* Check if the option is to be ignored */
4247 for (i
= 0; i
< sizeof(config_ignore_options
) / sizeof(char *); i
++) {
4248 if (!strcmp(entry
->name
, config_ignore_options
[i
])) {
4253 for (i
= 0; i
< (sizeof(long_options
) / sizeof(struct option
)) - 1;
4256 /* Ignore if not fully matched. */
4257 if (strcmp(entry
->name
, long_options
[i
].name
)) {
4262 * If the option takes no argument on the command line, we have to
4263 * check if the value is "true". We support non-zero numeric values,
4266 if (!long_options
[i
].has_arg
) {
4267 ret
= config_parse_value(entry
->value
);
4270 WARN("Invalid configuration value \"%s\" for option %s",
4271 entry
->value
, entry
->name
);
4273 /* False, skip boolean config option. */
4278 ret
= set_option(long_options
[i
].val
, entry
->value
, entry
->name
);
4282 WARN("Unrecognized option \"%s\" in daemon configuration file.", entry
->name
);
4289 * daemon configuration loading and argument parsing
4291 static int set_options(int argc
, char **argv
)
4293 int ret
= 0, c
= 0, option_index
= 0;
4294 int orig_optopt
= optopt
, orig_optind
= optind
;
4296 const char *config_path
= NULL
;
4298 optstring
= utils_generate_optstring(long_options
,
4299 sizeof(long_options
) / sizeof(struct option
));
4305 /* Check for the --config option */
4306 while ((c
= getopt_long(argc
, argv
, optstring
, long_options
,
4307 &option_index
)) != -1) {
4311 } else if (c
!= 'f') {
4312 /* if not equal to --config option. */
4316 config_path
= utils_expand_path(optarg
);
4318 ERR("Failed to resolve path: %s", optarg
);
4322 ret
= config_get_section_entries(config_path
, config_section_name
,
4323 config_entry_handler
, NULL
);
4326 ERR("Invalid configuration option at line %i", ret
);
4332 /* Reset getopt's global state */
4333 optopt
= orig_optopt
;
4334 optind
= orig_optind
;
4336 c
= getopt_long(argc
, argv
, optstring
, long_options
, &option_index
);
4341 ret
= set_option(c
, optarg
, long_options
[option_index
].name
);
4353 * Creates the two needed socket by the daemon.
4354 * apps_sock - The communication socket for all UST apps.
4355 * client_sock - The communication of the cli tool (lttng).
4357 static int init_daemon_socket(void)
4362 old_umask
= umask(0);
4364 /* Create client tool unix socket */
4365 client_sock
= lttcomm_create_unix_sock(client_unix_sock_path
);
4366 if (client_sock
< 0) {
4367 ERR("Create unix sock failed: %s", client_unix_sock_path
);
4372 /* Set the cloexec flag */
4373 ret
= utils_set_fd_cloexec(client_sock
);
4375 ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
4376 "Continuing but note that the consumer daemon will have a "
4377 "reference to this socket on exec()", client_sock
);
4380 /* File permission MUST be 660 */
4381 ret
= chmod(client_unix_sock_path
, S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IWGRP
);
4383 ERR("Set file permissions failed: %s", client_unix_sock_path
);
4388 /* Create the application unix socket */
4389 apps_sock
= lttcomm_create_unix_sock(apps_unix_sock_path
);
4390 if (apps_sock
< 0) {
4391 ERR("Create unix sock failed: %s", apps_unix_sock_path
);
4396 /* Set the cloexec flag */
4397 ret
= utils_set_fd_cloexec(apps_sock
);
4399 ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
4400 "Continuing but note that the consumer daemon will have a "
4401 "reference to this socket on exec()", apps_sock
);
4404 /* File permission MUST be 666 */
4405 ret
= chmod(apps_unix_sock_path
,
4406 S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IWGRP
| S_IROTH
| S_IWOTH
);
4408 ERR("Set file permissions failed: %s", apps_unix_sock_path
);
4413 DBG3("Session daemon client socket %d and application socket %d created",
4414 client_sock
, apps_sock
);
4422 * Check if the global socket is available, and if a daemon is answering at the
4423 * other side. If yes, error is returned.
4425 static int check_existing_daemon(void)
4427 /* Is there anybody out there ? */
4428 if (lttng_session_daemon_alive()) {
4436 * Set the tracing group gid onto the client socket.
4438 * Race window between mkdir and chown is OK because we are going from more
4439 * permissive (root.root) to less permissive (root.tracing).
4441 static int set_permissions(char *rundir
)
4446 gid
= utils_get_group_id(tracing_group_name
);
4448 /* Set lttng run dir */
4449 ret
= chown(rundir
, 0, gid
);
4451 ERR("Unable to set group on %s", rundir
);
4456 * Ensure all applications and tracing group can search the run
4457 * dir. Allow everyone to read the directory, since it does not
4458 * buy us anything to hide its content.
4460 ret
= chmod(rundir
, S_IRWXU
| S_IRGRP
| S_IXGRP
| S_IROTH
| S_IXOTH
);
4462 ERR("Unable to set permissions on %s", rundir
);
4466 /* lttng client socket path */
4467 ret
= chown(client_unix_sock_path
, 0, gid
);
4469 ERR("Unable to set group on %s", client_unix_sock_path
);
4473 /* kconsumer error socket path */
4474 ret
= chown(kconsumer_data
.err_unix_sock_path
, 0, 0);
4476 ERR("Unable to set group on %s", kconsumer_data
.err_unix_sock_path
);
4480 /* 64-bit ustconsumer error socket path */
4481 ret
= chown(ustconsumer64_data
.err_unix_sock_path
, 0, 0);
4483 ERR("Unable to set group on %s", ustconsumer64_data
.err_unix_sock_path
);
4487 /* 32-bit ustconsumer compat32 error socket path */
4488 ret
= chown(ustconsumer32_data
.err_unix_sock_path
, 0, 0);
4490 ERR("Unable to set group on %s", ustconsumer32_data
.err_unix_sock_path
);
4494 DBG("All permissions are set");
4500 * Create the lttng run directory needed for all global sockets and pipe.
4502 static int create_lttng_rundir(const char *rundir
)
4506 DBG3("Creating LTTng run directory: %s", rundir
);
4508 ret
= mkdir(rundir
, S_IRWXU
);
4510 if (errno
!= EEXIST
) {
4511 ERR("Unable to create %s", rundir
);
4523 * Setup sockets and directory needed by the kconsumerd communication with the
4526 static int set_consumer_sockets(struct consumer_data
*consumer_data
,
4530 char path
[PATH_MAX
];
4532 switch (consumer_data
->type
) {
4533 case LTTNG_CONSUMER_KERNEL
:
4534 snprintf(path
, PATH_MAX
, DEFAULT_KCONSUMERD_PATH
, rundir
);
4536 case LTTNG_CONSUMER64_UST
:
4537 snprintf(path
, PATH_MAX
, DEFAULT_USTCONSUMERD64_PATH
, rundir
);
4539 case LTTNG_CONSUMER32_UST
:
4540 snprintf(path
, PATH_MAX
, DEFAULT_USTCONSUMERD32_PATH
, rundir
);
4543 ERR("Consumer type unknown");
4548 DBG2("Creating consumer directory: %s", path
);
4550 ret
= mkdir(path
, S_IRWXU
| S_IRGRP
| S_IXGRP
);
4552 if (errno
!= EEXIST
) {
4554 ERR("Failed to create %s", path
);
4560 ret
= chown(path
, 0, utils_get_group_id(tracing_group_name
));
4562 ERR("Unable to set group on %s", path
);
4568 /* Create the kconsumerd error unix socket */
4569 consumer_data
->err_sock
=
4570 lttcomm_create_unix_sock(consumer_data
->err_unix_sock_path
);
4571 if (consumer_data
->err_sock
< 0) {
4572 ERR("Create unix sock failed: %s", consumer_data
->err_unix_sock_path
);
4578 * Set the CLOEXEC flag. Return code is useless because either way, the
4581 ret
= utils_set_fd_cloexec(consumer_data
->err_sock
);
4583 PERROR("utils_set_fd_cloexec");
4584 /* continue anyway */
4587 /* File permission MUST be 660 */
4588 ret
= chmod(consumer_data
->err_unix_sock_path
,
4589 S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IWGRP
);
4591 ERR("Set file permissions failed: %s", consumer_data
->err_unix_sock_path
);
4601 * Signal handler for the daemon
4603 * Simply stop all worker threads, leaving main() return gracefully after
4604 * joining all threads and calling cleanup().
4606 static void sighandler(int sig
)
4610 DBG("SIGPIPE caught");
4613 DBG("SIGINT caught");
4617 DBG("SIGTERM caught");
4621 CMM_STORE_SHARED(recv_child_signal
, 1);
4629 * Setup signal handler for :
4630 * SIGINT, SIGTERM, SIGPIPE
4632 static int set_signal_handler(void)
4635 struct sigaction sa
;
4638 if ((ret
= sigemptyset(&sigset
)) < 0) {
4639 PERROR("sigemptyset");
4643 sa
.sa_handler
= sighandler
;
4644 sa
.sa_mask
= sigset
;
4646 if ((ret
= sigaction(SIGTERM
, &sa
, NULL
)) < 0) {
4647 PERROR("sigaction");
4651 if ((ret
= sigaction(SIGINT
, &sa
, NULL
)) < 0) {
4652 PERROR("sigaction");
4656 if ((ret
= sigaction(SIGPIPE
, &sa
, NULL
)) < 0) {
4657 PERROR("sigaction");
4661 if ((ret
= sigaction(SIGUSR1
, &sa
, NULL
)) < 0) {
4662 PERROR("sigaction");
4666 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
/*
 * Set open files limit to unlimited. This daemon can open a large number of
 * file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
    int ret;
    struct rlimit lim;

    /* The kernel does not allow an infinite limit for open files */
    lim.rlim_cur = 65535;
    lim.rlim_max = 65535;

    ret = setrlimit(RLIMIT_NOFILE, &lim);
    if (ret < 0) {
        PERROR("failed to set open files limit");
    }
}
4691 * Write pidfile using the rundir and opt_pidfile.
4693 static void write_pidfile(void)
4696 char pidfile_path
[PATH_MAX
];
4701 strncpy(pidfile_path
, opt_pidfile
, sizeof(pidfile_path
));
4703 /* Build pidfile path from rundir and opt_pidfile. */
4704 ret
= snprintf(pidfile_path
, sizeof(pidfile_path
), "%s/"
4705 DEFAULT_LTTNG_SESSIOND_PIDFILE
, rundir
);
4707 PERROR("snprintf pidfile path");
4713 * Create pid file in rundir. Return value is of no importance. The
4714 * execution will continue even though we are not able to write the file.
4716 (void) utils_create_pid_file(getpid(), pidfile_path
);
4723 * Write JUL TCP port using the rundir.
4725 static void write_julport(void)
4728 char path
[PATH_MAX
];
4732 ret
= snprintf(path
, sizeof(path
), "%s/"
4733 DEFAULT_LTTNG_SESSIOND_JULPORT_FILE
, rundir
);
4735 PERROR("snprintf julport path");
4740 * Create TCP JUL port file in rundir. Return value is of no importance.
4741 * The execution will continue even though we are not able to write the
4744 (void) utils_create_pid_file(jul_tcp_port
, path
);
4753 int main(int argc
, char **argv
)
4757 const char *home_path
, *env_app_timeout
;
4759 init_kernel_workarounds();
4761 rcu_register_thread();
4763 if ((ret
= set_signal_handler()) < 0) {
4767 setup_consumerd_path();
4769 page_size
= sysconf(_SC_PAGESIZE
);
4770 if (page_size
< 0) {
4771 PERROR("sysconf _SC_PAGESIZE");
4772 page_size
= LONG_MAX
;
4773 WARN("Fallback page size to %ld", page_size
);
4776 /* Parse arguments and load the daemon configuration file */
4778 if ((ret
= set_options(argc
, argv
)) < 0) {
4783 if (opt_daemon
|| opt_background
) {
4786 ret
= lttng_daemonize(&child_ppid
, &recv_child_signal
,
4793 * We are in the child. Make sure all other file descriptors are
4794 * closed, in case we are called with more opened file descriptors than
4795 * the standard ones.
4797 for (i
= 3; i
< sysconf(_SC_OPEN_MAX
); i
++) {
4802 /* Create thread quit pipe */
4803 if ((ret
= init_thread_quit_pipe()) < 0) {
4807 /* Check if daemon is UID = 0 */
4808 is_root
= !getuid();
4811 rundir
= strdup(DEFAULT_LTTNG_RUNDIR
);
4813 /* Create global run dir with root access */
4814 ret
= create_lttng_rundir(rundir
);
4819 if (strlen(apps_unix_sock_path
) == 0) {
4820 snprintf(apps_unix_sock_path
, PATH_MAX
,
4821 DEFAULT_GLOBAL_APPS_UNIX_SOCK
);
4824 if (strlen(client_unix_sock_path
) == 0) {
4825 snprintf(client_unix_sock_path
, PATH_MAX
,
4826 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK
);
4829 /* Set global SHM for ust */
4830 if (strlen(wait_shm_path
) == 0) {
4831 snprintf(wait_shm_path
, PATH_MAX
,
4832 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH
);
4835 if (strlen(health_unix_sock_path
) == 0) {
4836 snprintf(health_unix_sock_path
, sizeof(health_unix_sock_path
),
4837 DEFAULT_GLOBAL_HEALTH_UNIX_SOCK
);
4840 /* Setup kernel consumerd path */
4841 snprintf(kconsumer_data
.err_unix_sock_path
, PATH_MAX
,
4842 DEFAULT_KCONSUMERD_ERR_SOCK_PATH
, rundir
);
4843 snprintf(kconsumer_data
.cmd_unix_sock_path
, PATH_MAX
,
4844 DEFAULT_KCONSUMERD_CMD_SOCK_PATH
, rundir
);
4846 DBG2("Kernel consumer err path: %s",
4847 kconsumer_data
.err_unix_sock_path
);
4848 DBG2("Kernel consumer cmd path: %s",
4849 kconsumer_data
.cmd_unix_sock_path
);
4851 home_path
= utils_get_home_dir();
4852 if (home_path
== NULL
) {
4853 /* TODO: Add --socket PATH option */
4854 ERR("Can't get HOME directory for sockets creation.");
4860 * Create rundir from home path. This will create something like
4863 ret
= asprintf(&rundir
, DEFAULT_LTTNG_HOME_RUNDIR
, home_path
);
4869 ret
= create_lttng_rundir(rundir
);
4874 if (strlen(apps_unix_sock_path
) == 0) {
4875 snprintf(apps_unix_sock_path
, PATH_MAX
,
4876 DEFAULT_HOME_APPS_UNIX_SOCK
, home_path
);
4879 /* Set the cli tool unix socket path */
4880 if (strlen(client_unix_sock_path
) == 0) {
4881 snprintf(client_unix_sock_path
, PATH_MAX
,
4882 DEFAULT_HOME_CLIENT_UNIX_SOCK
, home_path
);
4885 /* Set global SHM for ust */
4886 if (strlen(wait_shm_path
) == 0) {
4887 snprintf(wait_shm_path
, PATH_MAX
,
4888 DEFAULT_HOME_APPS_WAIT_SHM_PATH
, getuid());
4891 /* Set health check Unix path */
4892 if (strlen(health_unix_sock_path
) == 0) {
4893 snprintf(health_unix_sock_path
, sizeof(health_unix_sock_path
),
4894 DEFAULT_HOME_HEALTH_UNIX_SOCK
, home_path
);
4898 /* Set consumer initial state */
4899 kernel_consumerd_state
= CONSUMER_STOPPED
;
4900 ust_consumerd_state
= CONSUMER_STOPPED
;
4902 DBG("Client socket path %s", client_unix_sock_path
);
4903 DBG("Application socket path %s", apps_unix_sock_path
);
4904 DBG("Application wait path %s", wait_shm_path
);
4905 DBG("LTTng run directory path: %s", rundir
);
4907 /* 32 bits consumerd path setup */
4908 snprintf(ustconsumer32_data
.err_unix_sock_path
, PATH_MAX
,
4909 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH
, rundir
);
4910 snprintf(ustconsumer32_data
.cmd_unix_sock_path
, PATH_MAX
,
4911 DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH
, rundir
);
4913 DBG2("UST consumer 32 bits err path: %s",
4914 ustconsumer32_data
.err_unix_sock_path
);
4915 DBG2("UST consumer 32 bits cmd path: %s",
4916 ustconsumer32_data
.cmd_unix_sock_path
);
4918 /* 64 bits consumerd path setup */
4919 snprintf(ustconsumer64_data
.err_unix_sock_path
, PATH_MAX
,
4920 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH
, rundir
);
4921 snprintf(ustconsumer64_data
.cmd_unix_sock_path
, PATH_MAX
,
4922 DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH
, rundir
);
4924 DBG2("UST consumer 64 bits err path: %s",
4925 ustconsumer64_data
.err_unix_sock_path
);
4926 DBG2("UST consumer 64 bits cmd path: %s",
4927 ustconsumer64_data
.cmd_unix_sock_path
);
     * See if daemon already exists.
4932 if ((ret
= check_existing_daemon()) < 0) {
4933 ERR("Already running daemon.\n");
4935 * We do not goto exit because we must not cleanup()
4936 * because a daemon is already running.
4942 * Init UST app hash table. Alloc hash table before this point since
4943 * cleanup() can get called after that point.
4947 /* Initialize JUL domain subsystem. */
4948 if ((ret
= jul_init()) < 0) {
4949 /* ENOMEM at this point. */
4953 /* After this point, we can safely call cleanup() with "goto exit" */
4956 * These actions must be executed as root. We do that *after* setting up
4957 * the sockets path because we MUST make the check for another daemon using
4958 * those paths *before* trying to set the kernel consumer sockets and init
4962 ret
= set_consumer_sockets(&kconsumer_data
, rundir
);
4967 /* Setup kernel tracer */
4968 if (!opt_no_kernel
) {
4969 init_kernel_tracer();
4972 /* Set ulimit for open files */
4975 /* init lttng_fd tracking must be done after set_ulimit. */
4978 ret
= set_consumer_sockets(&ustconsumer64_data
, rundir
);
4983 ret
= set_consumer_sockets(&ustconsumer32_data
, rundir
);
4988 /* Setup the needed unix socket */
4989 if ((ret
= init_daemon_socket()) < 0) {
4993 /* Set credentials to socket */
4994 if (is_root
&& ((ret
= set_permissions(rundir
)) < 0)) {
4998 /* Get parent pid if -S, --sig-parent is specified. */
4999 if (opt_sig_parent
) {
5003 /* Setup the kernel pipe for waking up the kernel thread */
5004 if (is_root
&& !opt_no_kernel
) {
5005 if ((ret
= utils_create_pipe_cloexec(kernel_poll_pipe
)) < 0) {
5010 /* Setup the thread ht_cleanup communication pipe. */
5011 if (utils_create_pipe_cloexec(ht_cleanup_pipe
) < 0) {
5015 /* Setup the thread apps communication pipe. */
5016 if ((ret
= utils_create_pipe_cloexec(apps_cmd_pipe
)) < 0) {
5020 /* Setup the thread apps notify communication pipe. */
5021 if (utils_create_pipe_cloexec(apps_cmd_notify_pipe
) < 0) {
5025 /* Initialize global buffer per UID and PID registry. */
5026 buffer_reg_init_uid_registry();
5027 buffer_reg_init_pid_registry();
5029 /* Init UST command queue. */
5030 cds_wfq_init(&ust_cmd_queue
.queue
);
5033 * Get session list pointer. This pointer MUST NOT be free(). This list is
5034 * statically declared in session.c
5036 session_list_ptr
= session_get_list();
5038 /* Set up max poll set size */
5039 lttng_poll_set_max_size();
5043 /* Check for the application socket timeout env variable. */
5044 env_app_timeout
= getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV
);
5045 if (env_app_timeout
) {
5046 app_socket_timeout
= atoi(env_app_timeout
);
5048 app_socket_timeout
= DEFAULT_APP_SOCKET_RW_TIMEOUT
;
5054 /* Initialize communication library */
5056 /* This is to get the TCP timeout value. */
5057 lttcomm_inet_init();
5060 * Initialize the health check subsystem. This call should set the
5061 * appropriate time values.
5063 health_sessiond
= health_app_create(NR_HEALTH_SESSIOND_TYPES
);
5064 if (!health_sessiond
) {
5065 PERROR("health_app_create error");
5066 goto exit_health_sessiond_cleanup
;
5069 /* Create thread to clean up RCU hash tables */
5070 ret
= pthread_create(&ht_cleanup_thread
, NULL
,
5071 thread_ht_cleanup
, (void *) NULL
);
5073 PERROR("pthread_create ht_cleanup");
5074 goto exit_ht_cleanup
;
5077 /* Create health-check thread */
5078 ret
= pthread_create(&health_thread
, NULL
,
5079 thread_manage_health
, (void *) NULL
);
5081 PERROR("pthread_create health");
5085 /* Create thread to manage the client socket */
5086 ret
= pthread_create(&client_thread
, NULL
,
5087 thread_manage_clients
, (void *) NULL
);
5089 PERROR("pthread_create clients");
5093 /* Create thread to dispatch registration */
5094 ret
= pthread_create(&dispatch_thread
, NULL
,
5095 thread_dispatch_ust_registration
, (void *) NULL
);
5097 PERROR("pthread_create dispatch");
5101 /* Create thread to manage application registration. */
5102 ret
= pthread_create(®_apps_thread
, NULL
,
5103 thread_registration_apps
, (void *) NULL
);
5105 PERROR("pthread_create registration");
5109 /* Create thread to manage application socket */
5110 ret
= pthread_create(&apps_thread
, NULL
,
5111 thread_manage_apps
, (void *) NULL
);
5113 PERROR("pthread_create apps");
5117 /* Create thread to manage application notify socket */
5118 ret
= pthread_create(&apps_notify_thread
, NULL
,
5119 ust_thread_manage_notify
, (void *) NULL
);
5121 PERROR("pthread_create apps");
5122 goto exit_apps_notify
;
5125 /* Create JUL registration thread. */
5126 ret
= pthread_create(&jul_reg_thread
, NULL
,
5127 jul_thread_manage_registration
, (void *) NULL
);
5129 PERROR("pthread_create apps");
5133 /* Don't start this thread if kernel tracing is not requested nor root */
5134 if (is_root
&& !opt_no_kernel
) {
5135 /* Create kernel thread to manage kernel event */
5136 ret
= pthread_create(&kernel_thread
, NULL
,
5137 thread_manage_kernel
, (void *) NULL
);
5139 PERROR("pthread_create kernel");
5143 ret
= pthread_join(kernel_thread
, &status
);
5145 PERROR("pthread_join");
5146 goto error
; /* join error, exit without cleanup */
5151 ret
= pthread_join(jul_reg_thread
, &status
);
5153 PERROR("pthread_join JUL");
5154 goto error
; /* join error, exit without cleanup */
5158 ret
= pthread_join(apps_notify_thread
, &status
);
5160 PERROR("pthread_join apps notify");
5161 goto error
; /* join error, exit without cleanup */
5165 ret
= pthread_join(apps_thread
, &status
);
5167 PERROR("pthread_join apps");
5168 goto error
; /* join error, exit without cleanup */
5173 ret
= pthread_join(reg_apps_thread
, &status
);
5175 PERROR("pthread_join");
5176 goto error
; /* join error, exit without cleanup */
5180 ret
= pthread_join(dispatch_thread
, &status
);
5182 PERROR("pthread_join");
5183 goto error
; /* join error, exit without cleanup */
5187 ret
= pthread_join(client_thread
, &status
);
5189 PERROR("pthread_join");
5190 goto error
; /* join error, exit without cleanup */
5193 ret
= join_consumer_thread(&kconsumer_data
);
5195 PERROR("join_consumer");
5196 goto error
; /* join error, exit without cleanup */
5199 ret
= join_consumer_thread(&ustconsumer32_data
);
5201 PERROR("join_consumer ust32");
5202 goto error
; /* join error, exit without cleanup */
5205 ret
= join_consumer_thread(&ustconsumer64_data
);
5207 PERROR("join_consumer ust64");
5208 goto error
; /* join error, exit without cleanup */
5212 ret
= pthread_join(health_thread
, &status
);
5214 PERROR("pthread_join health thread");
5215 goto error
; /* join error, exit without cleanup */
5219 ret
= pthread_join(ht_cleanup_thread
, &status
);
5221 PERROR("pthread_join ht cleanup thread");
5222 goto error
; /* join error, exit without cleanup */
5225 health_app_destroy(health_sessiond
);
5226 exit_health_sessiond_cleanup
:
5229 * cleanup() is called when no other thread is running.
5231 rcu_thread_online();
5233 rcu_thread_offline();
5234 rcu_unregister_thread();