/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/poll.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>

#include "lttng-sessiond.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "testpoint.h"

#define CONSUMERD_FILE	"lttng-consumerd"
const char default_home_dir[] = DEFAULT_HOME_DIR;
const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;

const char *opt_tracing_group;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon;
static int opt_no_kernel;
static int is_root;	/* Set to 1 if the daemon is running as root */
static pid_t ppid;	/* Parent PID for --sig-parent option */
/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
/* Shared between threads */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* Global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
/*
 * UST registration command queue. This queue is tied with a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_manage_apps and thread_dispatch_ust_registration threads interact
 * with this queue and the wait/wake scheme.
 */
static struct ust_cmd_queue ust_cmd_queue;
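
/*
 * Illustrative sketch (not part of the original file) of the N-wakers /
 * 1-waiter scheme described above, as used by the threads further down:
 *
 *   Waker side (thread_registration_apps):
 *	cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
 *	futex_nto1_wake(&ust_cmd_queue.futex);
 *
 *   Waiter side (thread_dispatch_ust_registration):
 *	futex_nto1_prepare(&ust_cmd_queue.futex);
 *	node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
 *	...
 *	futex_nto1_wait(&ust_cmd_queue.futex);
 */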
/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;

int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;

static const char *module_proc_lttng = "/proc/lttng";
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR   = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd try to talk to consumer
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;

/* Used for the health monitoring of the session daemon. See health.h */
struct health_state health_thread_cmd;
struct health_state health_thread_app_manage;
struct health_state health_thread_app_reg;
struct health_state health_thread_kernel;

/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;
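
/*
 * Illustrative sketch (not in the original source): how the consumerd_state
 * flags declared above gate a client command, as done in process_client_msg()
 * further down:
 *
 *	if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
 *		ret = LTTNG_ERR_NO_KERNCONSUMERD;
 *		goto error;
 *	}
 */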
void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * runtime env. var. overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
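
/*
 * Usage note (not in the original source): the build-time defaults above can
 * be overridden at runtime by exporting the corresponding environment
 * variables before starting the session daemon, for example:
 *
 *	LTTNG_CONSUMERD64_BIN=/opt/lttng/libexec/lttng-consumerd \
 *	LTTNG_CONSUMERD64_LIBDIR=/opt/lttng/lib lttng-sessiond
 *
 * The /opt/lttng paths are only an example.
 */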
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
static int create_thread_poll_set(struct lttng_poll_event *events,
		size_t size)
{
	int ret;

	if (events == NULL || size == 0) {
		ret = -1;
		goto error;
	}

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
static int check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}
/*
 * Return group ID of the tracing group or -1 if not found.
 */
static gid_t allowed_group(void)
{
	struct group *grp;

	if (opt_tracing_group) {
		grp = getgrnam(opt_tracing_group);
	} else {
		grp = getgrnam(default_tracing_group);
	}
	if (!grp) {
		return -1;
	} else {
		return grp->gr_gid;
	}
}
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	int ret, i;

	ret = pipe(thread_quit_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}
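
/*
 * Illustrative note (not in the original source): notify_thread_pipe() writes
 * a byte to thread_quit_pipe[1]; every poll loop in this file keeps
 * thread_quit_pipe[0] in its poll set and exits cleanly through
 * check_thread_quit_pipe(), e.g.:
 *
 *	ret = check_thread_quit_pipe(pollfd, revents);
 *	if (ret) {
 *		err = 0;
 *		goto exit;
 *	}
 */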
static void cleanup(void)
{
	int ret;
	char *cmd;
	struct ltt_session *sess, *stmp;

	/* First thing first, stop all threads */
	utils_close_pipe(thread_quit_pipe);

	DBG("Removing %s directory", rundir);
	ret = asprintf(&cmd, "rm -rf %s", rundir);
	if (ret < 0) {
		ERR("asprintf failed. Something is really wrong!");
	}

	/* Remove lttng run directory */
	ret = system(cmd);
	if (ret < 0) {
		ERR("Unable to clean %s", rundir);
	}

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL session */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	DBG("Closing all UST sockets");
	ust_app_clean_list();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
}
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return total size of the buffer pointed to by buf.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	int ret, buf_size;

	buf_size = size;

	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;

	return buf_size;

error:
	return ret;
}
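
/*
 * Illustrative sketch (not part of the original file): a typical list-style
 * command handler pairs setup_lttng_msg() with a memcpy into the payload, as
 * done below in process_client_msg() for LTTNG_LIST_TRACEPOINTS:
 *
 *	ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
 *	if (ret < 0) {
 *		goto setup_error;
 *	}
 *	memcpy(cmd_ctx->llm->payload, events,
 *			sizeof(struct lttng_event) * nb_events);
 */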
/*
 * Update the kernel poll set of all channel fds available over all tracing
 * sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}

				/*
				 * Have we already sent fds to the consumer? If yes, it means
				 * that tracing is started so it is safe to send our updated
				 * stream fds.
				 */
				if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
					struct lttng_ht_iter iter;
					struct consumer_socket *socket;

					cds_lfht_for_each_entry(ksess->consumer->socks->ht,
							&iter.iter, socket, node.node) {
						/* Code flow error */
						assert(socket->fd >= 0);

						pthread_mutex_lock(socket->lock);
						ret = kernel_consumer_send_channel_stream(socket,
								channel, ksess);
						pthread_mutex_unlock(socket->lock);
						if (ret < 0) {
							goto error;
						}
					}
				}
				goto error;
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}
/*
 * For each tracing session, update newly registered apps.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	session_lock_list();

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}

	session_unlock_list();
}
/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	if (testpoint(thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update(&health_thread_kernel);

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(thread_manage_kernel_before_loop)) {
		goto error;
	}

	while (1) {
		health_code_update(&health_thread_kernel);

		if (update_poll_flag == 1) {
			/*
			 * Reset number of fd in the poll set. Always 2 since there is the thread
			 * quit pipe and the kernel pipe.
			 */
			events.nb_fd = 2;

			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		nb_fd = LTTNG_POLL_GETNB(&events);

		DBG("Thread kernel polling on %d fds", nb_fd);

		/* Zeroed the poll events */
		lttng_poll_reset(&events);

		/* Poll infinite value of time */
	restart:
		health_poll_update(&health_thread_kernel);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_kernel);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
					"This should not have happened! Continuing...");
			continue;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_kernel);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				ret = read(kernel_poll_pipe[0], &tmp, 1);
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
					break;
					/*
					 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
					 * and unregister kernel stream at this point.
					 */
				}
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		health_error(&health_thread_kernel);
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_exit(&health_thread_kernel);
	DBG("Kernel thread dying");
	return NULL;
}
/*
 * Signal the pthread condition of the consumer data so that the thread
 * waiting on it is woken up.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread startup.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
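
/*
 * Illustrative sketch (not in the original source): the waiter side pairs the
 * signal above with a predicate-checked wait on the same mutex/condition, as
 * spawn_consumer_thread() does further down:
 *
 *	pthread_mutex_lock(&consumer_data->cond_mutex);
 *	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
 *		ret = pthread_cond_timedwait(&consumer_data->cond,
 *				&consumer_data->cond_mutex, &timeout);
 *	}
 *	pthread_mutex_unlock(&consumer_data->cond_mutex);
 */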
/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	/*
	 * Since the consumer thread can be spawned at any moment in time, we init
	 * the health to a poll status (1, which is a valid health over time).
	 * When the thread starts, we update here the health to a "code" path being
	 * an even value so this thread, when reaching a poll wait, does not
	 * trigger an error with an even value.
	 *
	 * Here is the use case we avoid.
	 *
	 * +1: the first poll update during initialization (main())
	 * +2 * x: multiple code update once in this thread.
	 * +1: poll wait in this thread (being a good health state).
	 * == even number which after the wait period shows as a bad health.
	 *
	 * In a nutshell, the following poll update to the health state brings back
	 * the state to an even value meaning a code path.
	 */
	health_poll_update(&consumer_data->health);

	/*
	 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
	 * Nothing more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	nb_fd = LTTNG_POLL_GETNB(&events);

	health_code_update(&consumer_data->health);

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_update(&consumer_data->health);

	if (testpoint(thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_update(&consumer_data->health);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update(&consumer_data->health);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update(&consumer_data->health);

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0) {
			/* On error, signal condition and quit. */
			signal_consumer_condition(consumer_data, -1);
			PERROR("consumer connect");
			goto error;
		}
		signal_consumer_condition(consumer_data, 1);
		DBG("Consumer command socket ready");
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the kconsumerd error sock since we've established a connection */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	/* Update number of fd */
	nb_fd = LTTNG_POLL_GETNB(&events);

	/* Infinite blocking call, waiting for transmission */
restart_poll:
	health_poll_update(&consumer_data->health);
	ret = lttng_poll_wait(&events, -1);
	health_poll_update(&consumer_data->health);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart_poll;
		}
		goto error;
	}

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update(&consumer_data->health);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the kconsumerd socket */
		if (pollfd == sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket second poll error");
				goto error;
			}
		}
	}

	health_code_update(&consumer_data->health);

	/* Wait for any kconsumerd error */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		ERR("consumer closed the command socket");
		goto error;
	}

	ERR("consumer return code : %s", lttcomm_get_readable_code(-code));

exit:
error:
	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;

	lttng_poll_clean(&events);
error_poll:
	if (err) {
		health_error(&consumer_data->health);
		ERR("Health error occurred in %s", __func__);
	}
	health_exit(&consumer_data->health);
	DBG("consumer thread cleanup completed");

	return NULL;
}
/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct ust_command ust_cmd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	if (testpoint(thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update(&health_thread_app_manage);

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update(&health_thread_app_manage);

	while (1) {
		/* Zeroed the events structure */
		lttng_poll_reset(&events);

		nb_fd = LTTNG_POLL_GETNB(&events);

		DBG("Apps thread polling on %d fds", nb_fd);

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_update(&health_thread_app_manage);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_app_manage);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_app_manage);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					/* Empty pipe */
					ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
					if (ret < 0 || ret < sizeof(ust_cmd)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update(&health_thread_app_manage);

					/* Register application to the session daemon */
					ret = ust_app_register(&ust_cmd.reg_msg,
							ust_cmd.sock);
					if (ret == -ENOMEM) {
						goto error;
					} else if (ret < 0) {
						break;
					}

					health_code_update(&health_thread_app_manage);

					/*
					 * Validate UST version compatibility.
					 */
					ret = ust_app_validate_version(ust_cmd.sock);
					if (ret >= 0) {
						/*
						 * Add channel(s) and event(s) to newly registered apps
						 * from lttng global UST domain.
						 */
						update_ust_app(ust_cmd.sock);
					}

					health_code_update(&health_thread_app_manage);

					ret = ust_app_register_done(ust_cmd.sock);
					if (ret < 0) {
						/*
						 * If the registration is not possible, we simply
						 * unregister the apps and continue
						 */
						ust_app_unregister(ust_cmd.sock);
					} else {
						/*
						 * We only monitor the error events of the socket. This
						 * thread does not handle any incoming data from UST.
						 */
						ret = lttng_poll_add(&events, ust_cmd.sock,
								LPOLLERR | LPOLLHUP | LPOLLRDHUP);
						if (ret < 0) {
							goto error;
						}

						/* Set socket timeout for both receiving and sending */
						(void) lttcomm_setsockopt_rcv_timeout(ust_cmd.sock,
								app_socket_timeout);
						(void) lttcomm_setsockopt_snd_timeout(ust_cmd.sock,
								app_socket_timeout);

						DBG("Apps with sock %d added to poll set",
								ust_cmd.sock);
					}

					health_code_update(&health_thread_app_manage);

					break;
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
					break;
				}
			}

			health_code_update(&health_thread_app_manage);
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		health_error(&health_thread_app_manage);
		ERR("Health error occurred in %s", __func__);
	}
	health_exit(&health_thread_app_manage);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret;
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			/*
			 * Inform apps thread of the new application registration. This
			 * call is blocking so we can be assured that the data will be read
			 * at some point in time or wait to the end of the world :)
			 */
			if (apps_cmd_pipe[1] >= 0) {
				ret = write(apps_cmd_pipe[1], ust_cmd,
						sizeof(struct ust_command));
				if (ret < 0) {
					PERROR("write apps cmd pipe");
					if (errno == EBADF) {
						/*
						 * We can't inform the application thread to process
						 * registration. We will exit or else application
						 * registration will not occur and tracing will never
						 * start.
						 */
						goto error;
					}
				}
			} else {
				/* Application manager thread is not available. */
				ret = close(ust_cmd->sock);
				if (ret < 0) {
					PERROR("close ust_cmd sock");
				}
			}
			free(ust_cmd);
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
	}

error:
	DBG("Dispatch thread dying");
	return NULL;
}
/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Gets allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	if (testpoint(thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
				"Execution continues but there might be problems for already\n"
				"running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		nb_fd = LTTNG_POLL_GETNB(&events);

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_update(&health_thread_app_reg);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_app_reg);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			health_code_update(&health_thread_app_reg);

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						sock = -1;
						continue;
					}
					health_code_update(&health_thread_app_reg);
					ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
							sizeof(struct ust_register_msg));
					if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
						if (ret < 0) {
							PERROR("lttcomm_recv_unix_sock register apps");
						} else {
							ERR("Wrong size received on apps register");
						}
						free(ust_cmd);
						ret = close(sock);
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}

					health_code_update(&health_thread_app_reg);

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This apps will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

exit:
error:
	if (err) {
		health_error(&health_thread_app_reg);
		ERR("Health error occurred in %s", __func__);
	}

	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
	}
	if (sock >= 0) {
		ret = close(sock);
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	health_exit(&health_thread_app_reg);

	return NULL;
}
/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret, clock_ret;
	struct timespec timeout;

	/* Make sure we set the readiness flag to 0 because we are NOT ready */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret != 0) {
		PERROR("pthread_condattr_init consumer data");
		goto error;
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret != 0) {
		PERROR("pthread_condattr_setclock consumer data");
		goto error;
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret != 0) {
		PERROR("pthread_cond_init consumer data");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
			consumer_data);
	if (ret != 0) {
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for sem_timedwait absolute timeout */
	clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid to
	 * increment the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	/*
	 * The following loop COULD be skipped in some conditions so this is why we
	 * set ret to 0 in order to make sure at least one round of the loop is
	 * done.
	 */
	ret = 0;

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
	 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
	 * possible. This loop does not take any chances and works with both of
	 * them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	if (ret != 0) {
		if (ret == ETIMEDOUT) {
			/*
			 * Call has timed out so we kill the kconsumerd_thread and return
			 * an error.
			 */
			ERR("Condition timed out. The consumer thread was never ready."
					" Killing it");
			ret = pthread_cancel(consumer_data->thread);
			if (ret < 0) {
				PERROR("pthread_cancel consumer thread");
			}
		} else {
			PERROR("pthread_cond_wait failed consumer thread");
		}
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}
/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;
	int ret;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			ERR("Error killing consumer daemon");
			return ret;
		}
	}
	return pthread_join(consumer_data->thread, &status);
}
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		if (opt_verbose_consumer) {
			verbosity = "--verbose";
		} else {
			verbosity = "--quiet";
		}
		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fallback on the 32-bit one.
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3("	1) %s", consumerd64_bin);
			DBG3("	2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
			DBG3("	3) %s", consumerd32_bin);
			if (stat(consumerd64_bin, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = consumerd64_bin;
			} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
			} else if (stat(consumerd32_bin, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = consumerd32_bin;
			} else {
				DBG("Could not find any valid consumerd executable");
				break;
			}
			DBG("Using kernel consumer at: %s", consumer_to_use);
			execl(consumer_to_use,
					"lttng-consumerd", verbosity, "-k",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			char *tmpnew = NULL;

			if (consumerd64_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd64_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
			ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd64_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			char *tmpnew = NULL;

			if (consumerd32_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd32_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
			ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd32_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		default:
			PERROR("unknown consumer type");
			exit(EXIT_FAILURE);
		}
		if (errno != 0) {
			PERROR("kernel start consumer exec");
		}
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}
error:
	return ret;
}
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	int ret, err;

	/*
	 * Set the listen() state on the socket since there is a possible race
	 * between the exec() of the consumer daemon and this call if placed in the
	 * consumer thread. See bug #366 for more details.
	 */
	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

end:
	return 0;

error:
	/* Cleanup already created socket on error. */
	if (consumer_data->err_sock >= 0) {
		err = close(consumer_data->err_sock);
		if (err < 0) {
			PERROR("close consumer data error socket");
		}
	}
	return ret;
}
/*
 * Compute health status of each consumer. If one of them is zero (bad
 * state), we return 0.
 */
static int check_consumer_health(void)
{
	int ret;

	ret = health_check_state(&kconsumer_data.health) &&
		health_check_state(&ustconsumer32_data.health) &&
		health_check_state(&ustconsumer64_data.health);

	DBG3("Health consumer check %d", ret);

	return ret;
}
/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		ret = -1;
		goto error_open;
	}

	/* Validate kernel version */
	ret = kernel_validate_version(kernel_tracer_fd);
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	kernel_tracer_fd = -1;
	return LTTNG_ERR_KERN_VERSION;

error_modules:
	ret = close(kernel_tracer_fd);

error_open:
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;

	if (!is_root) {
		return LTTNG_ERR_NEED_ROOT_SESSIOND;
	} else {
		return LTTNG_ERR_KERN_NA;
	}
}
/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
	int ret;
	const char *dir_name;
	struct consumer_output *consumer;

	assert(session->consumer);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		DBG3("Copying tracing session consumer output in kernel session");
		/*
		 * XXX: We should audit the session creation and what this function
		 * does "extra" in order to avoid a destroy since this function is used
		 * in the domain session creation (kernel and ust) only. Same for UST
		 * domain.
		 */
		if (session->kernel_session->consumer) {
			consumer_destroy_output(session->kernel_session->consumer);
		}
		session->kernel_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->kernel_session->consumer;
		dir_name = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_UST:
		DBG3("Copying tracing session consumer output in UST session");
		if (session->ust_session->consumer) {
			consumer_destroy_output(session->ust_session->consumer);
		}
		session->ust_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->ust_session->consumer;
		dir_name = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	/* Append correct directory to subdir */
	strncat(consumer->subdir, dir_name,
			sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
	DBG3("Copy session consumer subdir %s", consumer->subdir);

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Create an UST session and add it to the session ust list.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	int ret;
	struct ltt_ust_session *lus = NULL;

	assert(session->consumer);

	switch (domain->type) {
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->path, session->id, domain);
	if (lus == NULL) {
		ret = LTTNG_ERR_UST_SESS_FAIL;
		goto error;
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	session->ust_session = lus;

	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	return LTTNG_OK;

error:
	free(lus);
	session->ust_session = NULL;
	return ret;
}
/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
	int ret;

	DBG("Creating kernel session");

	ret = kernel_create_session(session, kernel_tracer_fd);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_SESS_FAIL;
		goto error;
	}

	/* Code flow safety */
	assert(session->kernel_session);

	/* Copy session output to the newly created Kernel session */
	ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	/* Create directory(ies) on local filesystem. */
	if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(
				session->kernel_session->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, session->uid, session->gid);
		if (ret < 0) {
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error;
			}
		}
	}

	session->kernel_session->uid = session->uid;
	session->kernel_session->gid = session->gid;

	return LTTNG_OK;

error:
	trace_kernel_destroy_session(session->kernel_session);
	session->kernel_session = NULL;
	return ret;
}
/*
 * Count number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
	unsigned int i = 0;
	struct ltt_session *session;

	DBG("Counting number of available session for UID %d GID %d",
			uid, gid);
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}
		i++;
	}
	return i;
}
2180 * Process the command requested by the lttng client within the command
2181 * context structure. This function make sure that the return structure (llm)
2182 * is set and ready for transmission before returning.
2184 * Return any error encountered or 0 for success.
2186 * "sock" is only used for special-case var. len data.
2188 static int process_client_msg(struct command_ctx
*cmd_ctx
, int sock
,
2192 int need_tracing_session
= 1;
2195 DBG("Processing client command %d", cmd_ctx
->lsm
->cmd_type
);
2199 switch (cmd_ctx
->lsm
->cmd_type
) {
2200 case LTTNG_CREATE_SESSION
:
2201 case LTTNG_DESTROY_SESSION
:
2202 case LTTNG_LIST_SESSIONS
:
2203 case LTTNG_LIST_DOMAINS
:
2204 case LTTNG_START_TRACE
:
2205 case LTTNG_STOP_TRACE
:
2206 case LTTNG_DATA_PENDING
:
2213 if (opt_no_kernel
&& need_domain
2214 && cmd_ctx
->lsm
->domain
.type
== LTTNG_DOMAIN_KERNEL
) {
2216 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
2218 ret
= LTTNG_ERR_KERN_NA
;
2223 /* Deny register consumer if we already have a spawned consumer. */
2224 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_REGISTER_CONSUMER
) {
2225 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
2226 if (kconsumer_data
.pid
> 0) {
2227 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
2228 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2231 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2235 * Check for command that don't needs to allocate a returned payload. We do
2236 * this here so we don't have to make the call for no payload at each
2239 switch(cmd_ctx
->lsm
->cmd_type
) {
2240 case LTTNG_LIST_SESSIONS
:
2241 case LTTNG_LIST_TRACEPOINTS
:
2242 case LTTNG_LIST_TRACEPOINT_FIELDS
:
2243 case LTTNG_LIST_DOMAINS
:
2244 case LTTNG_LIST_CHANNELS
:
2245 case LTTNG_LIST_EVENTS
:
2248 /* Setup lttng message with no payload */
2249 ret
= setup_lttng_msg(cmd_ctx
, 0);
2251 /* This label does not try to unlock the session */
2252 goto init_setup_error
;
2256 /* Commands that DO NOT need a session. */
2257 switch (cmd_ctx
->lsm
->cmd_type
) {
2258 case LTTNG_CREATE_SESSION
:
2259 case LTTNG_CALIBRATE
:
2260 case LTTNG_LIST_SESSIONS
:
2261 case LTTNG_LIST_TRACEPOINTS
:
2262 case LTTNG_LIST_TRACEPOINT_FIELDS
:
2263 need_tracing_session
= 0;
2266 DBG("Getting session %s by name", cmd_ctx
->lsm
->session
.name
);
2268 * We keep the session list lock across _all_ commands
2269 * for now, because the per-session lock does not
2270 * handle teardown properly.
2272 session_lock_list();
2273 cmd_ctx
->session
= session_find_by_name(cmd_ctx
->lsm
->session
.name
);
2274 if (cmd_ctx
->session
== NULL
) {
2275 if (cmd_ctx
->lsm
->session
.name
!= NULL
) {
2276 ret
= LTTNG_ERR_SESS_NOT_FOUND
;
2278 /* If no session name specified */
2279 ret
= LTTNG_ERR_SELECT_SESS
;
2283 /* Acquire lock for the session */
2284 session_lock(cmd_ctx
->session
);
2294 * Check domain type for specific "pre-action".
2296 switch (cmd_ctx
->lsm
->domain
.type
) {
2297 case LTTNG_DOMAIN_KERNEL
:
2299 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
2303 /* Kernel tracer check */
2304 if (kernel_tracer_fd
== -1) {
2305 /* Basically, load kernel tracer modules */
2306 ret
= init_kernel_tracer();
2312 /* Consumer is in an ERROR state. Report back to client */
2313 if (uatomic_read(&kernel_consumerd_state
) == CONSUMER_ERROR
) {
2314 ret
= LTTNG_ERR_NO_KERNCONSUMERD
;
2318 /* Need a session for kernel command */
2319 if (need_tracing_session
) {
2320 if (cmd_ctx
->session
->kernel_session
== NULL
) {
2321 ret
= create_kernel_session(cmd_ctx
->session
);
2323 ret
= LTTNG_ERR_KERN_SESS_FAIL
;
2328 /* Start the kernel consumer daemon */
2329 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
2330 if (kconsumer_data
.pid
== 0 &&
2331 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
&&
2332 cmd_ctx
->session
->start_consumer
) {
2333 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2334 ret
= start_consumerd(&kconsumer_data
);
2336 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
2339 uatomic_set(&kernel_consumerd_state
, CONSUMER_STARTED
);
2341 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2345 * The consumer was just spawned so we need to add the socket to
2346 * the consumer output of the session if exist.
2348 ret
= consumer_create_socket(&kconsumer_data
,
2349 cmd_ctx
->session
->kernel_session
->consumer
);
2356 case LTTNG_DOMAIN_UST
:
2358 /* Consumer is in an ERROR state. Report back to client */
2359 if (uatomic_read(&ust_consumerd_state
) == CONSUMER_ERROR
) {
2360 ret
= LTTNG_ERR_NO_USTCONSUMERD
;
2364 if (need_tracing_session
) {
2365 /* Create UST session if none exist. */
2366 if (cmd_ctx
->session
->ust_session
== NULL
) {
2367 ret
= create_ust_session(cmd_ctx
->session
,
2368 &cmd_ctx
->lsm
->domain
);
2369 if (ret
!= LTTNG_OK
) {
2374 /* Start the UST consumer daemons */
2376 pthread_mutex_lock(&ustconsumer64_data
.pid_mutex
);
2377 if (consumerd64_bin
[0] != '\0' &&
2378 ustconsumer64_data
.pid
== 0 &&
2379 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
&&
2380 cmd_ctx
->session
->start_consumer
) {
2381 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
2382 ret
= start_consumerd(&ustconsumer64_data
);
2384 ret
= LTTNG_ERR_UST_CONSUMER64_FAIL
;
2385 uatomic_set(&ust_consumerd64_fd
, -EINVAL
);
2389 uatomic_set(&ust_consumerd64_fd
, ustconsumer64_data
.cmd_sock
);
2390 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
2392 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
2396 * Setup socket for consumer 64 bit. No need for atomic access
2397 * since it was set above and can ONLY be set in this thread.
2399 ret
= consumer_create_socket(&ustconsumer64_data
,
2400 cmd_ctx
->session
->ust_session
->consumer
);
2406 if (consumerd32_bin
[0] != '\0' &&
2407 ustconsumer32_data
.pid
== 0 &&
2408 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
&&
2409 cmd_ctx
->session
->start_consumer
) {
2410 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
2411 ret
= start_consumerd(&ustconsumer32_data
);
2413 ret
= LTTNG_ERR_UST_CONSUMER32_FAIL
;
2414 uatomic_set(&ust_consumerd32_fd
, -EINVAL
);
2418 uatomic_set(&ust_consumerd32_fd
, ustconsumer32_data
.cmd_sock
);
2419 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
2421 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
2425 * Setup socket for consumer 64 bit. No need for atomic access
2426 * since it was set above and can ONLY be set in this thread.
2428 ret
= consumer_create_socket(&ustconsumer32_data
,
2429 cmd_ctx
->session
->ust_session
->consumer
);
2441 /* Validate consumer daemon state when start/stop trace command */
2442 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_START_TRACE
||
2443 cmd_ctx
->lsm
->cmd_type
== LTTNG_STOP_TRACE
) {
2444 switch (cmd_ctx
->lsm
->domain
.type
) {
2445 case LTTNG_DOMAIN_UST
:
2446 if (uatomic_read(&ust_consumerd_state
) != CONSUMER_STARTED
) {
2447 ret
= LTTNG_ERR_NO_USTCONSUMERD
;
2451 case LTTNG_DOMAIN_KERNEL
:
2452 if (uatomic_read(&kernel_consumerd_state
) != CONSUMER_STARTED
) {
2453 ret
= LTTNG_ERR_NO_KERNCONSUMERD
;
2461 * Check that the UID or GID match that of the tracing session.
2462 * The root user can interact with all sessions.
2464 if (need_tracing_session
) {
2465 if (!session_access_ok(cmd_ctx
->session
,
2466 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
2467 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
))) {
2468 ret
= LTTNG_ERR_EPERM
;
2473 /* Process by command type */
2474 switch (cmd_ctx
->lsm
->cmd_type
) {
2475 case LTTNG_ADD_CONTEXT
:
2477 ret
= cmd_add_context(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2478 cmd_ctx
->lsm
->u
.context
.channel_name
,
2479 &cmd_ctx
->lsm
->u
.context
.ctx
, kernel_poll_pipe
[1]);
2482 case LTTNG_DISABLE_CHANNEL
:
2484 ret
= cmd_disable_channel(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2485 cmd_ctx
->lsm
->u
.disable
.channel_name
);
2488 case LTTNG_DISABLE_EVENT
:
2490 ret
= cmd_disable_event(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2491 cmd_ctx
->lsm
->u
.disable
.channel_name
,
2492 cmd_ctx
->lsm
->u
.disable
.name
);
2495 case LTTNG_DISABLE_ALL_EVENT
:
2497 DBG("Disabling all events");
2499 ret
= cmd_disable_event_all(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2500 cmd_ctx
->lsm
->u
.disable
.channel_name
);
2503 case LTTNG_DISABLE_CONSUMER
:
2505 ret
= cmd_disable_consumer(cmd_ctx
->lsm
->domain
.type
, cmd_ctx
->session
);
2508 case LTTNG_ENABLE_CHANNEL
:
2510 ret
= cmd_enable_channel(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2511 &cmd_ctx
->lsm
->u
.channel
.chan
, kernel_poll_pipe
[1]);
	case LTTNG_ENABLE_CONSUMER:
	{
		/*
		 * XXX: 0 means that this URI should be applied on the session. Should
		 * be a DOMAIN enum.
		 */
		ret = cmd_enable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
		if (ret != LTTNG_OK) {
			goto error;
		}

		if (cmd_ctx->lsm->domain.type == 0) {
			/* Add the URI for the UST session if a consumer is present. */
			if (cmd_ctx->session->ust_session &&
					cmd_ctx->session->ust_session->consumer) {
				ret = cmd_enable_consumer(LTTNG_DOMAIN_UST, cmd_ctx->session);
			} else if (cmd_ctx->session->kernel_session &&
					cmd_ctx->session->kernel_session->consumer) {
				ret = cmd_enable_consumer(LTTNG_DOMAIN_KERNEL,
						cmd_ctx->session);
			}
		}
		break;
	}
	case LTTNG_ENABLE_EVENT:
	{
		ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_ENABLE_ALL_EVENT:
	{
		DBG("Enabling all events");

		ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_LIST_TRACEPOINTS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				sizeof(struct lttng_event) * nb_events);

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	{
		struct lttng_event_field *fields;
		ssize_t nb_fields;

		nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
				&fields);
		if (nb_fields < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_fields;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx,
				sizeof(struct lttng_event_field) * nb_fields);
		if (ret < 0) {
			free(fields);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, fields,
				sizeof(struct lttng_event_field) * nb_fields);

		free(fields);

		ret = LTTNG_OK;
		break;
	}
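	/*
	 * The two listing commands above follow the reply pattern used all over
	 * this switch: call setup_lttng_msg(cmd_ctx, count * sizeof(elem)) to
	 * allocate the lttcomm_lttng_msg header plus payload, memcpy() the
	 * array returned by the cmd_*() helper into cmd_ctx->llm->payload,
	 * then free the temporary array and return LTTNG_OK.
	 */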
	case LTTNG_SET_CONSUMER_URI:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri == 0) {
			ret = LTTNG_ERR_INVALID;
			goto error;
		}

		uris = zmalloc(len);
		if (uris == NULL) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		/* Receive variable len data */
		DBG("Receiving %zu URI(s) from client ...", nb_uri);
		ret = lttcomm_recv_unix_sock(sock, uris, len);
		if (ret <= 0) {
			DBG("No URIs received from client... continuing");
			*sock_error = 1;
			ret = LTTNG_ERR_SESSION_FAIL;
			free(uris);
			goto error;
		}

		ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				nb_uri, uris);
		if (ret != LTTNG_OK) {
			free(uris);
			goto error;
		}

		/*
		 * XXX: 0 means that this URI should be applied on the session. Should
		 * be a DOMAIN enum.
		 */
		if (cmd_ctx->lsm->domain.type == 0) {
			/* Add the URI for the UST session if a consumer is present. */
			if (cmd_ctx->session->ust_session &&
					cmd_ctx->session->ust_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
						nb_uri, uris);
			} else if (cmd_ctx->session->kernel_session &&
					cmd_ctx->session->kernel_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
						cmd_ctx->session, nb_uri, uris);
			}
		}

		free(uris);
		break;
	}
	case LTTNG_START_TRACE:
	{
		ret = cmd_start_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_STOP_TRACE:
	{
		ret = cmd_stop_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_CREATE_SESSION:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
				&cmd_ctx->creds);

		free(uris);
		break;
	}
	case LTTNG_DESTROY_SESSION:
	{
		ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);

		/* Set session to NULL so we do not unlock it after free. */
		cmd_ctx->session = NULL;
		break;
	}
	case LTTNG_LIST_DOMAINS:
	{
		ssize_t nb_dom;
		struct lttng_domain *domains;

		nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
		if (nb_dom < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_dom;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
		if (ret < 0) {
			free(domains);
			goto setup_error;
		}

		/* Copy domain list into message payload */
		memcpy(cmd_ctx->llm->payload, domains,
				nb_dom * sizeof(struct lttng_domain));

		free(domains);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_CHANNELS:
	{
		int nb_chan;
		struct lttng_channel *channels;

		nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, &channels);
		if (nb_chan < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_chan;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
		if (ret < 0) {
			free(channels);
			goto setup_error;
		}

		/* Copy channel list into message payload */
		memcpy(cmd_ctx->llm->payload, channels,
				nb_chan * sizeof(struct lttng_channel));

		free(channels);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_EVENTS:
	{
		ssize_t nb_event;
		struct lttng_event *events = NULL;

		nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				cmd_ctx->lsm->u.list.channel_name, &events);
		if (nb_event < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_event;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				nb_event * sizeof(struct lttng_event));

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SESSIONS:
	{
		unsigned int nr_sessions;

		session_lock_list();
		nr_sessions = lttng_sessions_count(
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
		if (ret < 0) {
			session_unlock_list();
			goto setup_error;
		}

		/* Fill the session array */
		cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		session_unlock_list();

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_CALIBRATE:
	{
		ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
				&cmd_ctx->lsm->u.calibrate);
		break;
	}
	case LTTNG_REGISTER_CONSUMER:
	{
		struct consumer_data *cdata;

		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			cdata = &kconsumer_data;
			break;
		default:
			ret = LTTNG_ERR_UND;
			goto error;
		}

		ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.reg.path, cdata);
		break;
	}
	case LTTNG_ENABLE_EVENT_WITH_FILTER:
	{
		struct lttng_filter_bytecode *bytecode;

		if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}
		if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}
		bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
		if (!bytecode) {
			ret = LTTNG_ERR_FILTER_NOMEM;
			goto error;
		}
		/* Receive var. len. data */
		DBG("Receiving var len data from client ...");
		ret = lttcomm_recv_unix_sock(sock, bytecode,
				cmd_ctx->lsm->u.enable.bytecode_len);
		if (ret <= 0) {
			DBG("Nothing recv() from client var len data... continuing");
			*sock_error = 1;
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		if (bytecode->len + sizeof(*bytecode)
				!= cmd_ctx->lsm->u.enable.bytecode_len) {
			free(bytecode);
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_DATA_PENDING:
	{
		ret = cmd_data_pending(cmd_ctx->session);
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		break;
	}

error:
	if (cmd_ctx->llm == NULL) {
		DBG("Missing llm structure. Allocating one.");
		if (setup_lttng_msg(cmd_ctx, 0) < 0) {
			goto setup_error;
		}
	}
	/* Set return code */
	cmd_ctx->llm->ret_code = ret;

setup_error:
	if (cmd_ctx->session) {
		session_unlock(cmd_ctx->session);
	}
	if (need_tracing_session) {
		session_unlock_list();
	}
	return ret;
}
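/*
 * Every branch above funnels into the same epilogue: if no command built a
 * reply, a zero-payload lttcomm_lttng_msg is allocated so that the client
 * always receives at least a header, and the lttng_error_code held in ret is
 * stored in cmd_ctx->llm->ret_code before thread_manage_clients() sends the
 * buffer back on the client socket.
 */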
/*
 * Thread managing the health check socket.
 */
static void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_health_msg msg;
	struct lttcomm_health_data reply;

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(health_unix_sock_path);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		ret = -1;
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		DBG("Health check ready");

		nb_fd = LTTNG_POLL_GETNB(&events);

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			new_sock = -1;
			continue;
		}

		rcu_thread_online();

		switch (msg.component) {
		case LTTNG_HEALTH_CMD:
			reply.ret_code = health_check_state(&health_thread_cmd);
			break;
		case LTTNG_HEALTH_APP_MANAGE:
			reply.ret_code = health_check_state(&health_thread_app_manage);
			break;
		case LTTNG_HEALTH_APP_REG:
			reply.ret_code = health_check_state(&health_thread_app_reg);
			break;
		case LTTNG_HEALTH_KERNEL:
			reply.ret_code = health_check_state(&health_thread_kernel);
			break;
		case LTTNG_HEALTH_CONSUMER:
			reply.ret_code = check_consumer_health();
			break;
		case LTTNG_HEALTH_ALL:
			reply.ret_code =
				health_check_state(&health_thread_app_manage) &&
				health_check_state(&health_thread_app_reg) &&
				health_check_state(&health_thread_cmd) &&
				health_check_state(&health_thread_kernel) &&
				check_consumer_health();
			break;
		default:
			reply.ret_code = LTTNG_ERR_UND;
			break;
		}

		/*
		 * Flip ret value since 0 is a success and 1 indicates a bad health for
		 * the client, whereas in the sessiond it is the opposite. Again, this
		 * is just to make things easier for us poor developers who enjoy a lot
		 * of laziness.
		 */
		if (reply.ret_code == 0 || reply.ret_code == 1) {
			reply.ret_code = !reply.ret_code;
		}

		DBG2("Health check return value %d", reply.ret_code);

		ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
		new_sock = -1;
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(health_unix_sock_path);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (new_sock >= 0) {
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);

	rcu_unregister_thread();
	return NULL;
}
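/*
 * Illustrative health-check client (a minimal sketch, not part of the
 * daemon; the connect/send helper names and the LTTNG_HEALTH_CMD usage are
 * assumed from the message handling above):
 *
 *	struct lttcomm_health_msg msg = { .component = LTTNG_HEALTH_CMD };
 *	struct lttcomm_health_data reply;
 *	int fd = lttcomm_connect_unix_sock(health_unix_sock_path);
 *
 *	if (fd >= 0) {
 *		lttcomm_send_unix_sock(fd, &msg, sizeof(msg));
 *		lttcomm_recv_unix_sock(fd, &reply, sizeof(reply));
 *		// After the flip above, 0 means the component is healthy.
 *		close(fd);
 *	}
 */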
/*
 * This thread manages all client requests arriving on the Unix client socket
 * created by the daemon.
 */
static void *thread_manage_clients(void *data)
{
	int sock = -1, ret, i, pollfd, err = -1;
	int sock_error;
	uint32_t revents, nb_fd;
	struct command_ctx *cmd_ctx = NULL;
	struct lttng_poll_event events;

	DBG("[thread] Manage client started");

	rcu_register_thread();

	if (testpoint(thread_manage_clients)) {
		goto error_testpoint;
	}

	health_code_update(&health_thread_cmd);

	ret = lttcomm_listen_unix_sock(client_sock);
	if (ret < 0) {
		goto error_create_poll;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Notify parent pid that we are ready to accept command for client side.
	 */
	if (opt_sig_parent) {
		kill(ppid, SIGUSR1);
	}

	if (testpoint(thread_manage_clients_before_loop)) {
		goto error;
	}

	health_code_update(&health_thread_cmd);

	while (1) {
		DBG("Accepting client command ...");

		nb_fd = LTTNG_POLL_GETNB(&events);

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_update(&health_thread_cmd);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_cmd);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_cmd);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == client_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Client socket poll error");
					goto error;
				}
			}
		}

		DBG("Wait for client response");

		health_code_update(&health_thread_cmd);

		sock = lttcomm_accept_unix_sock(client_sock);
		if (sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(sock);

		/* Set socket option for credentials retrieval */
		ret = lttcomm_setsockopt_creds_unix_sock(sock);
		if (ret < 0) {
			goto error;
		}

		/* Allocate context command to process the client request */
		cmd_ctx = zmalloc(sizeof(struct command_ctx));
		if (cmd_ctx == NULL) {
			PERROR("zmalloc cmd_ctx");
			goto error;
		}

		/* Allocate data buffer for reception */
		cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
		if (cmd_ctx->lsm == NULL) {
			PERROR("zmalloc cmd_ctx->lsm");
			goto error;
		}

		cmd_ctx->llm = NULL;
		cmd_ctx->session = NULL;

		health_code_update(&health_thread_cmd);

		/*
		 * Data is received from the lttng client. The struct
		 * lttcomm_session_msg (lsm) contains the command and data request of
		 * the client.
		 */
		DBG("Receiving data from client ...");
		ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
				sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(sock);
			if (ret) {
				PERROR("close");
			}
			sock = -1;
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update(&health_thread_cmd);

		// TODO: Validate cmd_ctx including sanity check for
		// security purpose.

		rcu_thread_online();
		/*
		 * This function dispatches the work to the kernel or userspace tracer
		 * libs and fills the lttcomm_lttng_msg data structure with all the
		 * information needed by the client. The command context struct
		 * contains everything this function may need.
		 */
		ret = process_client_msg(cmd_ctx, sock, &sock_error);
		rcu_thread_offline();
		if (ret < 0) {
			/*
			 * TODO: Inform client somehow of the fatal error. At
			 * this point, ret < 0 means that a zmalloc failed
			 * (ENOMEM). Error detected but still accept
			 * command, unless a socket error has been
			 * detected.
			 */
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update(&health_thread_cmd);

		DBG("Sending response (size: %d, retcode: %s)",
				cmd_ctx->lttng_msg_size,
				lttng_strerror(-cmd_ctx->llm->ret_code));
		ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
		if (ret < 0) {
			ERR("Failed to send data back to client");
		}

		/* End of transmission */
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		sock = -1;

		clean_command_ctx(&cmd_ctx);

		health_code_update(&health_thread_cmd);
	}

exit:
error:
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	clean_command_ctx(&cmd_ctx);

error_create_poll:
error_testpoint:
	unlink(client_unix_sock_path);
	if (client_sock >= 0) {
		ret = close(client_sock);
		if (ret) {
			PERROR("close");
		}
	}

	if (err) {
		health_error(&health_thread_cmd);
		ERR("Health error occurred in %s", __func__);
	}

	health_exit(&health_thread_cmd);

	DBG("Client thread dying");

	rcu_unregister_thread();
	return NULL;
}
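/*
 * Each pass through the loop above services exactly one client connection:
 * accept on client_sock, receive a single lttcomm_session_msg together with
 * the sender's credentials, dispatch it through process_client_msg(), send
 * the resulting lttcomm_lttng_msg back, then close the socket and free the
 * command context. No client state survives from one command to the next.
 */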
/*
 * usage function on stderr
 */
static void usage(void)
{
	fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
	fprintf(stderr, "  -h, --help                         Display this usage.\n");
	fprintf(stderr, "  -c, --client-sock PATH             Specify path for the client unix socket\n");
	fprintf(stderr, "  -a, --apps-sock PATH               Specify path for apps unix socket\n");
	fprintf(stderr, "      --kconsumerd-err-sock PATH     Specify path for the kernel consumer error socket\n");
	fprintf(stderr, "      --kconsumerd-cmd-sock PATH     Specify path for the kernel consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
	fprintf(stderr, "      --consumerd32-path PATH        Specify path for the 32-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd32-libdir PATH      Specify path for the 32-bit UST consumer daemon libraries\n");
	fprintf(stderr, "      --consumerd64-path PATH        Specify path for the 64-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd64-libdir PATH      Specify path for the 64-bit UST consumer daemon libraries\n");
	fprintf(stderr, "  -d, --daemonize                    Start as a daemon.\n");
	fprintf(stderr, "  -g, --group NAME                   Specify the tracing group name. (default: tracing)\n");
	fprintf(stderr, "  -V, --version                      Show version number.\n");
	fprintf(stderr, "  -S, --sig-parent                   Send SIGCHLD to parent pid to notify readiness.\n");
	fprintf(stderr, "  -q, --quiet                        No output at all.\n");
	fprintf(stderr, "  -v, --verbose                      Verbose mode. Activate DBG() macro.\n");
	fprintf(stderr, "      --verbose-consumer             Verbose mode for consumer. Activate DBG() macro.\n");
	fprintf(stderr, "      --no-kernel                    Disable kernel tracer\n");
}
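/*
 * Typical invocations (illustrative only; every path can be overridden with
 * the options listed above):
 *
 *	lttng-sessiond --daemonize
 *	lttng-sessiond -vvv --verbose-consumer --no-kernel
 *	lttng-sessiond --client-sock /tmp/test.client.sock \
 *			--apps-sock /tmp/test.apps.sock
 */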
/*
 * daemon argument parsing
 */
static int parse_args(int argc, char **argv)
{
	int c;

	static struct option long_options[] = {
		{ "client-sock", 1, 0, 'c' },
		{ "apps-sock", 1, 0, 'a' },
		{ "kconsumerd-cmd-sock", 1, 0, 'C' },
		{ "kconsumerd-err-sock", 1, 0, 'E' },
		{ "ustconsumerd32-cmd-sock", 1, 0, 'G' },
		{ "ustconsumerd32-err-sock", 1, 0, 'H' },
		{ "ustconsumerd64-cmd-sock", 1, 0, 'D' },
		{ "ustconsumerd64-err-sock", 1, 0, 'F' },
		{ "consumerd32-path", 1, 0, 'u' },
		{ "consumerd32-libdir", 1, 0, 'U' },
		{ "consumerd64-path", 1, 0, 't' },
		{ "consumerd64-libdir", 1, 0, 'T' },
		{ "daemonize", 0, 0, 'd' },
		{ "sig-parent", 0, 0, 'S' },
		{ "help", 0, 0, 'h' },
		{ "group", 1, 0, 'g' },
		{ "version", 0, 0, 'V' },
		{ "quiet", 0, 0, 'q' },
		{ "verbose", 0, 0, 'v' },
		{ "verbose-consumer", 0, 0, 'Z' },
		{ "no-kernel", 0, 0, 'N' },
		{ NULL, 0, 0, 0 }
	};

	while (1) {
		int option_index = 0;
		c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t",
				long_options, &option_index);
		if (c == -1) {
			break;
		}

		switch (c) {
		case 0:
			fprintf(stderr, "option %s", long_options[option_index].name);
			if (optarg) {
				fprintf(stderr, " with arg %s\n", optarg);
			}
			break;
		case 'c':
			snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'a':
			snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'd':
			opt_daemon = 1;
			break;
		case 'g':
			opt_tracing_group = optarg;
			break;
		case 'V':
			fprintf(stdout, "%s\n", VERSION);
			exit(EXIT_SUCCESS);
		case 'S':
			opt_sig_parent = 1;
			break;
		case 'E':
			snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'C':
			snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'F':
			snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'D':
			snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'H':
			snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'G':
			snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'N':
			opt_no_kernel = 1;
			break;
		case 'q':
			lttng_opt_quiet = 1;
			break;
		case 'v':
			/* Verbose level can increase using multiple -v */
			lttng_opt_verbose += 1;
			break;
		case 'Z':
			opt_verbose_consumer += 1;
			break;
		case 'u':
			consumerd32_bin = optarg;
			break;
		case 'U':
			consumerd32_libdir = optarg;
			break;
		case 't':
			consumerd64_bin = optarg;
			break;
		case 'T':
			consumerd64_libdir = optarg;
			break;
		default:
			/*
			 * Unknown option or other error.
			 * Error is printed by getopt, just return.
			 */
			return -1;
		}
	}

	return 0;
}
/*
 * Create the two sockets needed by the daemon.
 *	apps_sock   - The communication socket for all UST apps.
 *	client_sock - The communication socket of the cli tool (lttng).
 */
static int init_daemon_socket(void)
{
	int ret = 0;
	mode_t old_umask;

	old_umask = umask(0);

	/* Create client tool unix socket */
	client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
	if (client_sock < 0) {
		ERR("Create unix sock failed: %s", client_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(client_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", client_sock);
	}

	/* File permission MUST be 660 */
	ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", client_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	/* Create the application unix socket */
	apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
	if (apps_sock < 0) {
		ERR("Create unix sock failed: %s", apps_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(apps_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", apps_sock);
	}

	/* File permission MUST be 666 */
	ret = chmod(apps_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", apps_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	DBG3("Session daemon client socket %d and application socket %d created",
			client_sock, apps_sock);

end:
	umask(old_umask);
	return ret;
}

/*
 * Check if the global socket is available, and if a daemon is answering at the
 * other side. If yes, error is returned.
 */
static int check_existing_daemon(void)
{
	/* Is there anybody out there ? */
	if (lttng_session_daemon_alive()) {
		return -EEXIST;
	}

	return 0;
}
/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	gid_t gid;

	ret = allowed_group();
	if (ret < 0) {
		WARN("No tracing group detected");
		ret = 0;
		goto end;
	}

	gid = ret;

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
		PERROR("chown");
	}

	/* Ensure tracing group can search the run dir */
	ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
	if (ret < 0) {
		ERR("Unable to set permissions on %s", rundir);
		PERROR("chmod");
	}

	/* lttng client socket path */
	ret = chown(client_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", client_unix_sock_path);
		PERROR("chown");
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 32-bit ustconsumer compat32 error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
		PERROR("chown");
	}

	DBG("All permissions are set");

end:
	return ret;
}
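/*
 * Net effect when running as root: the run directory ends up root:tracing
 * with mode 0711 (execute/search only for group and others), and the client
 * and consumer error sockets are handed to the tracing group, which is what
 * lets non-root members of that group drive the session daemon.
 */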
/*
 * Create the lttng run directory needed for all global sockets and pipe.
 */
static int create_lttng_rundir(const char *rundir)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", rundir);

	ret = mkdir(rundir, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", rundir);
			goto error;
		} else {
			ret = 0;
		}
	}

error:
	return ret;
}
/*
 * Setup the sockets and directory needed for the consumerd communication with
 * the session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data,
		const char *rundir)
{
	int ret;
	char path[PATH_MAX];

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
		break;
	case LTTNG_CONSUMER64_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
		break;
	case LTTNG_CONSUMER32_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Failed to create %s", path);
			goto error;
		}
		ret = 0;
	}

	/* Create the consumerd error unix socket */
	consumer_data->err_sock =
		lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
		PERROR("chmod");
		goto error;
	}

error:
	return ret;
}
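/*
 * main() calls set_consumer_sockets() three times, once per consumer flavour
 * (kernel, 64-bit UST, 32-bit UST): each call creates the per-consumer
 * directory under the run dir and the error socket the consumer daemon will
 * report on, with the same 0660 permissions used for the client socket.
 */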
/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGPIPE:
		DBG("SIGPIPE caught");
		return;
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	default:
		break;
	}
}

/*
 * Setup signal handler for :
 *	SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_handler = sighandler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");

	return ret;
}

/*
 * Set open files limit to unlimited. This daemon can open a large number of
 * file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("failed to set open files limit");
	}
}
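/*
 * 65535 is used instead of RLIM_INFINITY because the kernel refuses an
 * unlimited RLIMIT_NOFILE. A quick way to confirm the effective limit from
 * inside the daemon (illustrative sketch only):
 *
 *	struct rlimit lim;
 *	if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
 *		DBG("open files soft limit: %llu",
 *				(unsigned long long) lim.rlim_cur);
 *	}
 */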
int main(int argc, char **argv)
{
	int ret = 0;
	void *status;
	const char *home_path, *env_app_timeout;

	init_kernel_workarounds();

	rcu_register_thread();

	setup_consumerd_path();

	/* Parse arguments */
	if ((ret = parse_args(argc, argv)) < 0) {
		goto error;
	}

	if (opt_daemon) {
		int i;

		/*
		 * fork
		 * child: setsid, close FD 0, 1, 2, chdir /
		 * parent: exit (if fork is successful)
		 */
		ret = daemon(0, 0);
		if (ret < 0) {
			PERROR("daemon");
			goto error;
		}
		/*
		 * We are in the child. Make sure all other file
		 * descriptors are closed, in case we are called with
		 * more opened file descriptors than the standard ones.
		 */
		for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
			(void) close(i);
		}
	}

	/* Create thread quit pipe */
	if ((ret = init_thread_quit_pipe()) < 0) {
		goto error;
	}

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

	if (is_root) {
		rundir = strdup(DEFAULT_LTTNG_RUNDIR);

		/* Create global run dir with root access */
		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_UNIX_SOCK);
		}

		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
		}

		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
		}

		/* Setup kernel consumerd path */
		snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
		snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);

		DBG2("Kernel consumer err path: %s",
				kconsumer_data.err_unix_sock_path);
		DBG2("Kernel consumer cmd path: %s",
				kconsumer_data.cmd_unix_sock_path);
	} else {
		home_path = get_home_dir();
		if (home_path == NULL) {
			/* TODO: Add --socket PATH option */
			ERR("Can't get HOME directory for sockets creation.");
			goto error;
		}

		/*
		 * Create rundir from home path. This will create something like
		 * $HOME/.lttng
		 */
		ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
		if (ret < 0) {
			goto error;
		}

		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
		}

		/* Set the cli tool unix socket path */
		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
		}

		/* Set health check Unix path */
		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
		}
	}

	/* Set consumer initial state */
	kernel_consumerd_state = CONSUMER_STOPPED;
	ust_consumerd_state = CONSUMER_STOPPED;

	DBG("Client socket path %s", client_unix_sock_path);
	DBG("Application socket path %s", apps_unix_sock_path);
	DBG("LTTng run directory path: %s", rundir);

	/* 32 bits consumerd path setup */
	snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 32 bits err path: %s",
			ustconsumer32_data.err_unix_sock_path);
	DBG2("UST consumer 32 bits cmd path: %s",
			ustconsumer32_data.cmd_unix_sock_path);

	/* 64 bits consumerd path setup */
	snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 64 bits err path: %s",
			ustconsumer64_data.err_unix_sock_path);
	DBG2("UST consumer 64 bits cmd path: %s",
			ustconsumer64_data.cmd_unix_sock_path);
	/*
	 * See if a daemon is already running.
	 */
	if ((ret = check_existing_daemon()) < 0) {
		ERR("Already running daemon.\n");
		/*
		 * We do not goto exit because we must not cleanup()
		 * because a daemon is already running.
		 */
		goto error;
	}

	/*
	 * Init UST app hash table. Alloc hash table before this point since
	 * cleanup() can get called after that point.
	 */
	ust_app_ht_alloc();

	/* After this point, we can safely call cleanup() with "goto exit" */

	/*
	 * These actions must be executed as root. We do that *after* setting up
	 * the sockets path because we MUST make the check for another daemon using
	 * those paths *before* trying to set the kernel consumer sockets and init
	 * kernel tracer.
	 */
	if (is_root) {
		ret = set_consumer_sockets(&kconsumer_data, rundir);
		if (ret < 0) {
			goto exit;
		}

		/* Setup kernel tracer */
		if (!opt_no_kernel) {
			init_kernel_tracer();
		}

		/* Set ulimit for open files */
		set_ulimit();
	}
	/* init lttng_fd tracking must be done after set_ulimit. */
	lttng_fd_init();

	ret = set_consumer_sockets(&ustconsumer64_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	ret = set_consumer_sockets(&ustconsumer32_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	if ((ret = set_signal_handler()) < 0) {
		goto exit;
	}

	/* Setup the needed unix socket */
	if ((ret = init_daemon_socket()) < 0) {
		goto exit;
	}

	/* Set credentials to socket */
	if (is_root && ((ret = set_permissions(rundir)) < 0)) {
		goto exit;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (opt_sig_parent) {
		ppid = getppid();
	}

	/* Setup the kernel pipe for waking up the kernel thread */
	if (is_root && !opt_no_kernel) {
		if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
			goto exit;
		}
	}

	/* Setup the thread apps communication pipe. */
	if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
		goto exit;
	}

	/* Init UST command queue. */
	cds_wfq_init(&ust_cmd_queue.queue);

	/*
	 * Get session list pointer. This pointer MUST NOT be free(). This list is
	 * statically declared in session.c
	 */
	session_list_ptr = session_get_list();

	/* Set up max poll set size */
	lttng_poll_set_max_size();

	/* Init all health thread counters. */
	health_init(&health_thread_cmd);
	health_init(&health_thread_kernel);
	health_init(&health_thread_app_manage);
	health_init(&health_thread_app_reg);

	/*
	 * Init health counters of the consumer thread. We do a quick hack here so
	 * that the state of the consumer health is fine even if the thread is not
	 * started. Once the thread starts, the health state is updated with a poll
	 * value to set a health code path. This is simply to ease our life and has
	 * no cost whatsoever.
	 */
	health_init(&kconsumer_data.health);
	health_poll_update(&kconsumer_data.health);
	health_init(&ustconsumer32_data.health);
	health_poll_update(&ustconsumer32_data.health);
	health_init(&ustconsumer64_data.health);
	health_poll_update(&ustconsumer64_data.health);

	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		app_socket_timeout = atoi(env_app_timeout);
	} else {
		app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}
	/* Create thread to manage the health check socket */
	ret = pthread_create(&health_thread, NULL,
			thread_manage_health, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create health");
		goto exit_health;
	}

	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, NULL,
			thread_manage_clients, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create clients");
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, NULL,
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create dispatch");
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, NULL,
			thread_registration_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create registration");
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, NULL,
			thread_manage_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create apps");
		goto exit_apps;
	}

	/* Don't start this thread if kernel tracing is not requested nor root */
	if (is_root && !opt_no_kernel) {
		/* Create kernel thread to manage kernel event */
		ret = pthread_create(&kernel_thread, NULL,
				thread_manage_kernel, (void *) NULL);
		if (ret != 0) {
			PERROR("pthread_create kernel");
			goto exit_kernel;
		}

		ret = pthread_join(kernel_thread, &status);
		if (ret != 0) {
			PERROR("pthread_join");
			goto error;	/* join error, exit without cleanup */
		}
	}

exit_kernel:
	ret = pthread_join(apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_apps:
	ret = pthread_join(reg_apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_reg_apps:
	ret = pthread_join(dispatch_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_dispatch:
	ret = pthread_join(client_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&kconsumer_data);
	if (ret != 0) {
		PERROR("join_consumer");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer32_data);
	if (ret != 0) {
		PERROR("join_consumer ust32");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer64_data);
	if (ret != 0) {
		PERROR("join_consumer ust64");
		goto error;	/* join error, exit without cleanup */
	}

exit_client:
	ret = pthread_join(health_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join health thread");
		goto error;	/* join error, exit without cleanup */
	}

exit_health:
exit:
	/*
	 * cleanup() is called when no other thread is running.
	 */
	rcu_thread_online();
	cleanup();
	rcu_thread_offline();
	rcu_unregister_thread();
error:
	return ret;
}
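/*
 * Shutdown summary: main() joins, in order, the kernel, apps, registration,
 * dispatch, client, consumer (kernel, 32-bit UST, 64-bit UST) and health
 * threads; only once every worker has exited does it run cleanup() and
 * unregister the main thread from RCU. A failed join deliberately skips
 * cleanup(), since another thread may still be using shared state.
 */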