/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/poll.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>

#include "lttng-sessiond.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "testpoint.h"

#define CONSUMERD_FILE "lttng-consumerd"

const char default_home_dir[] = DEFAULT_HOME_DIR;
const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;

const char *opt_tracing_group;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon;
static int opt_no_kernel;
static int is_root;	/* Set to 1 if the daemon is running as root */
static pid_t ppid;	/* Parent PID for --sig-parent option */

/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};

static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};

static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};

/* Shared between threads */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* Global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;

/*
 * UST registration command queue. This queue is tied with a futex and uses an
 * N wakers / 1 waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_manage_apps and thread_dispatch_ust_registration threads interact
 * with this queue and the wait/wake scheme.
 */
static struct ust_cmd_queue ust_cmd_queue;
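
/*
 * Minimal sketch of the wake/wait pairing used with this queue, with the
 * calls as they appear later in this file (see futex.c/.h for details):
 *
 *   waker (registration thread):
 *     cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
 *     futex_nto1_wake(&ust_cmd_queue.futex);
 *
 *   waiter (dispatch thread):
 *     futex_nto1_prepare(&ust_cmd_queue.futex);
 *     node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
 *     futex_nto1_wait(&ust_cmd_queue.futex);  (only once the queue is empty)
 */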

/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;

int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;

static const char *module_proc_lttng = "/proc/lttng";

/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR   = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd tries to talk to consumer
 *                                    ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;

/* Used for the health monitoring of the session daemon. See health.h */
struct health_state health_thread_cmd;
struct health_state health_thread_app_manage;
struct health_state health_thread_app_reg;
struct health_state health_thread_kernel;

/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;

static
void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * Runtime env. var. overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}

/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
static int create_thread_poll_set(struct lttng_poll_event *events,
		int size)
{
	int ret;

	if (events == NULL || size == 0) {
		ret = -1;
		goto error;
	}

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
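
/*
 * Typical per-thread usage, mirroring the thread bodies below (sketch):
 *
 *   struct lttng_poll_event events;
 *
 *   ret = create_thread_poll_set(&events, 2);
 *   ...
 *   ret = lttng_poll_wait(&events, -1);
 *   ...
 *   if (check_thread_quit_pipe(pollfd, revents)) {
 *           err = 0;
 *           goto exit;
 *   }
 */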

/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
static int check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}

/*
 * Return group ID of the tracing group or -1 if not found.
 */
static gid_t allowed_group(void)
{
	struct group *grp;

	if (opt_tracing_group) {
		grp = getgrnam(opt_tracing_group);
	} else {
		grp = getgrnam(default_tracing_group);
	}
	if (!grp) {
		return -1;
	} else {
		return grp->gr_gid;
	}
}

/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	int ret, i;

	ret = pipe(thread_quit_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error;
		}
	}

error:
	return ret;
}

/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}

/*
 * Cleanup the daemon
 */
static void cleanup(void)
{
	int ret;
	char *cmd;
	struct ltt_session *sess, *stmp;

	DBG("Cleaning up");

	/* First thing first, stop all threads */
	utils_close_pipe(thread_quit_pipe);

	DBG("Removing %s directory", rundir);
	ret = asprintf(&cmd, "rm -rf %s", rundir);
	if (ret < 0) {
		ERR("asprintf failed. Something is really wrong!");
	}

	/* Remove lttng run directory */
	ret = system(cmd);
	if (ret < 0) {
		ERR("Unable to clean %s", rundir);
	}
	free(cmd);
	free(rundir);

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL session */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	DBG("Closing all UST sockets");
	ust_app_clean_list();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	/* <fun> */
	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
	/* </fun> */
}

/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}

/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}

/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
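
/*
 * Applications block on the futex word living in this shared memory map;
 * futex_wait_update() stores the new daemon state there and wakes the
 * waiters, so a newly started session daemon is noticed without the
 * applications having to poll (see shm.c and futex.c).
 */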

/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return total size of the buffer pointed by buf.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	int ret, buf_size;

	buf_size = size;

	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;

	return buf_size;

error:
	return ret;
}
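
/*
 * Usage sketch, mirroring the list commands further below: size the payload
 * first, then copy the variable-length data into it.
 *
 *   ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
 *   if (ret < 0) {
 *           goto setup_error;
 *   }
 *   memcpy(cmd_ctx->llm->payload, events,
 *           sizeof(struct lttng_event) * nb_events);
 */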

/*
 * Update the kernel poll set of all channel fds available over all tracing
 * sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}

/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}

				/*
				 * Have we already sent fds to the consumer? If yes, it means
				 * that tracing is started so it is safe to send our updated
				 * stream fds.
				 */
				if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
					struct lttng_ht_iter iter;
					struct consumer_socket *socket;

					cds_lfht_for_each_entry(ksess->consumer->socks->ht,
							&iter.iter, socket, node.node) {
						/* Code flow error */
						assert(socket->fd >= 0);

						pthread_mutex_lock(socket->lock);
						ret = kernel_consumer_send_channel_stream(socket,
								channel, ksess);
						pthread_mutex_unlock(socket->lock);
						if (ret < 0) {
							goto error;
						}
					}
				}
				goto error;
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}
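
/*
 * Call path recap for CPU hotplug: the kernel tracer flags a new stream on a
 * channel fd, thread_manage_kernel() (below) sees that fd become readable in
 * its poll set and calls update_kernel_stream(&kconsumer_data, pollfd), which
 * opens the new stream and, if tracing already started, pushes its fd to the
 * kernel consumer under the consumer socket lock.
 */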

/*
 * For each tracing session, update newly registered apps.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	session_lock_list();

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}

	session_unlock_list();
}

/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	if (testpoint(thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update(&health_thread_kernel);

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(thread_manage_kernel_before_loop)) {
		goto error;
	}

	while (1) {
		health_code_update(&health_thread_kernel);

		if (update_poll_flag == 1) {
			/*
			 * Reset number of fd in the poll set. Always 2 since there is
			 * the thread quit pipe and the kernel pipe.
			 */
			events.nb_fd = 2;

			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling on %d fds", events.nb_fd);

		/* Poll infinite value of time */
	restart:
		health_poll_update(&health_thread_kernel);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_kernel);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
					"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_kernel);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				ret = read(kernel_poll_pipe[0], &tmp, 1);
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						goto error;
					}
					break;
				}
				/*
				 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
				 * and unregister kernel stream at this point.
				 */
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
error_testpoint:
	if (err) {
		health_error(&health_thread_kernel);
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_exit(&health_thread_kernel);
	DBG("Kernel thread dying");
	return NULL;
}

/*
 * Signal pthread condition of the consumer data that the thread is ready.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread startup.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
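
/*
 * The waiter side of this condition lives in spawn_consumer_thread() below;
 * it takes cond_mutex and loops on the condition (sketch):
 *
 *   pthread_mutex_lock(&consumer_data->cond_mutex);
 *   while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
 *           ret = pthread_cond_timedwait(&consumer_data->cond,
 *                           &consumer_data->cond_mutex, &timeout);
 *   }
 *   pthread_mutex_unlock(&consumer_data->cond_mutex);
 */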

/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	/*
	 * Since the consumer thread can be spawned at any moment in time, we init
	 * the health to a poll status (1, which is a valid health over time).
	 * When the thread starts, we update here the health to a "code" path being
	 * an even value so this thread, when reaching a poll wait, does not
	 * trigger an error with an even value.
	 *
	 * Here is the use case we avoid.
	 *
	 * +1: the first poll update during initialization (main())
	 * +2 * x: multiple code updates once in this thread.
	 * +1: poll wait in this thread (being a good health state).
	 * == even number which after the wait period shows as a bad health.
	 *
	 * In a nutshell, the following poll update to the health state brings back
	 * the state to an even value meaning a code path.
	 */
	health_poll_update(&consumer_data->health);

	/*
	 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
	 * Nothing more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_update(&consumer_data->health);

	if (testpoint(thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_update(&consumer_data->health);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update(&consumer_data->health);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update(&consumer_data->health);

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0) {
			/* On error, signal condition and quit. */
			signal_consumer_condition(consumer_data, -1);
			PERROR("consumer connect");
			goto error;
		}
		signal_consumer_condition(consumer_data, 1);
		DBG("Consumer command socket ready");
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the kconsumerd error sock since we've established a connection */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	/* Infinite blocking call, waiting for transmission */
restart_poll:
	health_poll_update(&consumer_data->health);
	ret = lttng_poll_wait(&events, -1);
	health_poll_update(&consumer_data->health);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart_poll;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update(&consumer_data->health);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the kconsumerd socket */
		if (pollfd == sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket second poll error");
				goto error;
			}
		}
	}

	health_code_update(&consumer_data->health);

	/* Wait for any kconsumerd error */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		ERR("consumer closed the command socket");
		goto error;
	}

	ERR("consumer return code : %s", lttcomm_get_readable_code(-code));

exit:
error:
	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;

	lttng_poll_clean(&events);
error_poll:
	if (err) {
		health_error(&consumer_data->health);
		ERR("Health error occurred in %s", __func__);
	}
	health_exit(&consumer_data->health);
	DBG("consumer thread cleanup completed");
	return NULL;
}
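
/*
 * Startup handshake recap for this thread: the exec'd consumerd connects
 * back on err_sock (already listening before the exec), sends
 * LTTCOMM_CONSUMERD_COMMAND_SOCK_READY, the session daemon then connects
 * cmd_unix_sock_path and signals the condition so spawn_consumer_thread()
 * can return. Any later message (or close) on the error socket is fatal and
 * flips the consumerd state to CONSUMER_ERROR.
 */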

/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct ust_command ust_cmd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	if (testpoint(thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update(&health_thread_app_manage);

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update(&health_thread_app_manage);

	while (1) {
		DBG("Apps thread polling on %d fds", events.nb_fd);

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_update(&health_thread_app_manage);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_app_manage);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_app_manage);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					/* Empty pipe */
					ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
					if (ret < 0 || ret < sizeof(ust_cmd)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update(&health_thread_app_manage);

					/* Register application to the session daemon */
					ret = ust_app_register(&ust_cmd.reg_msg,
							ust_cmd.sock);
					if (ret == -ENOMEM) {
						goto error;
					} else if (ret < 0) {
						break;
					}

					health_code_update(&health_thread_app_manage);

					/*
					 * Validate UST version compatibility.
					 */
					ret = ust_app_validate_version(ust_cmd.sock);
					if (ret >= 0) {
						/*
						 * Add channel(s) and event(s) to newly registered apps
						 * from lttng global UST domain.
						 */
						update_ust_app(ust_cmd.sock);
					}

					health_code_update(&health_thread_app_manage);

					ret = ust_app_register_done(ust_cmd.sock);
					if (ret < 0) {
						/*
						 * If the registration is not possible, we simply
						 * unregister the apps and continue
						 */
						ust_app_unregister(ust_cmd.sock);
					} else {
						/*
						 * We only monitor the error events of the socket. This
						 * thread does not handle any incoming data from UST
						 * (POLLIN).
						 */
						ret = lttng_poll_add(&events, ust_cmd.sock,
								LPOLLERR | LPOLLHUP | LPOLLRDHUP);
						if (ret < 0) {
							goto error;
						}

						/* Set socket timeout for both receiving and sending */
						(void) lttcomm_setsockopt_rcv_timeout(ust_cmd.sock,
								app_socket_timeout);
						(void) lttcomm_setsockopt_snd_timeout(ust_cmd.sock,
								app_socket_timeout);

						DBG("Apps with sock %d added to poll set",
								ust_cmd.sock);
					}

					health_code_update(&health_thread_app_manage);

					break;
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
					break;
				}
			}

			health_code_update(&health_thread_app_manage);
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;
error_testpoint:
	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */
	if (err) {
		health_error(&health_thread_app_manage);
		ERR("Health error occurred in %s", __func__);
	}
	health_exit(&health_thread_app_manage);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}

/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret;
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
			/*
			 * Inform apps thread of the new application registration. This
			 * call is blocking so we can be assured that the data will be read
			 * at some point in time or wait to the end of the world :)
			 */
			if (apps_cmd_pipe[1] >= 0) {
				ret = write(apps_cmd_pipe[1], ust_cmd,
						sizeof(struct ust_command));
				if (ret < 0) {
					PERROR("write apps cmd pipe");
					if (errno == EBADF) {
						/*
						 * We can't inform the application thread to process
						 * registration. We will exit or else application
						 * registration will not occur and tracing will never
						 * start.
						 */
						goto error;
					}
				}
			} else {
				/* Application manager thread is not available. */
				ret = close(ust_cmd->sock);
				if (ret < 0) {
					PERROR("close ust_cmd sock");
				}
			}
			free(ust_cmd);
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
	}

error:
	DBG("Dispatch thread dying");
	return NULL;
}

/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Gets allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	if (testpoint(thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
			"Execution continues but there might be problems for already\n"
			"running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_update(&health_thread_app_reg);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_app_reg);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update(&health_thread_app_reg);

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}
					health_code_update(&health_thread_app_reg);
					ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
							sizeof(struct ust_register_msg));
					if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
						if (ret < 0) {
							PERROR("lttcomm_recv_unix_sock register apps");
						} else {
							ERR("Wrong size received on apps register");
						}
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}
					health_code_update(&health_thread_app_reg);

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This app will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

exit:
error:
	if (err) {
		health_error(&health_thread_app_reg);
		ERR("Health error occurred in %s", __func__);
	}

	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	health_exit(&health_thread_app_reg);

	return NULL;
}
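
/*
 * Registration pipeline recap: this thread accepts on apps_sock and enqueues
 * a struct ust_command on ust_cmd_queue; thread_dispatch_ust_registration()
 * (above) dequeues it and writes it to apps_cmd_pipe; thread_manage_apps()
 * (above) finally registers the application and monitors its socket for
 * errors.
 */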

/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret, clock_ret;
	struct timespec timeout;

	/* Make sure we set the readiness flag to 0 because we are NOT ready */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_condattr_init consumer data");
		goto error;
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_condattr_setclock consumer data");
		goto error;
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_cond_init consumer data");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
			consumer_data);
	if (ret != 0) {
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for sem_timedwait absolute timeout */
	clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid to
	 * increment the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	/*
	 * The following loop COULD be skipped in some conditions so this is why we
	 * set ret to 0 in order to make sure at least one round of the loop is
	 * done.
	 */
	ret = 0;

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
	 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
	 * possible. This loop does not take any chances and works with both of
	 * them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	/* Handle the timeout and error cases. */
	if (ret == ETIMEDOUT) {
		/*
		 * Call has timed out so we kill the kconsumerd_thread and return
		 * an error.
		 */
		ERR("Condition timed out. The consumer thread was never ready."
				" Killing it");
		ret = pthread_cancel(consumer_data->thread);
		if (ret < 0) {
			PERROR("pthread_cancel consumer thread");
		}
	} else if (ret != 0) {
		/* Something went wrong with the cond_wait/timedwait call. */
		PERROR("pthread_cond_wait failed consumer thread");
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}

/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;
	int ret;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			ERR("Error killing consumer daemon");
			return ret;
		}
	}
	return pthread_join(consumer_data->thread, &status);
}

/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		if (opt_verbose_consumer) {
			verbosity = "--verbose";
		} else {
			verbosity = "--quiet";
		}
		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fallback on the 32-bit one.
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3("	1) %s", consumerd64_bin);
			DBG3("	2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
			DBG3("	3) %s", consumerd32_bin);
			if (stat(consumerd64_bin, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = consumerd64_bin;
			} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
			} else if (stat(consumerd32_bin, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = consumerd32_bin;
			} else {
				DBG("Could not find any valid consumerd executable");
				break;
			}
			DBG("Using kernel consumer at: %s",  consumer_to_use);
			execl(consumer_to_use,
					"lttng-consumerd", verbosity, "-k",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			char *tmpnew = NULL;

			if (consumerd64_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd64_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s",  consumerd64_bin);
			ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd64_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			char *tmpnew = NULL;

			if (consumerd32_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd32_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s",  consumerd32_bin);
			ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd32_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		default:
			PERROR("unknown consumer type");
			exit(EXIT_FAILURE);
		}
		if (errno != 0) {
			PERROR("kernel start consumer exec");
		}
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}
error:
	return ret;
}
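
/*
 * The LD_LIBRARY_PATH handling above prepends the configured consumerd
 * libdir so the 32/64-bit consumerd resolves its matching libraries first.
 * With hypothetical values consumerd32_libdir = "/usr/lib32" and an existing
 * LD_LIBRARY_PATH of "/opt/lib", the exec'd process sees:
 *
 *   LD_LIBRARY_PATH=/usr/lib32:/opt/lib
 */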

/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	int ret, err;

	/*
	 * Set the listen() state on the socket since there is a possible race
	 * between the exec() of the consumer daemon and this call if placed in the
	 * consumer thread. See bug #366 for more details.
	 */
	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

end:
	return 0;

error:
	/* Cleanup already created socket on error. */
	if (consumer_data->err_sock >= 0) {
		err = close(consumer_data->err_sock);
		if (err < 0) {
			PERROR("close consumer data error socket");
		}
	}
	return ret;
}

/*
 * Compute health status of each consumer. If one of them is zero (bad
 * state), we return 0.
 */
static int check_consumer_health(void)
{
	int ret;

	ret = health_check_state(&kconsumer_data.health) &&
		health_check_state(&ustconsumer32_data.health) &&
		health_check_state(&ustconsumer64_data.health);

	DBG3("Health consumer check %d", ret);

	return ret;
}

/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		ret = -1;
		goto error_open;
	}

	/* Validate kernel version */
	ret = kernel_validate_version(kernel_tracer_fd);
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}
	kernel_tracer_fd = -1;
	return LTTNG_ERR_KERN_VERSION;

error_modules:
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}

error_open:
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;
	if (!is_root) {
		return LTTNG_ERR_NEED_ROOT_SESSIOND;
	} else {
		return LTTNG_ERR_KERN_NA;
	}
}

/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
	int ret;
	const char *dir_name;
	struct consumer_output *consumer;

	assert(session);
	assert(session->consumer);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		DBG3("Copying tracing session consumer output in kernel session");
		/*
		 * XXX: We should audit the session creation and what this function
		 * does "extra" in order to avoid a destroy since this function is used
		 * in the domain session creation (kernel and ust) only. Same for UST
		 * domain.
		 */
		if (session->kernel_session->consumer) {
			consumer_destroy_output(session->kernel_session->consumer);
		}
		session->kernel_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->kernel_session->consumer;
		dir_name = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_UST:
		DBG3("Copying tracing session consumer output in UST session");
		if (session->ust_session->consumer) {
			consumer_destroy_output(session->ust_session->consumer);
		}
		session->ust_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->ust_session->consumer;
		dir_name = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	/* Append correct directory to subdir */
	strncat(consumer->subdir, dir_name,
			sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
	DBG3("Copy session consumer subdir %s", consumer->subdir);

	ret = LTTNG_OK;

error:
	return ret;
}
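
/*
 * For example, assuming the defaults.h values DEFAULT_KERNEL_TRACE_DIR and
 * DEFAULT_UST_TRACE_DIR expand to "/kernel" and "/ust", a session subdir of
 * "auto-20120101-120000" becomes "auto-20120101-120000/kernel" for the
 * kernel domain copy and "auto-20120101-120000/ust" for the UST one.
 */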

/*
 * Create an UST session and add it to the session ust list.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	int ret;
	struct ltt_ust_session *lus = NULL;

	assert(session);
	assert(domain);
	assert(session->consumer);

	switch (domain->type) {
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->path, session->id, domain);
	if (lus == NULL) {
		ret = LTTNG_ERR_UST_SESS_FAIL;
		goto error;
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	session->ust_session = lus;

	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	return LTTNG_OK;

error:
	free(lus);
	session->ust_session = NULL;
	return ret;
}

/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
	int ret;

	DBG("Creating kernel session");

	ret = kernel_create_session(session, kernel_tracer_fd);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_SESS_FAIL;
		goto error;
	}

	/* Code flow safety */
	assert(session->kernel_session);

	/* Copy session output to the newly created Kernel session */
	ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	/* Create directory(ies) on local filesystem. */
	if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(
				session->kernel_session->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, session->uid, session->gid);
		if (ret < 0) {
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error;
			}
		}
	}

	session->kernel_session->uid = session->uid;
	session->kernel_session->gid = session->gid;

	return LTTNG_OK;

error:
	trace_kernel_destroy_session(session->kernel_session);
	session->kernel_session = NULL;
	return ret;
}

/*
 * Count number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
	unsigned int i = 0;
	struct ltt_session *session;

	DBG("Counting number of available session for UID %d GID %d",
			uid, gid);
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}
		i++;
	}
	return i;
}
2175 * Process the command requested by the lttng client within the command
2176 * context structure. This function make sure that the return structure (llm)
2177 * is set and ready for transmission before returning.
2179 * Return any error encountered or 0 for success.
2181 * "sock" is only used for special-case var. len data.
2183 static int process_client_msg(struct command_ctx
*cmd_ctx
, int sock
,
2187 int need_tracing_session
= 1;
2190 DBG("Processing client command %d", cmd_ctx
->lsm
->cmd_type
);
2194 switch (cmd_ctx
->lsm
->cmd_type
) {
2195 case LTTNG_CREATE_SESSION
:
2196 case LTTNG_DESTROY_SESSION
:
2197 case LTTNG_LIST_SESSIONS
:
2198 case LTTNG_LIST_DOMAINS
:
2199 case LTTNG_START_TRACE
:
2200 case LTTNG_STOP_TRACE
:
2201 case LTTNG_DATA_PENDING
:
2208 if (opt_no_kernel
&& need_domain
2209 && cmd_ctx
->lsm
->domain
.type
== LTTNG_DOMAIN_KERNEL
) {
2211 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
2213 ret
= LTTNG_ERR_KERN_NA
;
2218 /* Deny register consumer if we already have a spawned consumer. */
2219 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_REGISTER_CONSUMER
) {
2220 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
2221 if (kconsumer_data
.pid
> 0) {
2222 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
2223 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2226 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2230 * Check for command that don't needs to allocate a returned payload. We do
2231 * this here so we don't have to make the call for no payload at each
2234 switch(cmd_ctx
->lsm
->cmd_type
) {
2235 case LTTNG_LIST_SESSIONS
:
2236 case LTTNG_LIST_TRACEPOINTS
:
2237 case LTTNG_LIST_TRACEPOINT_FIELDS
:
2238 case LTTNG_LIST_DOMAINS
:
2239 case LTTNG_LIST_CHANNELS
:
2240 case LTTNG_LIST_EVENTS
:
2243 /* Setup lttng message with no payload */
2244 ret
= setup_lttng_msg(cmd_ctx
, 0);
2246 /* This label does not try to unlock the session */
2247 goto init_setup_error
;
2251 /* Commands that DO NOT need a session. */
2252 switch (cmd_ctx
->lsm
->cmd_type
) {
2253 case LTTNG_CREATE_SESSION
:
2254 case LTTNG_CALIBRATE
:
2255 case LTTNG_LIST_SESSIONS
:
2256 case LTTNG_LIST_TRACEPOINTS
:
2257 case LTTNG_LIST_TRACEPOINT_FIELDS
:
2258 need_tracing_session
= 0;
2261 DBG("Getting session %s by name", cmd_ctx
->lsm
->session
.name
);
2263 * We keep the session list lock across _all_ commands
2264 * for now, because the per-session lock does not
2265 * handle teardown properly.
2267 session_lock_list();
2268 cmd_ctx
->session
= session_find_by_name(cmd_ctx
->lsm
->session
.name
);
2269 if (cmd_ctx
->session
== NULL
) {
2270 if (cmd_ctx
->lsm
->session
.name
!= NULL
) {
2271 ret
= LTTNG_ERR_SESS_NOT_FOUND
;
2273 /* If no session name specified */
2274 ret
= LTTNG_ERR_SELECT_SESS
;
2278 /* Acquire lock for the session */
2279 session_lock(cmd_ctx
->session
);
2289 * Check domain type for specific "pre-action".
2291 switch (cmd_ctx
->lsm
->domain
.type
) {
2292 case LTTNG_DOMAIN_KERNEL
:
2294 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
2298 /* Kernel tracer check */
2299 if (kernel_tracer_fd
== -1) {
2300 /* Basically, load kernel tracer modules */
2301 ret
= init_kernel_tracer();
2307 /* Consumer is in an ERROR state. Report back to client */
2308 if (uatomic_read(&kernel_consumerd_state
) == CONSUMER_ERROR
) {
2309 ret
= LTTNG_ERR_NO_KERNCONSUMERD
;
2313 /* Need a session for kernel command */
2314 if (need_tracing_session
) {
2315 if (cmd_ctx
->session
->kernel_session
== NULL
) {
2316 ret
= create_kernel_session(cmd_ctx
->session
);
2318 ret
= LTTNG_ERR_KERN_SESS_FAIL
;
2323 /* Start the kernel consumer daemon */
2324 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
2325 if (kconsumer_data
.pid
== 0 &&
2326 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
&&
2327 cmd_ctx
->session
->start_consumer
) {
2328 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2329 ret
= start_consumerd(&kconsumer_data
);
2331 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
2334 uatomic_set(&kernel_consumerd_state
, CONSUMER_STARTED
);
2336 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2340 * The consumer was just spawned so we need to add the socket to
2341 * the consumer output of the session if exist.
2343 ret
= consumer_create_socket(&kconsumer_data
,
2344 cmd_ctx
->session
->kernel_session
->consumer
);
2351 case LTTNG_DOMAIN_UST
:
2353 /* Consumer is in an ERROR state. Report back to client */
2354 if (uatomic_read(&ust_consumerd_state
) == CONSUMER_ERROR
) {
2355 ret
= LTTNG_ERR_NO_USTCONSUMERD
;
2359 if (need_tracing_session
) {
2360 /* Create UST session if none exist. */
2361 if (cmd_ctx
->session
->ust_session
== NULL
) {
2362 ret
= create_ust_session(cmd_ctx
->session
,
2363 &cmd_ctx
->lsm
->domain
);
2364 if (ret
!= LTTNG_OK
) {
2369 /* Start the UST consumer daemons */
2371 pthread_mutex_lock(&ustconsumer64_data
.pid_mutex
);
2372 if (consumerd64_bin
[0] != '\0' &&
2373 ustconsumer64_data
.pid
== 0 &&
2374 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
&&
2375 cmd_ctx
->session
->start_consumer
) {
2376 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
2377 ret
= start_consumerd(&ustconsumer64_data
);
2379 ret
= LTTNG_ERR_UST_CONSUMER64_FAIL
;
2380 uatomic_set(&ust_consumerd64_fd
, -EINVAL
);
2384 uatomic_set(&ust_consumerd64_fd
, ustconsumer64_data
.cmd_sock
);
2385 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
2387 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
2391 * Setup socket for consumer 64 bit. No need for atomic access
2392 * since it was set above and can ONLY be set in this thread.
2394 ret
= consumer_create_socket(&ustconsumer64_data
,
2395 cmd_ctx
->session
->ust_session
->consumer
);
2401 if (consumerd32_bin
[0] != '\0' &&
2402 ustconsumer32_data
.pid
== 0 &&
2403 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
&&
2404 cmd_ctx
->session
->start_consumer
) {
2405 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
2406 ret
= start_consumerd(&ustconsumer32_data
);
2408 ret
= LTTNG_ERR_UST_CONSUMER32_FAIL
;
2409 uatomic_set(&ust_consumerd32_fd
, -EINVAL
);
2413 uatomic_set(&ust_consumerd32_fd
, ustconsumer32_data
.cmd_sock
);
2414 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
2416 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
2420 * Setup socket for consumer 64 bit. No need for atomic access
2421 * since it was set above and can ONLY be set in this thread.
2423 ret
= consumer_create_socket(&ustconsumer32_data
,
2424 cmd_ctx
->session
->ust_session
->consumer
);
2436 /* Validate consumer daemon state when start/stop trace command */
2437 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_START_TRACE
||
2438 cmd_ctx
->lsm
->cmd_type
== LTTNG_STOP_TRACE
) {
2439 switch (cmd_ctx
->lsm
->domain
.type
) {
2440 case LTTNG_DOMAIN_UST
:
2441 if (uatomic_read(&ust_consumerd_state
) != CONSUMER_STARTED
) {
2442 ret
= LTTNG_ERR_NO_USTCONSUMERD
;
2446 case LTTNG_DOMAIN_KERNEL
:
2447 if (uatomic_read(&kernel_consumerd_state
) != CONSUMER_STARTED
) {
2448 ret
= LTTNG_ERR_NO_KERNCONSUMERD
;
2456 * Check that the UID or GID match that of the tracing session.
2457 * The root user can interact with all sessions.
2459 if (need_tracing_session
) {
2460 if (!session_access_ok(cmd_ctx
->session
,
2461 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
2462 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
))) {
2463 ret
= LTTNG_ERR_EPERM
;
2468 /* Process by command type */
2469 switch (cmd_ctx
->lsm
->cmd_type
) {
2470 case LTTNG_ADD_CONTEXT
:
2472 ret
= cmd_add_context(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2473 cmd_ctx
->lsm
->u
.context
.channel_name
,
2474 &cmd_ctx
->lsm
->u
.context
.ctx
, kernel_poll_pipe
[1]);
2477 case LTTNG_DISABLE_CHANNEL
:
2479 ret
= cmd_disable_channel(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2480 cmd_ctx
->lsm
->u
.disable
.channel_name
);
2483 case LTTNG_DISABLE_EVENT
:
2485 ret
= cmd_disable_event(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2486 cmd_ctx
->lsm
->u
.disable
.channel_name
,
2487 cmd_ctx
->lsm
->u
.disable
.name
);
2490 case LTTNG_DISABLE_ALL_EVENT
:
2492 DBG("Disabling all events");
2494 ret
= cmd_disable_event_all(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2495 cmd_ctx
->lsm
->u
.disable
.channel_name
);
2498 case LTTNG_DISABLE_CONSUMER
:
2500 ret
= cmd_disable_consumer(cmd_ctx
->lsm
->domain
.type
, cmd_ctx
->session
);
2503 case LTTNG_ENABLE_CHANNEL
:
2505 ret
= cmd_enable_channel(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2506 &cmd_ctx
->lsm
->u
.channel
.chan
, kernel_poll_pipe
[1]);
	case LTTNG_ENABLE_CONSUMER:
	{
		/*
		 * XXX: 0 means that this URI should be applied on the session.
		 * Should be a DOMAIN enum.
		 */
		ret = cmd_enable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
		if (ret != LTTNG_OK) {
			goto error;
		}

		if (cmd_ctx->lsm->domain.type == 0) {
			/* Add the URI for the UST session if a consumer is present. */
			if (cmd_ctx->session->ust_session &&
					cmd_ctx->session->ust_session->consumer) {
				ret = cmd_enable_consumer(LTTNG_DOMAIN_UST, cmd_ctx->session);
			} else if (cmd_ctx->session->kernel_session &&
					cmd_ctx->session->kernel_session->consumer) {
				ret = cmd_enable_consumer(LTTNG_DOMAIN_KERNEL,
						cmd_ctx->session);
			}
		}
		break;
	}
	case LTTNG_ENABLE_EVENT:
	{
		ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_ENABLE_ALL_EVENT:
	{
		DBG("Enabling all events");

		ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_LIST_TRACEPOINTS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				sizeof(struct lttng_event) * nb_events);

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	{
		struct lttng_event_field *fields;
		ssize_t nb_fields;

		nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
				&fields);
		if (nb_fields < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_fields;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the field list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx,
				sizeof(struct lttng_event_field) * nb_fields);
		if (ret < 0) {
			free(fields);
			goto setup_error;
		}

		/* Copy field list into message payload */
		memcpy(cmd_ctx->llm->payload, fields,
				sizeof(struct lttng_event_field) * nb_fields);

		free(fields);

		ret = LTTNG_OK;
		break;
	}
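	/*
	 * Illustrative sketch (not shipped code) of how a client can walk
	 * the reply built by the two cases above: the payload is a packed
	 * array placed right after the fixed lttcomm_lttng_msg header.
	 *
	 *	struct lttng_event_field *fields =
	 *			(struct lttng_event_field *) llm->payload;
	 *	for (i = 0; i < nb_fields; i++) {
	 *		printf("%s\n", fields[i].field_name);
	 *	}
	 */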
	case LTTNG_SET_CONSUMER_URI:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri == 0) {
			ret = LTTNG_ERR_INVALID;
			goto error;
		}

		uris = zmalloc(len);
		if (uris == NULL) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		/* Receive variable len data */
		DBG("Receiving %zu URI(s) from client ...", nb_uri);
		ret = lttcomm_recv_unix_sock(sock, uris, len);
		if (ret <= 0) {
			DBG("No URIs received from client... continuing");
			free(uris);
			ret = LTTNG_ERR_SESSION_FAIL;
			goto error;
		}

		ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				nb_uri, uris);
		if (ret != LTTNG_OK) {
			free(uris);
			goto error;
		}

		/*
		 * XXX: 0 means that this URI should be applied on the session.
		 * Should be a DOMAIN enum.
		 */
		if (cmd_ctx->lsm->domain.type == 0) {
			/* Add the URI for the UST session if a consumer is present. */
			if (cmd_ctx->session->ust_session &&
					cmd_ctx->session->ust_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
						nb_uri, uris);
			} else if (cmd_ctx->session->kernel_session &&
					cmd_ctx->session->kernel_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
						cmd_ctx->session, nb_uri, uris);
			}
		}

		free(uris);
		break;
	}
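	/*
	 * Protocol note with a client-side sketch (hypothetical, error
	 * handling omitted): variable-length commands such as the case above
	 * are sent in two phases. The fixed-size lttcomm_session_msg
	 * announces the element count in u.uri.size, then the URI array
	 * follows on the same socket.
	 *
	 *	lsm.u.uri.size = nb_uri;
	 *	lttcomm_send_unix_sock(sock, &lsm, sizeof(lsm));
	 *	lttcomm_send_unix_sock(sock, uris,
	 *			nb_uri * sizeof(struct lttng_uri));
	 */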
	case LTTNG_START_TRACE:
	{
		ret = cmd_start_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_STOP_TRACE:
	{
		ret = cmd_stop_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_CREATE_SESSION:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				free(uris);
				ret = LTTNG_ERR_SESSION_FAIL;
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				free(uris);
				ret = LTTNG_ERR_SESSION_FAIL;
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
				&cmd_ctx->creds);

		free(uris);
		break;
	}
	case LTTNG_DESTROY_SESSION:
	{
		ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);

		/* Set session to NULL so we do not unlock it after free. */
		cmd_ctx->session = NULL;
		break;
	}
	case LTTNG_LIST_DOMAINS:
	{
		ssize_t nb_dom;
		struct lttng_domain *domains;

		nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
		if (nb_dom < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_dom;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
		if (ret < 0) {
			free(domains);
			goto setup_error;
		}

		/* Copy domain list into message payload */
		memcpy(cmd_ctx->llm->payload, domains,
				nb_dom * sizeof(struct lttng_domain));

		free(domains);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_CHANNELS:
	{
		int nb_chan;
		struct lttng_channel *channels;

		nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, &channels);
		if (nb_chan < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_chan;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
		if (ret < 0) {
			free(channels);
			goto setup_error;
		}

		/* Copy channel list into message payload */
		memcpy(cmd_ctx->llm->payload, channels,
				nb_chan * sizeof(struct lttng_channel));

		free(channels);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_EVENTS:
	{
		ssize_t nb_event;
		struct lttng_event *events = NULL;

		nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				cmd_ctx->lsm->u.list.channel_name, &events);
		if (nb_event < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_event;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				nb_event * sizeof(struct lttng_event));

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SESSIONS:
	{
		unsigned int nr_sessions;

		session_lock_list();
		nr_sessions = lttng_sessions_count(
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
		if (ret < 0) {
			session_unlock_list();
			goto setup_error;
		}

		/* Fill the session array */
		cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		session_unlock_list();

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_CALIBRATE:
	{
		ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
				&cmd_ctx->lsm->u.calibrate);
		break;
	}
	case LTTNG_REGISTER_CONSUMER:
	{
		struct consumer_data *cdata;

		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			cdata = &kconsumer_data;
			break;
		default:
			ret = LTTNG_ERR_UND;
			goto error;
		}

		ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.reg.path, cdata);
		break;
	}
	case LTTNG_ENABLE_EVENT_WITH_FILTER:
	{
		struct lttng_filter_bytecode *bytecode;

		if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
		if (bytecode == NULL) {
			ret = LTTNG_ERR_FILTER_NOMEM;
			goto error;
		}

		/* Receive var. len. data */
		DBG("Receiving var len data from client ...");
		ret = lttcomm_recv_unix_sock(sock, bytecode,
				cmd_ctx->lsm->u.enable.bytecode_len);
		if (ret <= 0) {
			DBG("Nothing recv() from client var len data... continuing");
			free(bytecode);
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		if (bytecode->len + sizeof(*bytecode)
				!= cmd_ctx->lsm->u.enable.bytecode_len) {
			free(bytecode);
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_DATA_PENDING:
	{
		ret = cmd_data_pending(cmd_ctx->session);
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		break;
	}
error:
	if (cmd_ctx->llm == NULL) {
		DBG("Missing llm structure. Allocating one.");
		if (setup_lttng_msg(cmd_ctx, 0) < 0) {
			goto setup_error;
		}
	}
	/* Set return code */
	cmd_ctx->llm->ret_code = ret;

setup_error:
	if (cmd_ctx->session) {
		session_unlock(cmd_ctx->session);
	}
	if (need_tracing_session) {
		session_unlock_list();
	}
	return ret;
}
/*
 * Thread managing health check socket.
 */
static void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_health_msg msg;
	struct lttcomm_health_data reply;

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(health_unix_sock_path);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		ret = -1;
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			new_sock = -1;
			continue;
		}

		rcu_thread_online();

		switch (msg.component) {
		case LTTNG_HEALTH_CMD:
			reply.ret_code = health_check_state(&health_thread_cmd);
			break;
		case LTTNG_HEALTH_APP_MANAGE:
			reply.ret_code = health_check_state(&health_thread_app_manage);
			break;
		case LTTNG_HEALTH_APP_REG:
			reply.ret_code = health_check_state(&health_thread_app_reg);
			break;
		case LTTNG_HEALTH_KERNEL:
			reply.ret_code = health_check_state(&health_thread_kernel);
			break;
		case LTTNG_HEALTH_CONSUMER:
			reply.ret_code = check_consumer_health();
			break;
		case LTTNG_HEALTH_ALL:
			reply.ret_code =
				health_check_state(&health_thread_app_manage) &&
				health_check_state(&health_thread_app_reg) &&
				health_check_state(&health_thread_cmd) &&
				health_check_state(&health_thread_kernel) &&
				check_consumer_health();
			break;
		default:
			reply.ret_code = LTTNG_ERR_UND;
			break;
		}

		rcu_thread_offline();

		/*
		 * Flip ret value since 0 is a success and 1 indicates a bad health for
		 * the client where in the sessiond it is the opposite. Again, this is
		 * just to make things easier for us poor developers who enjoy a lot
		 * of pain.
		 */
		if (reply.ret_code == 0 || reply.ret_code == 1) {
			reply.ret_code = !reply.ret_code;
		}

		DBG2("Health check return value %d", reply.ret_code);

		ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
		new_sock = -1;
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(health_unix_sock_path);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (new_sock >= 0) {
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);

	rcu_unregister_thread();
	return NULL;
}
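/*
 * Minimal health-check client sketch, assuming the message layout used by
 * the thread above (lttcomm_health_msg in, lttcomm_health_data out). This
 * is illustrative only; error handling is omitted.
 *
 *	int fd = lttcomm_connect_unix_sock(health_unix_sock_path);
 *	struct lttcomm_health_msg msg = { .component = LTTNG_HEALTH_ALL };
 *	lttcomm_send_unix_sock(fd, &msg, sizeof(msg));
 *	struct lttcomm_health_data reply;
 *	lttcomm_recv_unix_sock(fd, &reply, sizeof(reply));
 *	printf("healthy: %s\n", reply.ret_code == 0 ? "yes" : "no");
 *	close(fd);
 */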
/*
 * This thread manages all client requests on the Unix client socket used
 * for communication with the lttng command line tool.
 */
static void *thread_manage_clients(void *data)
{
	int sock = -1, ret, i, pollfd, err = -1;
	int sock_error;
	uint32_t revents, nb_fd;
	struct command_ctx *cmd_ctx = NULL;
	struct lttng_poll_event events;

	DBG("[thread] Manage client started");

	rcu_register_thread();

	if (testpoint(thread_manage_clients)) {
		goto error_testpoint;
	}

	health_code_update(&health_thread_cmd);

	ret = lttcomm_listen_unix_sock(client_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Notify parent pid that we are ready to accept command for client side.
	 */
	if (opt_sig_parent) {
		kill(ppid, SIGUSR1);
	}

	if (testpoint(thread_manage_clients_before_loop)) {
		goto error;
	}

	health_code_update(&health_thread_cmd);

	while (1) {
		DBG("Accepting client command ...");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_update(&health_thread_cmd);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_cmd);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_cmd);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == client_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Client socket poll error");
					goto error;
				}
			}
		}

		DBG("Wait for client response");

		health_code_update(&health_thread_cmd);

		sock = lttcomm_accept_unix_sock(client_sock);
		if (sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(sock);

		/* Set socket option for credentials retrieval */
		ret = lttcomm_setsockopt_creds_unix_sock(sock);
		if (ret < 0) {
			goto error;
		}

		/* Allocate context command to process the client request */
		cmd_ctx = zmalloc(sizeof(struct command_ctx));
		if (cmd_ctx == NULL) {
			PERROR("zmalloc cmd_ctx");
			goto error;
		}

		/* Allocate data buffer for reception */
		cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
		if (cmd_ctx->lsm == NULL) {
			PERROR("zmalloc cmd_ctx->lsm");
			goto error;
		}

		cmd_ctx->llm = NULL;
		cmd_ctx->session = NULL;

		health_code_update(&health_thread_cmd);

		/*
		 * Data is received from the lttng client. The struct
		 * lttcomm_session_msg (lsm) contains the command and data request of
		 * the client.
		 */
		DBG("Receiving data from client ...");
		ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
				sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(sock);
			if (ret) {
				PERROR("close");
			}
			sock = -1;
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update(&health_thread_cmd);

		// TODO: Validate cmd_ctx including sanity check for
		// security purpose.

		rcu_thread_online();
		/*
		 * This function dispatches the work to the kernel or userspace tracer
		 * libs and fills the lttcomm_lttng_msg data structure with all the
		 * information needed by the client. The command context struct
		 * contains everything this function may need.
		 */
		ret = process_client_msg(cmd_ctx, sock, &sock_error);
		rcu_thread_offline();
		if (ret < 0) {
			if (sock_error) {
				ret = close(sock);
				if (ret) {
					PERROR("close");
				}
				sock = -1;
			}
			/*
			 * TODO: Inform client somehow of the fatal error. At
			 * this point, ret < 0 means that a zmalloc failed
			 * (ENOMEM). Error detected but still accept
			 * command, unless a socket error has been
			 * detected.
			 */
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update(&health_thread_cmd);

		DBG("Sending response (size: %d, retcode: %s)",
				cmd_ctx->lttng_msg_size,
				lttng_strerror(-cmd_ctx->llm->ret_code));
		ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
		if (ret < 0) {
			ERR("Failed to send data back to client");
		}

		/* End of transmission */
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		sock = -1;

		clean_command_ctx(&cmd_ctx);

		health_code_update(&health_thread_cmd);
	}

exit:
error:
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	clean_command_ctx(&cmd_ctx);

error_listen:
error_create_poll:
error_testpoint:
	unlink(client_unix_sock_path);
	if (client_sock >= 0) {
		ret = close(client_sock);
		if (ret) {
			PERROR("close");
		}
	}

	if (err) {
		health_error(&health_thread_cmd);
		ERR("Health error occurred in %s", __func__);
	}

	health_exit(&health_thread_cmd);

	DBG("Client thread dying");

	rcu_unregister_thread();
	return NULL;
}
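/*
 * Matching client-side sketch for the thread above (hypothetical, error
 * handling omitted; one command is one connect/send/recv round trip):
 *
 *	int fd = lttcomm_connect_unix_sock(client_unix_sock_path);
 *	struct lttcomm_session_msg lsm = { .cmd_type = LTTNG_LIST_SESSIONS };
 *	lttcomm_send_unix_sock(fd, &lsm, sizeof(lsm));
 *	struct lttcomm_lttng_msg llm;
 *	lttcomm_recv_unix_sock(fd, &llm, sizeof(llm));
 *	// then read llm.data_size more bytes of payload
 *	close(fd);
 */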
/*
 * usage function on stderr
 */
static void usage(void)
{
	fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
	fprintf(stderr, "  -h, --help                         Display this usage.\n");
	fprintf(stderr, "  -c, --client-sock PATH             Specify path for the client unix socket\n");
	fprintf(stderr, "  -a, --apps-sock PATH               Specify path for apps unix socket\n");
	fprintf(stderr, "      --kconsumerd-err-sock PATH     Specify path for the kernel consumer error socket\n");
	fprintf(stderr, "      --kconsumerd-cmd-sock PATH     Specify path for the kernel consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
	fprintf(stderr, "      --consumerd32-path PATH        Specify path for the 32-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd32-libdir PATH      Specify path for the 32-bit UST consumer daemon libraries\n");
	fprintf(stderr, "      --consumerd64-path PATH        Specify path for the 64-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd64-libdir PATH      Specify path for the 64-bit UST consumer daemon libraries\n");
	fprintf(stderr, "  -d, --daemonize                    Start as a daemon.\n");
	fprintf(stderr, "  -g, --group NAME                   Specify the tracing group name. (default: tracing)\n");
	fprintf(stderr, "  -V, --version                      Show version number.\n");
	fprintf(stderr, "  -S, --sig-parent                   Send SIGUSR1 to parent pid to notify readiness.\n");
	fprintf(stderr, "  -q, --quiet                        No output at all.\n");
	fprintf(stderr, "  -v, --verbose                      Verbose mode. Activate DBG() macro.\n");
	fprintf(stderr, "      --verbose-consumer             Verbose mode for consumer. Activate DBG() macro.\n");
	fprintf(stderr, "      --no-kernel                    Disable kernel tracer\n");
}
/*
 * daemon argument parsing
 */
static int parse_args(int argc, char **argv)
{
	int c;

	static struct option long_options[] = {
		{ "client-sock", 1, 0, 'c' },
		{ "apps-sock", 1, 0, 'a' },
		{ "kconsumerd-cmd-sock", 1, 0, 'C' },
		{ "kconsumerd-err-sock", 1, 0, 'E' },
		{ "ustconsumerd32-cmd-sock", 1, 0, 'G' },
		{ "ustconsumerd32-err-sock", 1, 0, 'H' },
		{ "ustconsumerd64-cmd-sock", 1, 0, 'D' },
		{ "ustconsumerd64-err-sock", 1, 0, 'F' },
		{ "consumerd32-path", 1, 0, 'u' },
		{ "consumerd32-libdir", 1, 0, 'U' },
		{ "consumerd64-path", 1, 0, 't' },
		{ "consumerd64-libdir", 1, 0, 'T' },
		{ "daemonize", 0, 0, 'd' },
		{ "sig-parent", 0, 0, 'S' },
		{ "help", 0, 0, 'h' },
		{ "group", 1, 0, 'g' },
		{ "version", 0, 0, 'V' },
		{ "quiet", 0, 0, 'q' },
		{ "verbose", 0, 0, 'v' },
		{ "verbose-consumer", 0, 0, 'Z' },
		{ "no-kernel", 0, 0, 'N' },
		{ NULL, 0, 0, 0 }
	};

	while (1) {
		int option_index = 0;
		/*
		 * Each short option taking an argument needs a trailing ':'
		 * here; the original string was missing a few of them.
		 */
		c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:C:E:D:F:G:H:Zu:U:t:T:",
				long_options, &option_index);
		if (c == -1) {
			break;
		}

		switch (c) {
		case 0:
			fprintf(stderr, "option %s", long_options[option_index].name);
			if (optarg) {
				fprintf(stderr, " with arg %s\n", optarg);
			}
			break;
		case 'c':
			snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'a':
			snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'd':
			opt_daemon = 1;
			break;
		case 'g':
			opt_tracing_group = optarg;
			break;
		case 'h':
			usage();
			exit(EXIT_FAILURE);
		case 'V':
			fprintf(stdout, "%s\n", VERSION);
			exit(EXIT_SUCCESS);
		case 'S':
			opt_sig_parent = 1;
			break;
		case 'E':
			snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'C':
			snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'F':
			snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'D':
			snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'H':
			snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'G':
			snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'N':
			opt_no_kernel = 1;
			break;
		case 'q':
			lttng_opt_quiet = 1;
			break;
		case 'v':
			/* Verbose level can increase using multiple -v */
			lttng_opt_verbose += 1;
			break;
		case 'Z':
			opt_verbose_consumer += 1;
			break;
		case 'u':
			consumerd32_bin = optarg;
			break;
		case 'U':
			consumerd32_libdir = optarg;
			break;
		case 't':
			consumerd64_bin = optarg;
			break;
		case 'T':
			consumerd64_libdir = optarg;
			break;
		default:
			/*
			 * Unknown option or other error.
			 * Error is printed by getopt, just return.
			 */
			return -1;
		}
	}

	return 0;
}
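/*
 * Example invocations, using only flags documented in usage():
 *
 *	lttng-sessiond -v --no-kernel    (foreground, verbose, no kernel tracer)
 *	lttng-sessiond -d -g mytracing   (daemonized, custom tracing group)
 */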
/*
 * Create the two sockets needed by the daemon:
 * apps_sock - The communication socket for all UST apps.
 * client_sock - The communication socket for the cli tool (lttng).
 */
static int init_daemon_socket(void)
{
	int ret = 0;
	mode_t old_umask;

	old_umask = umask(0);

	/* Create client tool unix socket */
	client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
	if (client_sock < 0) {
		ERR("Create unix sock failed: %s", client_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(client_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", client_sock);
	}

	/* File permission MUST be 660 */
	ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", client_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	/* Create the application unix socket */
	apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
	if (apps_sock < 0) {
		ERR("Create unix sock failed: %s", apps_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(apps_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", apps_sock);
	}

	/* File permission MUST be 666 */
	ret = chmod(apps_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", apps_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	DBG3("Session daemon client socket %d and application socket %d created",
			client_sock, apps_sock);

end:
	umask(old_umask);
	return ret;
}
/*
 * Check if the global socket is available, and if a daemon is answering at the
 * other side. If yes, an error is returned.
 */
static int check_existing_daemon(void)
{
	/* Is there anybody out there ? */
	if (lttng_session_daemon_alive()) {
		return -EEXIST;
	}

	return 0;
}
/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	gid_t gid;

	ret = allowed_group();
	if (ret < 0) {
		WARN("No tracing group detected");
		ret = 0;
		goto end;
	}

	gid = ret;

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
		PERROR("chown");
	}

	/* Ensure tracing group can search the run dir */
	ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
	if (ret < 0) {
		ERR("Unable to set permissions on %s", rundir);
		PERROR("chmod");
	}

	/* lttng client socket path */
	ret = chown(client_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", client_unix_sock_path);
		PERROR("chown");
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 32-bit ustconsumer error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
		PERROR("chown");
	}

	DBG("All permissions are set");

end:
	return ret;
}
/*
 * Create the lttng run directory needed for all global sockets and pipe.
 */
static int create_lttng_rundir(const char *rundir)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", rundir);

	ret = mkdir(rundir, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", rundir);
			goto error;
		} else {
			ret = 0;
		}
	}

error:
	return ret;
}
/*
 * Setup sockets and directory needed by the consumerd communication with the
 * session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data,
		const char *rundir)
{
	int ret;
	char path[PATH_MAX];

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
		break;
	case LTTNG_CONSUMER64_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
		break;
	case LTTNG_CONSUMER32_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			PERROR("mkdir");
			ERR("Failed to create %s", path);
			goto error;
		}
		ret = 0;
	}

	/* Create the consumerd error unix socket */
	consumer_data->err_sock =
		lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
		PERROR("chmod");
		goto error;
	}

error:
	return ret;
}
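/*
 * For reference, with the default root rundir the DEFAULT_*_PATH macros
 * above typically expand to directories such as the following (exact
 * values depend on the build-time defaults):
 *
 *	/var/run/lttng/kconsumerd
 *	/var/run/lttng/ustconsumerd64
 *	/var/run/lttng/ustconsumerd32
 */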
/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGPIPE:
		DBG("SIGPIPE caught");
		return;
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	default:
		break;
	}
}

/*
 * Setup signal handler for :
 *      SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_handler = sighandler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");

	return ret;
}
/*
 * Set open files limit to unlimited. This daemon can open a large number of
 * file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("failed to set open files limit");
	}
}
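/*
 * A quick sanity check for the limit set above (sketch, not part of the
 * daemon):
 *
 *	struct rlimit lim;
 *	if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
 *		printf("nofile: %llu\n", (unsigned long long) lim.rlim_cur);
 *	}
 */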
int main(int argc, char **argv)
{
	int ret = 0;
	void *status;
	const char *home_path, *env_app_timeout;

	init_kernel_workarounds();

	rcu_register_thread();

	setup_consumerd_path();

	/* Parse arguments */
	progname = argv[0];
	if ((ret = parse_args(argc, argv)) < 0) {
		goto error;
	}

	/* Daemonize */
	if (opt_daemon) {
		int i;

		/*
		 * fork
		 * child: setsid, close FD 0, 1, 2, chdir /
		 * parent: exit (if fork is successful)
		 */
		ret = daemon(0, 0);
		if (ret < 0) {
			PERROR("daemon");
			goto error;
		}
		/*
		 * We are in the child. Make sure all other file
		 * descriptors are closed, in case we are called with
		 * more opened file descriptors than the standard ones.
		 */
		for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
			(void) close(i);
		}
	}

	/* Create thread quit pipe */
	if ((ret = init_thread_quit_pipe()) < 0) {
		goto error;
	}

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

	if (is_root) {
		rundir = strdup(DEFAULT_LTTNG_RUNDIR);

		/* Create global run dir with root access */
		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_UNIX_SOCK);
		}

		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
		}

		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
		}

		/* Setup kernel consumerd path */
		snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
		snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);

		DBG2("Kernel consumer err path: %s",
				kconsumer_data.err_unix_sock_path);
		DBG2("Kernel consumer cmd path: %s",
				kconsumer_data.cmd_unix_sock_path);
	} else {
		home_path = get_home_dir();
		if (home_path == NULL) {
			/* TODO: Add --socket PATH option */
			ERR("Can't get HOME directory for sockets creation.");
			ret = -EPERM;
			goto error;
		}

		/*
		 * Create rundir from home path. This will create something like
		 * $HOME/.lttng
		 */
		ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
		if (ret < 0) {
			ret = -ENOMEM;
			goto error;
		}

		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
		}

		/* Set the cli tool unix socket path */
		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
		}

		/* Set health check Unix path */
		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
		}
	}

	/* Set consumer initial state */
	kernel_consumerd_state = CONSUMER_STOPPED;
	ust_consumerd_state = CONSUMER_STOPPED;

	DBG("Client socket path %s", client_unix_sock_path);
	DBG("Application socket path %s", apps_unix_sock_path);
	DBG("LTTng run directory path: %s", rundir);

	/* 32 bits consumerd path setup */
	snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 32 bits err path: %s",
			ustconsumer32_data.err_unix_sock_path);
	DBG2("UST consumer 32 bits cmd path: %s",
			ustconsumer32_data.cmd_unix_sock_path);

	/* 64 bits consumerd path setup */
	snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 64 bits err path: %s",
			ustconsumer64_data.err_unix_sock_path);
	DBG2("UST consumer 64 bits cmd path: %s",
			ustconsumer64_data.cmd_unix_sock_path);

	/*
	 * See if daemon already exists.
	 */
	if ((ret = check_existing_daemon()) < 0) {
		ERR("Already running daemon.\n");
		/*
		 * We do not goto exit because we must not cleanup()
		 * because a daemon is already running.
		 */
		goto error;
	}

	/*
	 * Init UST app hash table. Alloc hash table before this point since
	 * cleanup() can get called after that point.
	 */
	ust_app_ht_alloc();

	/* After this point, we can safely call cleanup() with "goto exit" */

	/*
	 * These actions must be executed as root. We do that *after* setting up
	 * the sockets path because we MUST make the check for another daemon using
	 * those paths *before* trying to set the kernel consumer sockets and init
	 * kernel tracer.
	 */
	if (is_root) {
		ret = set_consumer_sockets(&kconsumer_data, rundir);
		if (ret < 0) {
			goto exit;
		}

		/* Setup kernel tracer */
		if (!opt_no_kernel) {
			init_kernel_tracer();
		}

		/* Set ulimit for open files */
		set_ulimit();
	}
	/* init lttng_fd tracking must be done after set_ulimit. */
	lttng_fd_init();

	ret = set_consumer_sockets(&ustconsumer64_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	ret = set_consumer_sockets(&ustconsumer32_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	if ((ret = set_signal_handler()) < 0) {
		goto exit;
	}

	/* Setup the needed unix socket */
	if ((ret = init_daemon_socket()) < 0) {
		goto exit;
	}

	/* Set credentials to socket */
	if (is_root && ((ret = set_permissions(rundir)) < 0)) {
		goto exit;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (opt_sig_parent) {
		ppid = getppid();
	}

	/* Setup the kernel pipe for waking up the kernel thread */
	if (is_root && !opt_no_kernel) {
		if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
			goto exit;
		}
	}

	/* Setup the thread apps communication pipe. */
	if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
		goto exit;
	}

	/* Init UST command queue. */
	cds_wfq_init(&ust_cmd_queue.queue);

	/*
	 * Get session list pointer. This pointer MUST NOT be free'd. This list
	 * is statically declared in session.c
	 */
	session_list_ptr = session_get_list();

	/* Set up max poll set size */
	lttng_poll_set_max_size();

	cmd_init();

	/* Init all health thread counters. */
	health_init(&health_thread_cmd);
	health_init(&health_thread_kernel);
	health_init(&health_thread_app_manage);
	health_init(&health_thread_app_reg);

	/*
	 * Init health counters of the consumer thread. We do a quick hack here so
	 * that the state of the consumer health is fine even if the thread is not
	 * started. Once the thread starts, the health state is updated with a poll
	 * value to set a health code path. This is simply to ease our life and has
	 * no cost whatsoever.
	 */
	health_init(&kconsumer_data.health);
	health_poll_update(&kconsumer_data.health);
	health_init(&ustconsumer32_data.health);
	health_poll_update(&ustconsumer32_data.health);
	health_init(&ustconsumer64_data.health);
	health_poll_update(&ustconsumer64_data.health);

	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		app_socket_timeout = atoi(env_app_timeout);
	} else {
		app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}

	/* Create thread to manage the health check socket */
	ret = pthread_create(&health_thread, NULL,
			thread_manage_health, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create health");
		goto exit_health;
	}

	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, NULL,
			thread_manage_clients, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create clients");
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, NULL,
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create dispatch");
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, NULL,
			thread_registration_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create registration");
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, NULL,
			thread_manage_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create apps");
		goto exit_apps;
	}

	/* Don't start this thread if kernel tracing is not requested nor root */
	if (is_root && !opt_no_kernel) {
		/* Create kernel thread to manage kernel event */
		ret = pthread_create(&kernel_thread, NULL,
				thread_manage_kernel, (void *) NULL);
		if (ret != 0) {
			PERROR("pthread_create kernel");
			goto exit_kernel;
		}

		ret = pthread_join(kernel_thread, &status);
		if (ret != 0) {
			PERROR("pthread_join");
			goto error;	/* join error, exit without cleanup */
		}
	}

exit_kernel:
	ret = pthread_join(apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_apps:
	ret = pthread_join(reg_apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_reg_apps:
	ret = pthread_join(dispatch_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_dispatch:
	ret = pthread_join(client_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&kconsumer_data);
	if (ret != 0) {
		PERROR("join_consumer");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer32_data);
	if (ret != 0) {
		PERROR("join_consumer ust32");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer64_data);
	if (ret != 0) {
		PERROR("join_consumer ust64");
		goto error;	/* join error, exit without cleanup */
	}

exit_client:
	ret = pthread_join(health_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join health thread");
		goto error;	/* join error, exit without cleanup */
	}

exit_health:
exit:
	/*
	 * cleanup() is called when no other thread is running.
	 */
	rcu_thread_online();
	cleanup();
	rcu_thread_offline();
	rcu_unregister_thread();
	if (!ret) {
		exit(EXIT_SUCCESS);
	}

error:
	exit(EXIT_FAILURE);
}