/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/compat/getenv.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/daemonize.h>
#include <common/config/session-config.h>

#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "health-sessiond.h"
#include "testpoint.h"
#include "ust-thread.h"
#include "agent-thread.h"
#include "load-session-thread.h"
#include "notification-thread.h"
#include "notification-thread-commands.h"
#include "ht-cleanup.h"
#include "sessiond-config.h"
static const char *help_msg =
#ifdef LTTNG_EMBED_HELP
#include <lttng-sessiond.8.h>
#else
NULL
#endif
;
static pid_t ppid;		/* Parent PID for --sig-parent option */
static pid_t child_ppid;	/* Internal parent PID used with daemonize. */
static int lockfile_fd = -1;

/* Set to 1 when a SIGUSR1 signal is received. */
static int recv_child_signal;

static struct lttng_kernel_tracer_version kernel_tracer_version;
static struct lttng_kernel_tracer_abi_version kernel_tracer_abi_version;
/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.channel_monitor_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.channel_monitor_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.channel_monitor_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
/* Command line options */
static const struct option long_options[] = {
	{ "client-sock", required_argument, 0, 'c' },
	{ "apps-sock", required_argument, 0, 'a' },
	{ "kconsumerd-cmd-sock", required_argument, 0, '\0' },
	{ "kconsumerd-err-sock", required_argument, 0, '\0' },
	{ "ustconsumerd32-cmd-sock", required_argument, 0, '\0' },
	{ "ustconsumerd32-err-sock", required_argument, 0, '\0' },
	{ "ustconsumerd64-cmd-sock", required_argument, 0, '\0' },
	{ "ustconsumerd64-err-sock", required_argument, 0, '\0' },
	{ "consumerd32-path", required_argument, 0, '\0' },
	{ "consumerd32-libdir", required_argument, 0, '\0' },
	{ "consumerd64-path", required_argument, 0, '\0' },
	{ "consumerd64-libdir", required_argument, 0, '\0' },
	{ "daemonize", no_argument, 0, 'd' },
	{ "background", no_argument, 0, 'b' },
	{ "sig-parent", no_argument, 0, 'S' },
	{ "help", no_argument, 0, 'h' },
	{ "group", required_argument, 0, 'g' },
	{ "version", no_argument, 0, 'V' },
	{ "quiet", no_argument, 0, 'q' },
	{ "verbose", no_argument, 0, 'v' },
	{ "verbose-consumer", no_argument, 0, '\0' },
	{ "no-kernel", no_argument, 0, '\0' },
	{ "pidfile", required_argument, 0, 'p' },
	{ "agent-tcp-port", required_argument, 0, '\0' },
	{ "config", required_argument, 0, 'f' },
	{ "load", required_argument, 0, 'l' },
	{ "kmod-probes", required_argument, 0, '\0' },
	{ "extra-kmod-probes", required_argument, 0, '\0' },
	{ NULL, 0, 0, 0 },
};
struct sessiond_config config;

/* Command line options to ignore from configuration file */
static const char *config_ignore_options[] = { "help", "version", "config" };
/* Shared between threads */
static int dispatch_thread_exit;

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

int apps_cmd_notify_pipe[2] = { -1, -1 };
/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
static pthread_t ht_cleanup_thread;
static pthread_t agent_reg_thread;
static pthread_t load_session_thread;
static pthread_t notification_thread;
/*
 * UST registration command queue. This queue is tied to a futex and uses an
 * "N wakers / 1 waiter" scheme implemented and detailed in futex.c/.h
 *
 * The thread_registration_apps and thread_dispatch_ust_registration threads
 * use this queue along with the wait/wake scheme. Down the line, the
 * thread_manage_apps thread receives the new application sockets and monitors
 * them for any I/O error or clean close that triggers an unregistration of
 * the application.
 */
static struct ust_cmd_queue ust_cmd_queue;
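
/*
 * For reference, the wake/wait protocol around this queue looks roughly as
 * follows ("q" is shorthand for ust_cmd_queue; the calls are the ones used
 * by the threads below, see futex.c/.h for the authoritative details):
 *
 *	waker (registration thread)		waiter (dispatch thread)
 *	---------------------------		------------------------
 *	cds_wfcq_enqueue(&q.head,		futex_nto1_prepare(&q.futex);
 *			&q.tail, &node);	... drain the queue ...
 *	futex_nto1_wake(&q.futex);		futex_nto1_wait(&q.futex);
 */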
/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;
int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *module_proc_lttng = "/proc/lttng";
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR   = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd tries to talk to consumer
 *                                    ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that, up to the state validation during command processing, the
 * command is safe. After that, we cannot guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
/* Set in main() with the current page size. */
long page_size;

/* Application health monitoring */
struct health_app *health_sessiond;

/* Am I root or not. */
int is_root;	/* Set to 1 if the daemon is running as root */

const char * const config_section_name = "sessiond";

/* Load session thread information to operate. */
struct load_session_thread_data *load_info;

/* Notification thread handle. */
struct notification_thread_handle *notification_thread_handle;

/* Global hash tables */
struct lttng_ht *agent_apps_ht_by_sock = NULL;
/*
 * Whether sessiond is ready for commands, notification channel and health
 * check requests.
 * NR_LTTNG_SESSIOND_READY must match the number of calls to
 * sessiond_notify_ready().
 */
#define NR_LTTNG_SESSIOND_READY		4
int lttng_sessiond_ready = NR_LTTNG_SESSIOND_READY;
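
/*
 * Readiness is thus a countdown, not a boolean: each of the subsystems
 * counted above calls sessiond_notify_ready() exactly once when it becomes
 * operational, and the parent process is only signaled once the counter
 * reaches zero (see sessiond_notify_ready() below).
 */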
int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
{
	return (fd == thread_quit_pipe[0] && (events & LPOLLIN)) ? 1 : 0;
}
/* Notify parents that we are ready for cmd and health check */
void sessiond_notify_ready(void)
{
	if (uatomic_sub_return(&lttng_sessiond_ready, 1) == 0) {
		/*
		 * Notify the parent pid that we are ready to accept commands
		 * for the client side. This ppid is the one from the
		 * external process that spawned us.
		 */
		if (config.sig_parent) {
			kill(ppid, SIGUSR1);
		}

		/* Notify the parent of the fork() process that we are ready. */
		if (config.daemonize || config.background) {
			kill(child_ppid, SIGUSR1);
		}
	}
}
int __sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size,
		int *a_pipe)
{
	int ret;

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, a_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
{
	return __sessiond_set_thread_pollset(events, size, thread_quit_pipe);
}
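
/*
 * Typical usage by the worker threads below (a sketch; "monitored_fd"
 * stands for whatever pipe or socket the thread watches, and the size of 2
 * accounts for the quit pipe plus that fd):
 *
 *	struct lttng_poll_event events;
 *
 *	ret = sessiond_set_thread_pollset(&events, 2);
 *	ret = lttng_poll_add(&events, monitored_fd, LPOLLIN | LPOLLRDHUP);
 *	ret = lttng_poll_wait(&events, -1);
 *	...
 *	lttng_poll_clean(&events);
 */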
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int __init_thread_quit_pipe(int *a_pipe)
{
	int ret, i;

	ret = pipe(a_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(a_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error;
		}
	}

	return 0;

error:
	return -1;
}
static int init_thread_quit_pipe(void)
{
	return __init_thread_quit_pipe(thread_quit_pipe);
}
/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}
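
/*
 * Note that the dispatch thread does not poll the quit pipe; it blocks on
 * the command queue futex. Stopping it therefore takes two steps: publish
 * the exit flag with CMM_STORE_SHARED() and then wake the futex so the
 * thread observes the flag on its next iteration (see
 * thread_dispatch_ust_registration()).
 */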
/*
 * Close every consumer socket.
 */
static void close_consumer_sockets(void)
{
	int ret;

	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
	if (kconsumer_data.channel_monitor_pipe >= 0) {
		ret = close(kconsumer_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("kernel consumer channel monitor pipe close");
		}
	}
	if (ustconsumer32_data.channel_monitor_pipe >= 0) {
		ret = close(ustconsumer32_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("UST consumerd32 channel monitor pipe close");
		}
	}
	if (ustconsumer64_data.channel_monitor_pipe >= 0) {
		ret = close(ustconsumer64_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("UST consumerd64 channel monitor pipe close");
		}
	}
}
/*
 * Wait on consumer process termination.
 *
 * Need to be called with the consumer data lock held or from a context
 * ensuring no concurrent access to data (e.g: cleanup).
 */
static void wait_consumer(struct consumer_data *consumer_data)
{
	pid_t ret;
	int status;

	if (consumer_data->pid <= 0) {
		goto end;
	}

	DBG("Waiting for complete teardown of consumerd (PID: %d)",
			consumer_data->pid);
	ret = waitpid(consumer_data->pid, &status, 0);
	if (ret == -1) {
		PERROR("consumerd waitpid pid: %d", consumer_data->pid);
	} else if (!WIFEXITED(status)) {
		ERR("consumerd termination with error: %d",
				WEXITSTATUS(status));
	}
end:
	consumer_data->pid = 0;
}
/*
 * Cleanup the session daemon's data structures.
 */
static void sessiond_cleanup(void)
{
	int ret;
	struct ltt_session *sess, *stmp;

	DBG("Cleanup sessiond");

	/*
	 * Close the thread quit pipe. It has already done its job,
	 * since we are now called.
	 */
	utils_close_pipe(thread_quit_pipe);

	/*
	 * If config.pid_file_path.value is undefined, the default file will be
	 * wiped when removing the rundir.
	 */
	if (config.pid_file_path.value) {
		ret = remove(config.pid_file_path.value);
		if (ret < 0) {
			PERROR("remove pidfile %s", config.pid_file_path.value);
		}
	}

	DBG("Removing sessiond and consumerd content of directory %s",
			config.rundir.value);

	DBG("Removing %s", config.pid_file_path.value);
	(void) unlink(config.pid_file_path.value);

	DBG("Removing %s", config.agent_port_file_path.value);
	(void) unlink(config.agent_port_file_path.value);

	DBG("Removing %s", kconsumer_data.err_unix_sock_path);
	(void) unlink(kconsumer_data.err_unix_sock_path);

	DBG("Removing directory %s", config.kconsumerd_path.value);
	(void) rmdir(config.kconsumerd_path.value);

	/* ust consumerd 32 */
	DBG("Removing %s", config.consumerd32_err_unix_sock_path.value);
	(void) unlink(config.consumerd32_err_unix_sock_path.value);

	DBG("Removing directory %s", config.consumerd32_path.value);
	(void) rmdir(config.consumerd32_path.value);

	/* ust consumerd 64 */
	DBG("Removing %s", config.consumerd64_err_unix_sock_path.value);
	(void) unlink(config.consumerd64_err_unix_sock_path.value);

	DBG("Removing directory %s", config.consumerd64_path.value);
	(void) rmdir(config.consumerd64_path.value);

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL session */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	wait_consumer(&kconsumer_data);
	wait_consumer(&ustconsumer64_data);
	wait_consumer(&ustconsumer32_data);

	DBG("Cleaning up all agent apps");
	agent_app_ht_clean();

	DBG("Closing all UST sockets");
	ust_app_clean_list();
	buffer_reg_destroy_registries();

	if (is_root && !config.no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	close_consumer_sockets();

	load_session_destroy_data(load_info);

	/*
	 * Cleanup the lock file by deleting it and finally closing it, which
	 * will release the file system lock.
	 */
	if (lockfile_fd >= 0) {
		ret = remove(config.lock_file_path.value);
		if (ret < 0) {
			PERROR("remove lock file");
		}
		ret = close(lockfile_fd);
		if (ret < 0) {
			PERROR("close lock file");
		}
	}

	/*
	 * We do NOT rmdir the rundir because there are other processes
	 * using it, for instance lttng-relayd, which can start in
	 * parallel with this teardown.
	 */
}
/*
 * Cleanup the daemon's option data structures.
 */
static void sessiond_cleanup_options(void)
{
	DBG("Cleaning up options");

	sessiond_config_fini(&config);

	run_as_destroy_worker();
}
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(config.wait_shm_path.value, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
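
/*
 * The wait shm page is the rendezvous point with instrumented applications:
 * they futex-wait on the first 32-bit word of that page, so flipping it
 * with futex_wait_update() above is what lets them know they may (or may no
 * longer) attempt to register. See shm.c and futex.c for the details.
 */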
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return 0 on success, negative value on error.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx,
	const void *payload_buf, size_t payload_len,
	const void *cmd_header_buf, size_t cmd_header_len)
{
	int ret = 0;
	const size_t header_len = sizeof(struct lttcomm_lttng_msg);
	const size_t cmd_header_offset = header_len;
	const size_t payload_offset = cmd_header_offset + cmd_header_len;
	const size_t total_msg_size = header_len + cmd_header_len + payload_len;

	cmd_ctx->llm = zmalloc(total_msg_size);

	if (cmd_ctx->llm == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
	cmd_ctx->llm->cmd_header_size = cmd_header_len;
	cmd_ctx->llm->data_size = payload_len;
	cmd_ctx->lttng_msg_size = total_msg_size;

	/* Copy command header */
	if (cmd_header_len) {
		memcpy(((uint8_t *) cmd_ctx->llm) + cmd_header_offset, cmd_header_buf,
				cmd_header_len);
	}

	/* Copy payload */
	if (payload_len) {
		memcpy(((uint8_t *) cmd_ctx->llm) + payload_offset, payload_buf,
				payload_len);
	}

end:
	return ret;
}
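
/*
 * Resulting message layout (offsets as computed above):
 *
 *	+---------------------------+ 0
 *	| struct lttcomm_lttng_msg  |
 *	+---------------------------+ header_len == cmd_header_offset
 *	| command header (optional) |
 *	+---------------------------+ payload_offset
 *	| payload (optional)        |
 *	+---------------------------+ total_msg_size
 */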
/*
 * Version of setup_lttng_msg() without command header.
 */
static int setup_lttng_msg_no_cmd_header(struct command_ctx *cmd_ctx,
	void *payload_buf, size_t payload_len)
{
	return setup_lttng_msg(cmd_ctx, payload_buf, payload_len, NULL, 0);
}
/*
 * Update the kernel poll set with all the channel fds available over all
 * tracing sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel streams and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel,
				&ksess->channel_list.head, list) {
			struct lttng_ht_iter iter;
			struct consumer_socket *socket;

			if (channel->fd != fd) {
				continue;
			}
			DBG("Channel found, updating kernel streams");
			ret = kernel_open_channel_stream(channel);
			if (ret < 0) {
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;

			/*
			 * Have we already sent fds to the consumer? If yes, it
			 * means that tracing is started so it is safe to send
			 * our updated stream fds.
			 */
			if (ksess->consumer_fds_sent != 1
					|| ksess->consumer == NULL) {
				ret = -1;
				goto error;
			}

			rcu_read_lock();
			cds_lfht_for_each_entry(ksess->consumer->socks->ht,
					&iter.iter, socket, node.node) {
				pthread_mutex_lock(socket->lock);
				ret = kernel_consumer_send_channel_stream(socket,
						channel, ksess,
						session->output_traces ? 1 : 0);
				pthread_mutex_unlock(socket->lock);
				if (ret < 0) {
					rcu_read_unlock();
					goto error;
				}
			}
			rcu_read_unlock();
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}
/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		struct ust_app *app;

		session_lock(sess);
		if (!sess->ust_session) {
			goto unlock_session;
		}

		rcu_read_lock();
		assert(app_sock >= 0);
		app = ust_app_find_by_sock(app_sock);
		if (app == NULL) {
			/*
			 * The application can have unregistered before, so
			 * this is possible; hence, simply stop the update.
			 */
			DBG3("UST app update failed to find app sock %d",
					app_sock);
			goto unlock_rcu;
		}
		ust_app_global_update(sess->ust_session, app);

	unlock_rcu:
		rcu_read_unlock();
	unlock_session:
		session_unlock(sess);
	}
}
/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * This first step of the while is to clean this structure which could free
	 * non NULL pointers so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = sessiond_set_thread_pollset(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling");

		/* Poll infinite value of time */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Thread kernel return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
					"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (revents & LPOLLIN) {
				if (pollfd == kernel_poll_pipe[0]) {
					(void) lttng_read(kernel_poll_pipe[0],
							&tmp, 1);
					/*
					 * Ret value is useless here, if this pipe gets any actions an
					 * update is required anyway.
					 */
					update_poll_flag = 1;
					continue;
				} else {
					/*
					 * New CPU detected by the kernel. Adding kernel stream to
					 * kernel session and updating the kernel consumer
					 */
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
					break;
				}
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				update_poll_flag = 1;
				continue;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}
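
/*
 * Recap of the wakeup mechanism above: writing a byte to the write end of
 * kernel_poll_pipe (handed out as kernel_poll_pipe[1], e.g. to
 * cmd_destroy_session() in sessiond_cleanup()) makes this thread rebuild
 * its poll set, which is how newly created kernel channels get picked up.
 */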
/*
 * Signal the pthread condition of the consumer data to wake the waiting
 * thread.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread initialization.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;
	struct consumer_socket *cmd_socket_wrapper = NULL;

	DBG("[thread] Manage consumer started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);

	health_code_update();

	/*
	 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
	 * metadata_sock. Nothing more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 3);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_entry();

	if (testpoint(sessiond_thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		/* Thread quit pipe has been closed. Killing thread. */
		ret = sessiond_check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & LPOLLIN) {
				continue;
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update();
	if (code != LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Connect both command and metadata sockets. */
	consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(
				consumer_data->cmd_unix_sock_path);
	consumer_data->metadata_fd =
			lttcomm_connect_unix_sock(
				consumer_data->cmd_unix_sock_path);
	if (consumer_data->cmd_sock < 0 || consumer_data->metadata_fd < 0) {
		PERROR("consumer connect cmd socket");
		/* On error, signal condition and quit. */
		signal_consumer_condition(consumer_data, -1);
		goto error;
	}

	consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;

	/* Create metadata socket lock. */
	consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
	if (consumer_data->metadata_sock.lock == NULL) {
		PERROR("zmalloc pthread mutex");
		goto error;
	}
	pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);

	DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
	DBG("Consumer metadata socket ready (fd: %d)",
			consumer_data->metadata_fd);

	/*
	 * Remove the consumerd error sock since we've established a connection.
	 */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	/* Add metadata socket that is successfully connected. */
	ret = lttng_poll_add(&events, consumer_data->metadata_fd,
			LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * Transfer the write-end of the channel monitoring pipe to the consumer
	 * by issuing a SET_CHANNEL_MONITOR_PIPE command.
	 */
	cmd_socket_wrapper = consumer_allocate_socket(&consumer_data->cmd_sock);
	if (!cmd_socket_wrapper) {
		goto error;
	}
	cmd_socket_wrapper->lock = &consumer_data->lock;

	ret = consumer_send_channel_monitor_pipe(cmd_socket_wrapper,
			consumer_data->channel_monitor_pipe);
	if (ret) {
		goto error;
	}

	/* Discard the socket wrapper as it is no longer needed. */
	consumer_destroy_socket(cmd_socket_wrapper);
	cmd_socket_wrapper = NULL;

	/* The thread is completely initialized, signal that it is ready. */
	signal_consumer_condition(consumer_data, 1);

	/* Infinite blocking call, waiting for transmission */
	while (1) {
		health_code_update();

		/* Exit the thread because the thread quit pipe has been triggered. */
		if (should_quit) {
			/* Not a health error. */
			err = 0;
			goto exit;
		}

	restart_poll:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart_poll;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/*
			 * Thread quit pipe has been triggered, flag that we should stop
			 * but continue the current loop to handle potential data from
			 * the consumer.
			 */
			should_quit = sessiond_check_thread_quit_pipe(pollfd, revents);

			if (pollfd == sock) {
				/* Event on the consumerd socket */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
						&& !(revents & LPOLLIN)) {
					ERR("consumer err socket second poll error");
					goto error;
				}
				health_code_update();
				/* Wait for any kconsumerd error */
				ret = lttcomm_recv_unix_sock(sock, &code,
						sizeof(enum lttcomm_return_code));
				if (ret <= 0) {
					ERR("consumer closed the command socket");
					goto error;
				}

				ERR("consumer return code : %s",
						lttcomm_get_readable_code(-code));

				goto exit;
			} else if (pollfd == consumer_data->metadata_fd) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
						&& !(revents & LPOLLIN)) {
					ERR("consumer err metadata socket second poll error");
					goto error;
				}
				/* UST metadata requests */
				ret = ust_consumer_metadata_request(
						&consumer_data->metadata_sock);
				if (ret < 0) {
					ERR("Handling metadata request");
					goto error;
				}
			}
			/* No need for an else branch all FDs are tested prior. */
		}
		health_code_update();
	}

exit:
error:
	/*
	 * We lock here because we are about to close the sockets and some other
	 * thread might be using them so get exclusive access which will abort all
	 * other consumer commands by other threads.
	 */
	pthread_mutex_lock(&consumer_data->lock);

	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->err_sock = -1;
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->cmd_sock = -1;
	}
	if (consumer_data->metadata_sock.fd_ptr &&
			*consumer_data->metadata_sock.fd_ptr >= 0) {
		ret = close(*consumer_data->metadata_sock.fd_ptr);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	pthread_mutex_unlock(&consumer_data->lock);

	/* Cleanup metadata socket mutex. */
	if (consumer_data->metadata_sock.lock) {
		pthread_mutex_destroy(consumer_data->metadata_sock.lock);
		free(consumer_data->metadata_sock.lock);
	}
	lttng_poll_clean(&events);

	if (cmd_socket_wrapper) {
		consumer_destroy_socket(cmd_socket_wrapper);
	}
error_poll:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("consumer thread cleanup completed");

	rcu_thread_offline();
	rcu_unregister_thread();

	return NULL;
}
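
/*
 * Hand-off recap for the consumerd sockets (as implemented above): the
 * consumer daemon first reports LTTCOMM_CONSUMERD_COMMAND_SOCK_READY on the
 * error socket, the session daemon then connects the command and metadata
 * sockets back to it, transfers the channel monitoring pipe, and finally
 * wakes spawn_consumer_thread() through the consumer_data condition
 * variable.
 */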
/*
 * This thread receives application command sockets (FDs) on the
 * apps_cmd_pipe and waits (polls) on them until they are closed
 * or an error occurs.
 *
 * At that point, it flushes the data (tracing and metadata) associated
 * with this application and tears down ust app sessions and other
 * associated data structures through ust_app_unregister().
 *
 * Note that this thread never sends commands to the applications
 * through the command sockets; it merely listens for hang-ups
 * and errors on those sockets and cleans-up as they occur.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);

	if (testpoint(sessiond_thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Apps thread return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & LPOLLIN) {
					int sock;
					ssize_t size_ret;

					/* Empty pipe */
					size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
					if (size_ret < sizeof(sock)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * Since this is a command socket (write then read),
					 * we only monitor the error events of the socket.
					 */
					ret = lttng_poll_add(&events, sock,
							LPOLLERR | LPOLLHUP | LPOLLRDHUP);
					if (ret < 0) {
						goto error;
					}

					DBG("Apps with sock %d added to poll set", sock);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else {
					ERR("Unknown poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
/*
 * Send a socket to a thread. This is called from the dispatch UST registration
 * thread once all sockets are set for the application.
 *
 * The sock value can be invalid, we don't really care, the thread will handle
 * it and make the necessary cleanup if so.
 *
 * On success, return 0 else a negative value being the errno message of the
 * write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	ssize_t ret;

	/*
	 * It's possible that the FD is set as invalid with -1 concurrently just
	 * before calling this function being a shutdown state of the thread.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	ret = lttng_write(fd, &sock, sizeof(sock));
	if (ret < sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the write positive ret value. */
	ret = 0;
error:
	return (int) ret;
}
/*
 * Sanitize the wait queue of the dispatch registration thread, meaning remove
 * invalid nodes from it. This is to avoid memory leaks for the case the UST
 * notify socket is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;

	assert(wait_queue);

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue->head, head) {
		assert(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock,
				LPOLLHUP | LPOLLERR);
		if (ret < 0) {
			goto error;
		}

		fd_added = 1;
	}

	if (!fd_added) {
		goto end;
	}

	/*
	 * Poll but don't block so we can quickly identify the faulty events and
	 * clean them afterwards from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);
	if (ret < 0) {
		goto error;
	}
	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
				&wait_queue->head, head) {
			if (pollfd == wait_node->app->sock &&
					(revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
				free(wait_node);
				/*
				 * Silence warning of use-after-free in
				 * cds_list_for_each_entry_safe which uses
				 * __typeof__(*wait_node).
				 */
				wait_node = NULL;
				break;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

	if (nb_fd > 0) {
		DBG("Wait queue sanitized, %d node were cleaned up", nb_fd);
	}

end:
	lttng_poll_clean(&events);
	return;

error:
	lttng_poll_clean(&events);
error_create:
	ERR("Unable to sanitize wait queue");
	return;
}
/*
 * Dispatch requests from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret, err = -1;
	struct cds_wfcq_node *node;
	struct ust_command *ust_cmd = NULL;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
	};

	rcu_register_thread();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);

	if (testpoint(sessiond_thread_app_reg_dispatch)) {
		goto error_testpoint;
	}

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

	for (;;) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
			break;
		}

		do {
			struct ust_app *app = NULL;
			ust_cmd = NULL;

			/*
			 * Make sure we don't have node(s) that have hung up before receiving
			 * the notify socket. This is to clean the list in order to avoid
			 * memory leaks from notify sockets that are never seen.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
				wait_node = zmalloc(sizeof(*wait_node));
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(ust_cmd);
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create application object if socket is CMD. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg,
						ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(wait_node);
					free(ust_cmd);
					continue;
				}
				/*
				 * Add application to the wait queue so we can set the notify
				 * socket before putting this object in the global ht.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);
				wait_queue.count++;

				free(ust_cmd);
				/*
				 * We have to continue here since we don't have the notify
				 * socket and the application MUST be added to the hash table
				 * only at that moment.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and set the
				 * notify socket if found.
				 */
				cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
						&wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						wait_queue.count--;
						app = wait_node->app;
						free(wait_node);
						DBG3("UST app notify socket %d is set", ust_cmd->sock);
						break;
					}
				}

				/*
				 * With no application at this stage the received socket is
				 * basically useless so close it before we free the cmd data
				 * structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
				}
				free(ust_cmd);
			}

			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so from the register up to the
				 * registration done message, no thread can see the application
				 * and change its state.
				 */
				session_lock_list();
				rcu_read_lock();

				/*
				 * Add application to the global hash table. This needs to be
				 * done before the update to the UST registry can locate the
				 * application.
				 */
				ust_app_add(app);

				/* Set app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				/* Send notify socket through the notify pipe. */
				ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
						app->notify_sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No notify thread, stop the UST tracing. However, this is
					 * not an internal error of this thread, thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				/*
				 * Update newly registered application with the tracing
				 * registry info already enabled information.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about return value. Let the manage apps threads
				 * handle app unregistration upon socket close.
				 */
				(void) ust_app_register_done(app);

				/*
				 * Even if the application socket has been closed, send the app
				 * to the thread and unregistration will take place at that
				 * place.
				 */
				ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No apps. thread, stop the UST tracing. However, this is
					 * not an internal error of this thread, thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				rcu_read_unlock();
				session_unlock_list();
			}
		} while (node != NULL);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
		health_poll_exit();
	}
	/* Normal exit, no error */
	err = 0;

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue.head, head) {
		cds_list_del(&wait_node->head);
		wait_queue.count--;
		free(wait_node);
	}

	/* Empty command queue. */
	for (;;) {
		/* Dequeue command for registration */
		node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
		if (node == NULL) {
			break;
		}
		ust_cmd = caa_container_of(node, struct ust_command, node);
		ret = close(ust_cmd->sock);
		if (ret < 0) {
			PERROR("close ust sock exit dispatch %d", ust_cmd->sock);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ust_cmd);
	}

error_testpoint:
	DBG("Dispatch thread dying");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	rcu_unregister_thread();
	return NULL;
}
/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Gets allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);

	if (testpoint(sessiond_thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
				"Execution continues but there might be problems for already\n"
				"running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set socket timeout for both receiving and sending.
					 * app_socket_timeout is in seconds, whereas
					 * lttcomm_setsockopt_rcv_timeout and
					 * lttcomm_setsockopt_snd_timeout expect msec as
					 * parameter.
					 */
					if (config.app_socket_timeout >= 0) {
						(void) lttcomm_setsockopt_rcv_timeout(sock,
								config.app_socket_timeout * 1000);
						(void) lttcomm_setsockopt_snd_timeout(sock,
								config.app_socket_timeout * 1000);
					}

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}

					health_code_update();
					ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
					if (ret < 0) {
						free(ust_cmd);
						/* Close socket of the application. */
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}
					health_code_update();

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This app will be part of the *system*.
					 */
					cds_wfcq_enqueue(&ust_cmd_queue.head, &ust_cmd_queue.tail, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfcq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}
		}
	}

exit:
error:
	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(config.apps_unix_sock_path.value);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	return NULL;
}
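
/*
 * Registration pipeline recap (the three threads above):
 *
 *	thread_registration_apps	accepts the socket, receives the
 *					registration message and enqueues it
 *					on ust_cmd_queue (futex wake);
 *	thread_dispatch_ust_registration
 *					dequeues commands, pairs the command
 *					and notify sockets of an application
 *					and hands them to the manage threads;
 *	thread_manage_apps		polls the command sockets and
 *					unregisters applications on hang-up
 *					or error.
 */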
/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret, clock_ret;
	struct timespec timeout;

	/*
	 * Make sure we set the readiness flag to 0 because we are NOT ready.
	 * This access to consumer_thread_is_ready does not need to be
	 * protected by consumer_data.cond_mutex (yet) since the consumer
	 * management thread has not been started at this point.
	 */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret) {
		errno = ret;
		PERROR("pthread_condattr_init consumer data");
		goto error;
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret) {
		errno = ret;
		PERROR("pthread_condattr_setclock consumer data");
		goto error;
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret) {
		errno = ret;
		PERROR("pthread_cond_init consumer data");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, default_pthread_attr(),
			thread_manage_consumer, consumer_data);
	if (ret) {
		errno = ret;
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for sem_timedwait absolute timeout */
	clock_ret = lttng_clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid
	 * incrementing the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	/*
	 * The following loop COULD be skipped in some conditions so this is why we
	 * set ret to 0 in order to make sure at least one round of the loop is
	 * done.
	 */
	ret = 0;

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
	 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
	 * possible. This loop does not take any chances and works with both of
	 * them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	/* Handle a possible error */
	if (ret != 0) {
		if (ret == ETIMEDOUT) {
			int pth_ret;

			/*
			 * Call has timed out so we kill the kconsumerd_thread and return
			 * an error.
			 */
			ERR("Condition timed out. The consumer thread was never ready."
					" Killing it");
			pth_ret = pthread_cancel(consumer_data->thread);
			if (pth_ret < 0) {
				PERROR("pthread_cancel consumer thread");
			}
		} else {
			PERROR("pthread_cond_wait failed consumer thread");
		}
		/* Caller is expecting a negative value on failure. */
		ret = -1;
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}
/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		int ret;

		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			PERROR("Error killing consumer daemon");
			return ret;
		}
		return pthread_join(consumer_data->thread, &status);
	} else {
		return 0;
	}
}
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		if (config.verbose_consumer) {
			verbosity = "--verbose";
		} else if (lttng_opt_quiet) {
			verbosity = "--quiet";
		} else {
			verbosity = "";
		}

		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fall back on the 32-bit one.
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3("	1) %s", config.consumerd64_bin_path.value ? : "NULL");
			DBG3("	2) %s/%s", INSTALL_BIN_PATH, DEFAULT_CONSUMERD_FILE);
			DBG3("	3) %s", config.consumerd32_bin_path.value ? : "NULL");
			if (stat(config.consumerd64_bin_path.value, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = config.consumerd64_bin_path.value;
			} else if (stat(INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE;
			} else if (stat(config.consumerd32_bin_path.value, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = config.consumerd32_bin_path.value;
			} else {
				DBG("Could not find any valid consumerd executable");
				ret = -EINVAL;
				goto error;
			}
			DBG("Using kernel consumer at: %s", consumer_to_use);
			(void) execl(consumer_to_use,
				"lttng-consumerd", verbosity, "-k",
				"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
				"--consumerd-err-sock", consumer_data->err_unix_sock_path,
				"--group", config.tracing_group_name.value,
				NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			if (config.consumerd64_lib_dir.value) {
				char *tmp;
				size_t tmplen;
				char *tmpnew;

				tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen(config.consumerd64_lib_dir.value) +
						1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcat(tmpnew, config.consumerd64_lib_dir.value);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
				free(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s",
					config.consumerd64_bin_path.value);
			(void) execl(config.consumerd64_bin_path.value,
				"lttng-consumerd", verbosity, "-u",
				"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
				"--consumerd-err-sock", consumer_data->err_unix_sock_path,
				"--group", config.tracing_group_name.value,
				NULL);
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			if (config.consumerd32_lib_dir.value) {
				char *tmp;
				size_t tmplen;
				char *tmpnew;

				tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen(config.consumerd32_lib_dir.value) +
						1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcat(tmpnew, config.consumerd32_lib_dir.value);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
				free(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s",
					config.consumerd32_bin_path.value);
			(void) execl(config.consumerd32_bin_path.value,
				"lttng-consumerd", verbosity, "-u",
				"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
				"--consumerd-err-sock", consumer_data->err_unix_sock_path,
				"--group", config.tracing_group_name.value,
				NULL);
			break;
		}
		default:
			ERR("unknown consumer type");
			errno = 0;
		}
		if (errno != 0) {
			PERROR("Consumer execl()");
		}
		/* Reaching this point, we got a failure on our execl(). */
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}

error:
	return ret;
}
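/*
 * Illustrative sketch (not part of the daemon; the function name is
 * hypothetical) of an alternative to the strcat() chain above: building
 * the new LD_LIBRARY_PATH value in one snprintf() call, assuming
 * <stdio.h> and <stdlib.h> are available.
 */
#if 0
static int prepend_ld_library_path_sketch(const char *dir)
{
	const char *old = getenv("LD_LIBRARY_PATH");
	char buf[4096];
	int ret;

	/* "<dir>[:<old value>]" -- the separator is only added when needed. */
	ret = snprintf(buf, sizeof(buf), "%s%s%s", dir,
			old && old[0] ? ":" : "", old ? old : "");
	if (ret < 0 || (size_t) ret >= sizeof(buf)) {
		return -1;
	}
	return setenv("LD_LIBRARY_PATH", buf, 1);
}
#endif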
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	int ret;

	/*
	 * Set the listen() state on the socket since there is a possible race
	 * between the exec() of the consumer daemon and this call if placed in the
	 * consumer thread. See bug #366 for more details.
	 */
	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

end:
	return 0;

error:
	/* Cleanup already created sockets on error. */
	if (consumer_data->err_sock >= 0) {
		int err;

		err = close(consumer_data->err_sock);
		if (err) {
			PERROR("close consumer data error socket");
		}
	}
	return ret;
}
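/*
 * Illustrative sketch (hypothetical names) of the spawn-once protocol
 * used by start_consumerd() above: the pid field doubles as an "already
 * spawned" flag and is only read or written while pid_mutex is held.
 */
#if 0
static int spawn_once_sketch(pthread_mutex_t *pid_mutex, pid_t *pid,
		pid_t (*spawn)(void))
{
	int ret = 0;

	pthread_mutex_lock(pid_mutex);
	if (*pid == 0) {
		pid_t new_pid = spawn();

		if (new_pid < 0) {
			ret = -1;
		} else {
			*pid = new_pid;
		}
	}
	pthread_mutex_unlock(pid_mutex);
	return ret;
}
#endif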
/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		goto error_open;
	}

	/* Validate kernel version */
	ret = kernel_validate_version(kernel_tracer_fd, &kernel_tracer_version,
			&kernel_tracer_abi_version);
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	ret = kernel_supports_ring_buffer_snapshot_sample_positions(
			kernel_tracer_fd);
	if (ret < 0) {
		goto error_modules;
	}

	if (ret < 1) {
		WARN("Kernel tracer does not support buffer monitoring. "
			"The monitoring timer of channels in the kernel domain "
			"will be set to 0 (disabled).");
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}
	kernel_tracer_fd = -1;
	return LTTNG_ERR_KERN_VERSION;

error_modules:
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}

error_open:
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;
	if (!is_root) {
		return LTTNG_ERR_NEED_ROOT_SESSIOND;
	} else {
		return LTTNG_ERR_KERN_NA;
	}
}
/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
	int ret;
	const char *dir_name;
	struct consumer_output *consumer;

	assert(session);
	assert(session->consumer);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		DBG3("Copying tracing session consumer output in kernel session");
		/*
		 * XXX: We should audit the session creation and what this function
		 * does "extra" in order to avoid a destroy since this function is used
		 * in the domain session creation (kernel and ust) only. Same for UST
		 * domain.
		 */
		if (session->kernel_session->consumer) {
			consumer_output_put(session->kernel_session->consumer);
		}
		session->kernel_session->consumer =
				consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->kernel_session->consumer;
		dir_name = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	case LTTNG_DOMAIN_UST:
		DBG3("Copying tracing session consumer output in UST session");
		if (session->ust_session->consumer) {
			consumer_output_put(session->ust_session->consumer);
		}
		session->ust_session->consumer =
				consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->ust_session->consumer;
		dir_name = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	/* Append correct directory to subdir */
	strncat(consumer->subdir, dir_name,
			sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
	DBG3("Copy session consumer subdir %s", consumer->subdir);

	ret = LTTNG_OK;

error:
	return ret;
}
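/*
 * Illustrative sketch (hypothetical name, not part of the daemon) of the
 * bounded append used by copy_session_consumer() above: the size argument
 * of strncat() is the space *left* in the destination, not the destination
 * size, hence the sizeof() - strlen() - 1 computation that reserves one
 * byte for the terminating NUL.
 */
#if 0
static void bounded_append_sketch(char (*dst)[64], const char *src)
{
	strncat(*dst, src, sizeof(*dst) - strlen(*dst) - 1);
}
#endif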
/*
 * Create a UST session and add it to the session ust list.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	int ret;
	struct ltt_ust_session *lus = NULL;

	assert(session);
	assert(domain);
	assert(session->consumer);

	switch (domain->type) {
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->id);
	if (lus == NULL) {
		ret = LTTNG_ERR_UST_SESS_FAIL;
		goto error;
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	lus->output_traces = session->output_traces;
	lus->snapshot_mode = session->snapshot_mode;
	lus->live_timer_interval = session->live_timer;
	session->ust_session = lus;
	if (session->shm_path[0]) {
		strncpy(lus->root_shm_path, session->shm_path,
				sizeof(lus->root_shm_path));
		lus->root_shm_path[sizeof(lus->root_shm_path) - 1] = '\0';
		strncpy(lus->shm_path, session->shm_path,
				sizeof(lus->shm_path));
		lus->shm_path[sizeof(lus->shm_path) - 1] = '\0';
		strncat(lus->shm_path, "/ust",
				sizeof(lus->shm_path) - strlen(lus->shm_path) - 1);
	}
	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	return LTTNG_OK;

error:
	free(lus);
	session->ust_session = NULL;
	return ret;
}
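/*
 * Illustrative sketch (hypothetical name; PATH_MAX assumed to come from
 * <limits.h>) of the copy-then-terminate idiom used for the shm paths
 * above: strncpy() does not NUL-terminate when the source fills the
 * destination, so the last byte is forced to '\0' explicitly.
 */
#if 0
static void copy_path_sketch(char (*dst)[PATH_MAX], const char *src)
{
	strncpy(*dst, src, sizeof(*dst));
	(*dst)[sizeof(*dst) - 1] = '\0';
}
#endif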
/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
	int ret;

	DBG("Creating kernel session");

	ret = kernel_create_session(session, kernel_tracer_fd);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_SESS_FAIL;
		goto error;
	}

	/* Code flow safety */
	assert(session->kernel_session);

	/* Copy session output to the newly created Kernel session */
	ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	session->kernel_session->uid = session->uid;
	session->kernel_session->gid = session->gid;
	session->kernel_session->output_traces = session->output_traces;
	session->kernel_session->snapshot_mode = session->snapshot_mode;

	return LTTNG_OK;

error:
	trace_kernel_destroy_session(session->kernel_session);
	session->kernel_session = NULL;
	return ret;
}
/*
 * Count the number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
	unsigned int i = 0;
	struct ltt_session *session;

	DBG("Counting number of available session for UID %d GID %d",
			uid, gid);
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}
		i++;
	}
	return i;
}
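/*
 * Note: lttng_sessions_count() above iterates on session_list_ptr->head
 * without taking any lock itself; callers are expected to hold the
 * session list lock (see the LTTNG_LIST_SESSIONS handler below, which
 * calls session_lock_list() before counting).
 */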
/*
 * Process the command requested by the lttng client within the command
 * context structure. This function makes sure that the return structure (llm)
 * is set and ready for transmission before returning.
 *
 * Return any error encountered or 0 for success.
 *
 * "sock" is only used for special-case var. len data.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
		int *sock_error)
{
	int ret = LTTNG_OK;
	int need_tracing_session = 1;
	int need_domain;

	DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);

	assert(!rcu_read_ongoing());

	*sock_error = 0;

	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	case LTTNG_CREATE_SESSION_LIVE:
	case LTTNG_DESTROY_SESSION:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_START_TRACE:
	case LTTNG_STOP_TRACE:
	case LTTNG_DATA_PENDING:
	case LTTNG_SNAPSHOT_ADD_OUTPUT:
	case LTTNG_SNAPSHOT_DEL_OUTPUT:
	case LTTNG_SNAPSHOT_LIST_OUTPUT:
	case LTTNG_SNAPSHOT_RECORD:
	case LTTNG_SAVE_SESSION:
	case LTTNG_SET_SESSION_SHM_PATH:
	case LTTNG_REGENERATE_METADATA:
	case LTTNG_REGENERATE_STATEDUMP:
	case LTTNG_REGISTER_TRIGGER:
	case LTTNG_UNREGISTER_TRIGGER:
		need_domain = 0;
		break;
	default:
		need_domain = 1;
	}

	if (config.no_kernel && need_domain
			&& cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
		} else {
			ret = LTTNG_ERR_KERN_NA;
		}
		goto error;
	}

	/* Deny register consumer if we already have a spawned consumer. */
	if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
		pthread_mutex_lock(&kconsumer_data.pid_mutex);
		if (kconsumer_data.pid > 0) {
			ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
			pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			goto error;
		}
		pthread_mutex_unlock(&kconsumer_data.pid_mutex);
	}

	/*
	 * Check for commands that don't need to allocate a returned payload.
	 * We do this here so we don't have to make the call for no payload at
	 * each command.
	 */
	switch(cmd_ctx->lsm->cmd_type) {
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_LIST_CHANNELS:
	case LTTNG_LIST_EVENTS:
	case LTTNG_LIST_SYSCALLS:
	case LTTNG_LIST_TRACKER_PIDS:
	case LTTNG_DATA_PENDING:
		break;
	default:
		/* Setup lttng message with no payload */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0);
		if (ret < 0) {
			/* This label does not try to unlock the session */
			goto init_setup_error;
		}
	}

	/* Commands that DO NOT need a session. */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	case LTTNG_CREATE_SESSION_LIVE:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_SYSCALLS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	case LTTNG_SAVE_SESSION:
	case LTTNG_REGISTER_TRIGGER:
	case LTTNG_UNREGISTER_TRIGGER:
		need_tracing_session = 0;
		break;
	default:
		DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
		/*
		 * We keep the session list lock across _all_ commands
		 * for now, because the per-session lock does not
		 * handle teardown properly.
		 */
		session_lock_list();
		cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
		if (cmd_ctx->session == NULL) {
			ret = LTTNG_ERR_SESS_NOT_FOUND;
			goto error;
		} else {
			/* Acquire lock for the session */
			session_lock(cmd_ctx->session);
		}
		break;
	}
	/*
	 * Commands that need a valid session but should NOT create one if none
	 * exists. Instead of creating one and destroying it when the command is
	 * handled, process that right before so we save some round trip in useless
	 * code.
	 */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_DISABLE_CHANNEL:
	case LTTNG_DISABLE_EVENT:
		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			if (!cmd_ctx->session->kernel_session) {
				ret = LTTNG_ERR_NO_CHANNEL;
				goto error;
			}
			break;
		case LTTNG_DOMAIN_JUL:
		case LTTNG_DOMAIN_LOG4J:
		case LTTNG_DOMAIN_PYTHON:
		case LTTNG_DOMAIN_UST:
			if (!cmd_ctx->session->ust_session) {
				ret = LTTNG_ERR_NO_CHANNEL;
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNKNOWN_DOMAIN;
			goto error;
		}
		break;
	default:
		break;
	}
	if (!need_domain) {
		goto skip_domain;
	}

	/*
	 * Check domain type for specific "pre-action".
	 */
	switch (cmd_ctx->lsm->domain.type) {
	case LTTNG_DOMAIN_KERNEL:
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
			goto error;
		}

		/* Kernel tracer check */
		if (kernel_tracer_fd == -1) {
			/* Basically, load kernel tracer modules */
			ret = init_kernel_tracer();
			if (ret != 0) {
				goto error;
			}
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_KERNCONSUMERD;
			goto error;
		}

		/* Need a session for kernel command */
		if (need_tracing_session) {
			if (cmd_ctx->session->kernel_session == NULL) {
				ret = create_kernel_session(cmd_ctx->session);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_SESS_FAIL;
					goto error;
				}
			}

			/* Start the kernel consumer daemon */
			pthread_mutex_lock(&kconsumer_data.pid_mutex);
			if (kconsumer_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
				ret = start_consumerd(&kconsumer_data);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
					goto error;
				}
				uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			}

			/*
			 * The consumer was just spawned so we need to add the socket to
			 * the consumer output of the session if it exists.
			 */
			ret = consumer_create_socket(&kconsumer_data,
					cmd_ctx->session->kernel_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}

		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	case LTTNG_DOMAIN_UST:
	{
		if (!ust_app_supported()) {
			ret = LTTNG_ERR_NO_UST;
			goto error;
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_USTCONSUMERD;
			goto error;
		}

		if (need_tracing_session) {
			/* Create UST session if none exist. */
			if (cmd_ctx->session->ust_session == NULL) {
				ret = create_ust_session(cmd_ctx->session,
						&cmd_ctx->lsm->domain);
				if (ret != LTTNG_OK) {
					goto error;
				}
			}

			/* Start the UST consumer daemons */
			/* 64-bit */
			pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
			if (config.consumerd64_bin_path.value &&
					ustconsumer64_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
				ret = start_consumerd(&ustconsumer64_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
					uatomic_set(&ust_consumerd64_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 64 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer64_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}

			/* 32-bit */
			pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
			if (config.consumerd32_bin_path.value &&
					ustconsumer32_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
				ret = start_consumerd(&ustconsumer32_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
					uatomic_set(&ust_consumerd32_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 32 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer32_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		break;
	}
skip_domain:
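	/*
	 * At this point the relevant consumer daemon (kernel, 64-bit UST or
	 * 32-bit UST) has been spawned at most once: the pid field, protected
	 * by pid_mutex, is the "already started" flag checked above before
	 * each start_consumerd() call.
	 */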
	/* Validate consumer daemon state when start/stop trace command */
	if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
			cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_NONE:
			break;
		case LTTNG_DOMAIN_JUL:
		case LTTNG_DOMAIN_LOG4J:
		case LTTNG_DOMAIN_PYTHON:
		case LTTNG_DOMAIN_UST:
			if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_USTCONSUMERD;
				goto error;
			}
			break;
		case LTTNG_DOMAIN_KERNEL:
			if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_KERNCONSUMERD;
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNKNOWN_DOMAIN;
			goto error;
		}
	}
	/*
	 * Check that the UID or GID match that of the tracing session.
	 * The root user can interact with all sessions.
	 */
	if (need_tracing_session) {
		if (!session_access_ok(cmd_ctx->session,
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
			ret = LTTNG_ERR_EPERM;
			goto error;
		}
	}

	/*
	 * Send relayd information to consumer as soon as we have a domain and a
	 * session defined.
	 */
	if (cmd_ctx->session && need_domain) {
		/*
		 * Setup relayd if not done yet. If the relayd information was already
		 * sent to the consumer, this call will gracefully return.
		 */
		ret = cmd_setup_relayd(cmd_ctx->session);
		if (ret != LTTNG_OK) {
			goto error;
		}
	}
	/* Process by command type */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_ADD_CONTEXT:
	{
		/*
		 * An LTTNG_ADD_CONTEXT command might have a supplementary
		 * payload if the context being added is an application context.
		 */
		if (cmd_ctx->lsm->u.context.ctx.ctx ==
				LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *context_name = NULL;
			size_t provider_name_len =
					cmd_ctx->lsm->u.context.provider_name_len;
			size_t context_name_len =
					cmd_ctx->lsm->u.context.context_name_len;

			if (provider_name_len == 0 || context_name_len == 0) {
				/*
				 * Application provider and context names MUST
				 * be provided.
				 */
				ret = -LTTNG_ERR_INVALID;
				goto error;
			}

			provider_name = zmalloc(provider_name_len + 1);
			if (!provider_name) {
				ret = -LTTNG_ERR_NOMEM;
				goto error;
			}
			cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name =
					provider_name;

			context_name = zmalloc(context_name_len + 1);
			if (!context_name) {
				ret = -LTTNG_ERR_NOMEM;
				goto error_add_context;
			}
			cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name =
					context_name;

			ret = lttcomm_recv_unix_sock(sock, provider_name,
					provider_name_len);
			if (ret < 0) {
				goto error_add_context;
			}

			ret = lttcomm_recv_unix_sock(sock, context_name,
					context_name_len);
			if (ret < 0) {
				goto error_add_context;
			}
		}

		/*
		 * cmd_add_context assumes ownership of the provider and context
		 * names.
		 */
		ret = cmd_add_context(cmd_ctx->session,
				cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.context.channel_name,
				&cmd_ctx->lsm->u.context.ctx,
				kernel_poll_pipe[1]);

		cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name = NULL;
		cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name = NULL;
		break;
error_add_context:
		free(cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name);
		free(cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name);
		goto error;
	}
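	/*
	 * Note on the ADD_CONTEXT handler above: once cmd_add_context()
	 * returns, the provider and context name buffers belong to it, so the
	 * pointers in the lsm are cleared to NULL; the error_add_context path
	 * frees them only when the handler was never reached.
	 */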
	case LTTNG_DISABLE_CHANNEL:
	{
		ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name);
		break;
	}
	case LTTNG_DISABLE_EVENT:
	{

		/*
		 * FIXME: handle filter; for now we just receive the filter's
		 * bytecode along with the filter expression which are sent by
		 * liblttng-ctl and discard them.
		 *
		 * This fixes an issue where the client may block while sending
		 * the filter payload and encounter an error because the session
		 * daemon closes the socket without ever handling this data.
		 */
		size_t count = cmd_ctx->lsm->u.disable.expression_len +
				cmd_ctx->lsm->u.disable.bytecode_len;

		if (count) {
			char data[LTTNG_FILTER_MAX_LEN];

			DBG("Discarding disable event command payload of size %zu", count);
			while (count) {
				ret = lttcomm_recv_unix_sock(sock, data,
						count > sizeof(data) ? sizeof(data) : count);
				if (ret < 0) {
					goto error;
				}

				count -= (size_t) ret;
			}
		}
		/* FIXME: passing packed structure to non-packed pointer */
		ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name,
				&cmd_ctx->lsm->u.disable.event);
		break;
	}
	case LTTNG_ENABLE_CHANNEL:
	{
		cmd_ctx->lsm->u.channel.chan.attr.extended.ptr =
				(struct lttng_channel_extended *) &cmd_ctx->lsm->u.channel.extended;
		ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
				&cmd_ctx->lsm->u.channel.chan,
				kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_TRACK_PID:
	{
		ret = cmd_track_pid(cmd_ctx->session,
				cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.pid_tracker.pid);
		break;
	}
	case LTTNG_UNTRACK_PID:
	{
		ret = cmd_untrack_pid(cmd_ctx->session,
				cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.pid_tracker.pid);
		break;
	}
	case LTTNG_ENABLE_EVENT:
	{
		struct lttng_event_exclusion *exclusion = NULL;
		struct lttng_filter_bytecode *bytecode = NULL;
		char *filter_expression = NULL;

		/* Handle exclusion events and receive it from the client. */
		if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
			size_t count = cmd_ctx->lsm->u.enable.exclusion_count;

			exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
					(count * LTTNG_SYMBOL_NAME_LEN));
			if (!exclusion) {
				ret = LTTNG_ERR_EXCLUSION_NOMEM;
				goto error;
			}

			DBG("Receiving var len exclusion event list from client ...");
			exclusion->count = count;
			ret = lttcomm_recv_unix_sock(sock, exclusion->names,
					count * LTTNG_SYMBOL_NAME_LEN);
			if (ret <= 0) {
				DBG("Nothing recv() from client var len data... continuing");
				*sock_error = 1;
				free(exclusion);
				ret = LTTNG_ERR_EXCLUSION_INVAL;
				goto error;
			}
		}

		/* Get filter expression from client. */
		if (cmd_ctx->lsm->u.enable.expression_len > 0) {
			size_t expression_len =
					cmd_ctx->lsm->u.enable.expression_len;

			if (expression_len > LTTNG_FILTER_MAX_LEN) {
				ret = LTTNG_ERR_FILTER_INVAL;
				free(exclusion);
				goto error;
			}

			filter_expression = zmalloc(expression_len);
			if (!filter_expression) {
				free(exclusion);
				ret = LTTNG_ERR_FILTER_NOMEM;
				goto error;
			}

			/* Receive var. len. data */
			DBG("Receiving var len filter's expression from client ...");
			ret = lttcomm_recv_unix_sock(sock, filter_expression,
					expression_len);
			if (ret <= 0) {
				DBG("Nothing recv() from client var len data... continuing");
				*sock_error = 1;
				free(filter_expression);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_INVAL;
				goto error;
			}
		}

		/* Handle filter and get bytecode from client. */
		if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
			size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;

			if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
				ret = LTTNG_ERR_FILTER_INVAL;
				free(filter_expression);
				free(exclusion);
				goto error;
			}

			bytecode = zmalloc(bytecode_len);
			if (!bytecode) {
				free(filter_expression);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_NOMEM;
				goto error;
			}

			/* Receive var. len. data */
			DBG("Receiving var len filter's bytecode from client ...");
			ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
			if (ret <= 0) {
				DBG("Nothing recv() from client var len data... continuing");
				*sock_error = 1;
				free(filter_expression);
				free(bytecode);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_INVAL;
				goto error;
			}

			if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
				free(filter_expression);
				free(bytecode);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_INVAL;
				goto error;
			}
		}

		ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event,
				filter_expression, bytecode, exclusion,
				kernel_poll_pipe[1]);
		break;
	}
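	/*
	 * Note: on success, cmd_enable_event() above takes ownership of
	 * filter_expression, bytecode and exclusion, which is why the error
	 * paths free them but the success path does not.
	 */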
	case LTTNG_LIST_TRACEPOINTS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		session_lock_list();
		nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
		session_unlock_list();
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
				sizeof(struct lttng_event) * nb_events);
		free(events);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	{
		struct lttng_event_field *fields;
		ssize_t nb_fields;

		session_lock_list();
		nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
				&fields);
		session_unlock_list();
		if (nb_fields < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_fields;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, fields,
				sizeof(struct lttng_event_field) * nb_fields);
		free(fields);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SYSCALLS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		nb_events = cmd_list_syscalls(&events);
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
				sizeof(struct lttng_event) * nb_events);
		free(events);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_TRACKER_PIDS:
	{
		int32_t *pids = NULL;
		ssize_t nr_pids;

		nr_pids = cmd_list_tracker_pids(cmd_ctx->session,
				cmd_ctx->lsm->domain.type, &pids);
		if (nr_pids < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nr_pids;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, pids,
				sizeof(int32_t) * nr_pids);
		free(pids);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SET_CONSUMER_URI:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri == 0) {
			ret = LTTNG_ERR_INVALID;
			goto error;
		}

		uris = zmalloc(len);
		if (uris == NULL) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		/* Receive variable len data */
		DBG("Receiving %zu URI(s) from client ...", nb_uri);
		ret = lttcomm_recv_unix_sock(sock, uris, len);
		if (ret <= 0) {
			DBG("No URIs received from client... continuing");
			*sock_error = 1;
			ret = LTTNG_ERR_SESSION_FAIL;
			free(uris);
			goto error;
		}

		ret = cmd_set_consumer_uri(cmd_ctx->session, nb_uri, uris);
		free(uris);
		if (ret != LTTNG_OK) {
			goto error;
		}

		break;
	}
	case LTTNG_START_TRACE:
	{
		ret = cmd_start_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_STOP_TRACE:
	{
		ret = cmd_stop_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_CREATE_SESSION:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
				&cmd_ctx->creds, 0);
		free(uris);
		break;
	}
	case LTTNG_DESTROY_SESSION:
	{
		ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);

		/* Set session to NULL so we do not unlock it after free. */
		cmd_ctx->session = NULL;
		break;
	}
	case LTTNG_LIST_DOMAINS:
	{
		ssize_t nb_dom;
		struct lttng_domain *domains = NULL;

		nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
		if (nb_dom < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_dom;
			goto error;
		}

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, domains,
				nb_dom * sizeof(struct lttng_domain));
		free(domains);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_CHANNELS:
	{
		ssize_t payload_size;
		struct lttng_channel *channels = NULL;

		payload_size = cmd_list_channels(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, &channels);
		if (payload_size < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -payload_size;
			goto error;
		}

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, channels,
				payload_size);
		free(channels);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_EVENTS:
	{
		ssize_t nb_event;
		struct lttng_event *events = NULL;
		struct lttcomm_event_command_header cmd_header;
		size_t total_size;

		memset(&cmd_header, 0, sizeof(cmd_header));
		/* Extended infos are included at the end of events */
		nb_event = cmd_list_events(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, cmd_ctx->lsm->u.list.channel_name,
				&events, &total_size);
		if (nb_event < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_event;
			goto error;
		}

		cmd_header.nb_events = nb_event;
		ret = setup_lttng_msg(cmd_ctx, events, total_size,
				&cmd_header, sizeof(cmd_header));
		free(events);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SESSIONS:
	{
		unsigned int nr_sessions;
		void *sessions_payload;
		size_t payload_len;

		session_lock_list();
		nr_sessions = lttng_sessions_count(
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
		payload_len = sizeof(struct lttng_session) * nr_sessions;
		sessions_payload = zmalloc(payload_len);

		if (!sessions_payload) {
			session_unlock_list();
			ret = -ENOMEM;
			goto setup_error;
		}

		cmd_list_lttng_sessions(sessions_payload,
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
		session_unlock_list();

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, sessions_payload,
				payload_len);
		free(sessions_payload);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_REGISTER_CONSUMER:
	{
		struct consumer_data *cdata;

		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			cdata = &kconsumer_data;
			break;
		default:
			ret = LTTNG_ERR_UND;
			goto error;
		}

		ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.reg.path, cdata);
		break;
	}
	case LTTNG_DATA_PENDING:
	{
		int pending_ret;
		uint8_t pending_ret_byte;

		pending_ret = cmd_data_pending(cmd_ctx->session);

		/*
		 * This function may return 0 or 1 to indicate whether or not
		 * there is data pending. In case of error, it should return an
		 * LTTNG_ERR code. However, some code paths may still return
		 * a nondescript error code, which we handle by returning an
		 * "unknown" error.
		 */
		if (pending_ret == 0 || pending_ret == 1) {
			/*
			 * ret will be set to LTTNG_OK at the end of
			 * this function.
			 */
		} else if (pending_ret < 0) {
			ret = LTTNG_ERR_UNK;
			goto setup_error;
		} else {
			ret = pending_ret;
			goto setup_error;
		}

		pending_ret_byte = (uint8_t) pending_ret;

		/* 1 byte to return whether or not data is pending */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx,
				&pending_ret_byte, 1);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
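	/*
	 * Note on DATA_PENDING above: the reply payload is deliberately a
	 * single byte carrying the 0/1 "pending" flag; error conditions
	 * travel separately in the llm ret_code set at the end of this
	 * function.
	 */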
	case LTTNG_SNAPSHOT_ADD_OUTPUT:
	{
		struct lttcomm_lttng_output_id reply;

		ret = cmd_snapshot_add_output(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
		if (ret != LTTNG_OK) {
			goto error;
		}

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &reply,
				sizeof(reply));
		if (ret < 0) {
			goto setup_error;
		}

		/* Copy output list into message payload */
		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_DEL_OUTPUT:
	{
		ret = cmd_snapshot_del_output(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_output.output);
		break;
	}
	case LTTNG_SNAPSHOT_LIST_OUTPUT:
	{
		ssize_t nb_output;
		struct lttng_snapshot_output *outputs = NULL;

		nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
		if (nb_output < 0) {
			ret = -nb_output;
			goto error;
		}

		assert((nb_output > 0 && outputs) || nb_output == 0);
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, outputs,
				nb_output * sizeof(struct lttng_snapshot_output));
		free(outputs);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_RECORD:
	{
		ret = cmd_snapshot_record(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_record.output,
				cmd_ctx->lsm->u.snapshot_record.wait);
		break;
	}
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
				nb_uri, &cmd_ctx->creds);
		free(uris);
		break;
	}
	case LTTNG_CREATE_SESSION_LIVE:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
				nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);
		free(uris);
		break;
	}
	case LTTNG_SAVE_SESSION:
	{
		ret = cmd_save_sessions(&cmd_ctx->lsm->u.save_session.attr,
				&cmd_ctx->creds);
		break;
	}
	case LTTNG_SET_SESSION_SHM_PATH:
	{
		ret = cmd_set_session_shm_path(cmd_ctx->session,
				cmd_ctx->lsm->u.set_shm_path.shm_path);
		break;
	}
	case LTTNG_REGENERATE_METADATA:
	{
		ret = cmd_regenerate_metadata(cmd_ctx->session);
		break;
	}
	case LTTNG_REGENERATE_STATEDUMP:
	{
		ret = cmd_regenerate_statedump(cmd_ctx->session);
		break;
	}
	case LTTNG_REGISTER_TRIGGER:
	{
		ret = cmd_register_trigger(cmd_ctx, sock,
				notification_thread_handle);
		break;
	}
	case LTTNG_UNREGISTER_TRIGGER:
	{
		ret = cmd_unregister_trigger(cmd_ctx, sock,
				notification_thread_handle);
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		break;
	}
error:
	if (cmd_ctx->llm == NULL) {
		DBG("Missing llm structure. Allocating one.");
		if (setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0) < 0) {
			goto setup_error;
		}
	}
	/* Set return code */
	cmd_ctx->llm->ret_code = ret;
setup_error:
	if (cmd_ctx->session) {
		session_unlock(cmd_ctx->session);
	}
	if (need_tracing_session) {
		session_unlock_list();
	}
init_setup_error:
	assert(!rcu_read_ongoing());
	return ret;
}
/*
 * Thread managing health check socket.
 */
static void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/* We might hit an error path before this is created. */
	lttng_poll_init(&events);

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(config.health_unix_sock_path.value);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		goto error;
	}

	if (is_root) {
		/* lttng health client socket path permissions */
		ret = chown(config.health_unix_sock_path.value, 0,
				utils_get_group_id(config.tracing_group_name.value));
		if (ret < 0) {
			ERR("Unable to set group on %s", config.health_unix_sock_path.value);
			goto error;
		}

		ret = chmod(config.health_unix_sock_path.value,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s", config.health_unix_sock_path.value);
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	sessiond_notify_ready();

	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & LPOLLIN) {
					continue;
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			continue;
		}

		rcu_thread_online();

		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
			/*
			 * health_check_state returns 0 if health is
			 * bad.
			 */
			if (!health_check_state(health_sessiond, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		DBG2("Health check return value %" PRIx64, reply.ret_code);

		ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(config.health_unix_sock_path.value);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	rcu_unregister_thread();
	return NULL;
}
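/*
 * Illustrative sketch (hypothetical name) of how a client can decode the
 * health reply bitmask built above, where bit i of ret_code is set when
 * health_check_state() reported thread i as faulty.
 */
#if 0
static int thread_is_faulty_sketch(uint64_t ret_code, unsigned int thread_idx)
{
	return !!(ret_code & (1ULL << thread_idx));
}
#endif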
/*
 * This thread manages all client requests using the unix client socket for
 * communication.
 */
static void *thread_manage_clients(void *data)
{
	int sock = -1, ret, i, pollfd, err = -1;
	int sock_error;
	uint32_t revents, nb_fd;
	struct command_ctx *cmd_ctx = NULL;
	struct lttng_poll_event events;

	DBG("[thread] Manage client started");

	rcu_register_thread();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);

	health_code_update();

	ret = lttcomm_listen_unix_sock(client_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	sessiond_notify_ready();
	ret = sem_post(&load_info->message_thread_ready);
	if (ret) {
		PERROR("sem_post message_thread_ready");
		goto error;
	}

	/* This testpoint is after we signal readiness to the parent. */
	if (testpoint(sessiond_thread_manage_clients)) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_clients_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Accepting client command ...");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == client_sock) {
				if (revents & LPOLLIN) {
					continue;
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Client socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}
		}

		DBG("Wait for client response");

		health_code_update();

		sock = lttcomm_accept_unix_sock(client_sock);
		if (sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(sock);

		/* Set socket option for credentials retrieval */
		ret = lttcomm_setsockopt_creds_unix_sock(sock);
		if (ret < 0) {
			goto error;
		}

		/* Allocate context command to process the client request */
		cmd_ctx = zmalloc(sizeof(struct command_ctx));
		if (cmd_ctx == NULL) {
			PERROR("zmalloc cmd_ctx");
			goto error;
		}

		/* Allocate data buffer for reception */
		cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
		if (cmd_ctx->lsm == NULL) {
			PERROR("zmalloc cmd_ctx->lsm");
			goto error;
		}

		cmd_ctx->llm = NULL;
		cmd_ctx->session = NULL;

		health_code_update();

		/*
		 * Data is received from the lttng client. The struct
		 * lttcomm_session_msg (lsm) contains the command and data request of
		 * the client.
		 */
		DBG("Receiving data from client ...");
		ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
				sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(sock);
			if (ret) {
				PERROR("close");
			}
			sock = -1;
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		// TODO: Validate cmd_ctx including sanity check for
		// security purpose.

		rcu_thread_online();
		/*
		 * This function dispatches the work to the kernel or userspace tracer
		 * libs and fills the lttcomm_lttng_msg data structure with all the
		 * information needed by the client. The command context struct
		 * contains everything this function may need.
		 */
		ret = process_client_msg(cmd_ctx, sock, &sock_error);
		rcu_thread_offline();
		if (ret < 0) {
			ret = close(sock);
			if (ret) {
				PERROR("close");
			}
			sock = -1;
			/*
			 * TODO: Inform client somehow of the fatal error. At
			 * this point, ret < 0 means that a zmalloc failed
			 * (ENOMEM). Error detected but still accept
			 * command, unless a socket error has been
			 * detected.
			 */
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		DBG("Sending response (size: %d, retcode: %s (%d))",
				cmd_ctx->lttng_msg_size,
				lttng_strerror(-cmd_ctx->llm->ret_code),
				cmd_ctx->llm->ret_code);
		ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
		if (ret < 0) {
			ERR("Failed to send data back to client");
		}

		/* End of transmission */
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		sock = -1;

		clean_command_ctx(&cmd_ctx);

		health_code_update();
	}

exit:
error:
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	clean_command_ctx(&cmd_ctx);

error_listen:
error_create_poll:
	unlink(config.client_unix_sock_path.value);
	if (client_sock >= 0) {
		ret = close(client_sock);
		if (ret) {
			PERROR("close");
		}
	}

	if (err) {
		ERR("Health error occurred in %s", __func__);
	}

	health_unregister(health_sessiond);

	DBG("Client thread dying");

	rcu_unregister_thread();

	/*
	 * Since we are creating the consumer threads, we own them, so we need
	 * to join them before our thread exits.
	 */
	ret = join_consumer_thread(&kconsumer_data);
	if (ret) {
		errno = ret;
		PERROR("join_consumer");
	}

	ret = join_consumer_thread(&ustconsumer32_data);
	if (ret) {
		errno = ret;
		PERROR("join_consumer ust32");
	}

	ret = join_consumer_thread(&ustconsumer64_data);
	if (ret) {
		errno = ret;
		PERROR("join_consumer ust64");
	}
	return NULL;
}
static int string_match(const char *str1, const char *str2)
{
	return (str1 && str2) && !strcmp(str1, str2);
}
/*
 * Take an option from the getopt output and set it in the right variable to be
 * used later.
 *
 * Return 0 on success else a negative value.
 */
static int set_option(int opt, const char *arg, const char *optname)
{
	int ret = 0;

	if (string_match(optname, "client-sock") || opt == 'c') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-c, --client-sock");
		} else {
			config_string_set(&config.client_unix_sock_path,
					strdup(arg));
			if (!config.client_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "apps-sock") || opt == 'a') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-a, --apps-sock");
		} else {
			config_string_set(&config.apps_unix_sock_path,
					strdup(arg));
			if (!config.apps_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "daemonize") || opt == 'd') {
		config.daemonize = true;
	} else if (string_match(optname, "background") || opt == 'b') {
		config.background = true;
	} else if (string_match(optname, "group") || opt == 'g') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-g, --group");
		} else {
			config_string_set(&config.tracing_group_name,
					strdup(arg));
			if (!config.tracing_group_name.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "help") || opt == 'h') {
		ret = utils_show_help(8, "lttng-sessiond", help_msg);
		if (ret) {
			ERR("Cannot show --help for `lttng-sessiond`");
			perror("exec");
		}
		exit(ret ? EXIT_FAILURE : EXIT_SUCCESS);
	} else if (string_match(optname, "version") || opt == 'V') {
		fprintf(stdout, "%s\n", VERSION);
		exit(EXIT_SUCCESS);
	} else if (string_match(optname, "sig-parent") || opt == 'S') {
		config.sig_parent = true;
	} else if (string_match(optname, "kconsumerd-err-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--kconsumerd-err-sock");
		} else {
			config_string_set(&config.kconsumerd_err_unix_sock_path,
					strdup(arg));
			if (!config.kconsumerd_err_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "kconsumerd-cmd-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--kconsumerd-cmd-sock");
		} else {
			config_string_set(&config.kconsumerd_cmd_unix_sock_path,
					strdup(arg));
			if (!config.kconsumerd_cmd_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "ustconsumerd64-err-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--ustconsumerd64-err-sock");
		} else {
			config_string_set(&config.consumerd64_err_unix_sock_path,
					strdup(arg));
			if (!config.consumerd64_err_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "ustconsumerd64-cmd-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--ustconsumerd64-cmd-sock");
		} else {
			config_string_set(&config.consumerd64_cmd_unix_sock_path,
					strdup(arg));
			if (!config.consumerd64_cmd_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "ustconsumerd32-err-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--ustconsumerd32-err-sock");
		} else {
			config_string_set(&config.consumerd32_err_unix_sock_path,
					strdup(arg));
			if (!config.consumerd32_err_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "ustconsumerd32-cmd-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--ustconsumerd32-cmd-sock");
		} else {
			config_string_set(&config.consumerd32_cmd_unix_sock_path,
					strdup(arg));
			if (!config.consumerd32_cmd_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "no-kernel")) {
		config.no_kernel = true;
	} else if (string_match(optname, "quiet") || opt == 'q') {
		lttng_opt_quiet = true;
	} else if (string_match(optname, "verbose") || opt == 'v') {
		/* Verbose level can increase using multiple -v */
		if (arg) {
			/* Value obtained from config file */
			config.verbose = config_parse_value(arg);
		} else {
			/* -v used on command line */
			config.verbose++;
		}
		/* Clamp value to [0, 3] */
		config.verbose = config.verbose < 0 ? 0 :
			(config.verbose <= 3 ? config.verbose : 3);
	} else if (string_match(optname, "verbose-consumer")) {
		if (arg) {
			config.verbose_consumer = config_parse_value(arg);
		} else {
			config.verbose_consumer++;
		}
	} else if (string_match(optname, "consumerd32-path")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--consumerd32-path");
		} else {
			config_string_set(&config.consumerd32_bin_path,
					strdup(arg));
			if (!config.consumerd32_bin_path.value) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
		}
	} else if (string_match(optname, "consumerd32-libdir")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--consumerd32-libdir");
		} else {
			config_string_set(&config.consumerd32_lib_dir,
					strdup(arg));
			if (!config.consumerd32_lib_dir.value) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
		}
	} else if (string_match(optname, "consumerd64-path")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--consumerd64-path");
		} else {
			config_string_set(&config.consumerd64_bin_path,
					strdup(arg));
			if (!config.consumerd64_bin_path.value) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
		}
	} else if (string_match(optname, "consumerd64-libdir")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--consumerd64-libdir");
		} else {
			config_string_set(&config.consumerd64_lib_dir,
					strdup(arg));
			if (!config.consumerd64_lib_dir.value) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
		}
	} else if (string_match(optname, "pidfile") || opt == 'p') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-p, --pidfile");
		} else {
			config_string_set(&config.pid_file_path, strdup(arg));
			if (!config.pid_file_path.value) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
		}
	} else if (string_match(optname, "agent-tcp-port")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--agent-tcp-port");
		} else {
			unsigned long v;

			errno = 0;
			v = strtoul(arg, NULL, 0);
			if (errno != 0 || !isdigit(arg[0])) {
				ERR("Wrong value in --agent-tcp-port parameter: %s", arg);
				return -1;
			}
			if (v == 0 || v >= 65535) {
				ERR("Port overflow in --agent-tcp-port parameter: %s", arg);
				return -1;
			}
			config.agent_tcp_port = (uint32_t) v;
			DBG3("Agent TCP port set to non default: %u",
					config.agent_tcp_port);
		}
	} else if (string_match(optname, "load") || opt == 'l') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-l, --load");
		} else {
			config_string_set(&config.load_session_path, strdup(arg));
			if (!config.load_session_path.value) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
		}
	} else if (string_match(optname, "kmod-probes")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--kmod-probes");
		} else {
			config_string_set(&config.kmod_probes_list, strdup(arg));
			if (!config.kmod_probes_list.value) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
		}
	} else if (string_match(optname, "extra-kmod-probes")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--extra-kmod-probes");
		} else {
			config_string_set(&config.kmod_extra_probes_list,
					strdup(arg));
			if (!config.kmod_extra_probes_list.value) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
		}
	} else if (string_match(optname, "config") || opt == 'f') {
		/* This is handled in set_options() thus silent skip. */
		goto end;
	} else {
		/* Unknown option or other error.
		 * Error is printed by getopt, just return */
		ret = -1;
	}

end:
	if (ret == -EINVAL) {
		const char *opt_name = "unknown";
		int i;

		for (i = 0; i < sizeof(long_options) / sizeof(struct option);
				i++) {
			if (opt == long_options[i].val) {
				opt_name = long_options[i].name;
				break;
			}
		}

		WARN("Invalid argument provided for option \"%s\", using default value.",
				opt_name);
	}

	return ret;
}
/*
 * config_entry_handler_cb used to handle options read from a config file.
 * See config_entry_handler_cb comment in common/config/session-config.h for
 * the return value conventions.
 */
static int config_entry_handler(const struct config_entry *entry, void *unused)
{
	int ret = 0, i;

	if (!entry || !entry->name || !entry->value) {
		ret = -EINVAL;
		goto end;
	}

	/* Check if the option is to be ignored */
	for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
		if (!strcmp(entry->name, config_ignore_options[i])) {
			goto end;
		}
	}

	for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1;
			i++) {

		/* Ignore if not fully matched. */
		if (strcmp(entry->name, long_options[i].name)) {
			continue;
		}

		/*
		 * If the option takes no argument on the command line, we have to
		 * check if the value is "true". We support non-zero numeric values,
		 * true, on and yes.
		 */
		if (!long_options[i].has_arg) {
			ret = config_parse_value(entry->value);
			if (ret <= 0) {
				if (ret) {
					WARN("Invalid configuration value \"%s\" for option %s",
							entry->value, entry->name);
				}
				/* False, skip boolean config option. */
				goto end;
			}
		}

		ret = set_option(long_options[i].val, entry->value, entry->name);
		goto end;
	}

	WARN("Unrecognized option \"%s\" in daemon configuration file.", entry->name);

end:
	return ret;
}
/*
 * daemon configuration loading and argument parsing
 */
static int set_options(int argc, char **argv)
{
	int ret = 0, c = 0, option_index = 0;
	int orig_optopt = optopt, orig_optind = optind;
	char *optstring;
	const char *config_path = NULL;

	optstring = utils_generate_optstring(long_options,
			sizeof(long_options) / sizeof(struct option));
	if (!optstring) {
		ret = -ENOMEM;
		goto end;
	}

	/* Check for the --config option */
	while ((c = getopt_long(argc, argv, optstring, long_options,
					&option_index)) != -1) {
		if (c == '?') {
			ret = -EINVAL;
			goto end;
		} else if (c != 'f') {
			/* if not equal to --config option. */
			continue;
		}

		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-f, --config");
		} else {
			config_path = utils_expand_path(optarg);
			if (!config_path) {
				ERR("Failed to resolve path: %s", optarg);
			}
		}
	}

	ret = config_get_section_entries(config_path, config_section_name,
			config_entry_handler, NULL);
	if (ret) {
		if (ret > 0) {
			ERR("Invalid configuration option at line %i", ret);
			ret = -1;
		}
		goto end;
	}

	/* Reset getopt's global state */
	optopt = orig_optopt;
	optind = orig_optind;
	while (1) {
		option_index = -1;
		/*
		 * getopt_long() will not set option_index if it encounters a
		 * short option.
		 */
		c = getopt_long(argc, argv, optstring, long_options,
				&option_index);
		if (c == -1) {
			break;
		}

		/*
		 * Pass NULL as the long option name if popt left the index
		 * unset.
		 */
		ret = set_option(c, optarg,
				option_index < 0 ? NULL :
				long_options[option_index].name);
		if (ret < 0) {
			break;
		}
	}

end:
	free(optstring);
	return ret;
}
/*
 * Creates the two sockets needed by the daemon.
 * apps_sock - The communication socket for all UST apps.
 * client_sock - The communication socket of the cli tool (lttng).
 */
static int init_daemon_socket(void)
{
	int ret = 0;
	mode_t old_umask;

	old_umask = umask(0);

	/* Create client tool unix socket */
	client_sock = lttcomm_create_unix_sock(config.client_unix_sock_path.value);
	if (client_sock < 0) {
		ERR("Create unix sock failed: %s", config.client_unix_sock_path.value);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(client_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", client_sock);
	}

	/* File permission MUST be 660 */
	ret = chmod(config.client_unix_sock_path.value, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", config.client_unix_sock_path.value);
		PERROR("chmod");
		goto end;
	}

	/* Create the application unix socket */
	apps_sock = lttcomm_create_unix_sock(config.apps_unix_sock_path.value);
	if (apps_sock < 0) {
		ERR("Create unix sock failed: %s", config.apps_unix_sock_path.value);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(apps_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", apps_sock);
	}

	/* File permission MUST be 666 */
	ret = chmod(config.apps_unix_sock_path.value,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", config.apps_unix_sock_path.value);
		PERROR("chmod");
		goto end;
	}

	DBG3("Session daemon client socket %d and application socket %d created",
			client_sock, apps_sock);

end:
	umask(old_umask);
	return ret;
}
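/*
 * Note: init_daemon_socket() above clears the umask for the duration of
 * the socket creation so that the explicit chmod() calls (660 for the
 * client socket, 666 for the application socket) fully determine the
 * resulting modes; the previous umask is restored before returning.
 */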
/*
 * Check if the global socket is available, and if a daemon is answering at the
 * other side. If yes, error is returned.
 */
static int check_existing_daemon(void)
{
	/* Is there anybody out there ? */
	if (lttng_session_daemon_alive()) {
		return -EEXIST;
	}

	return 0;
}
/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	gid_t gid;

	gid = utils_get_group_id(config.tracing_group_name.value);

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
		PERROR("chown");
	}

	/*
	 * Ensure all applications and tracing group can search the run
	 * dir. Allow everyone to read the directory, since it does not
	 * buy us anything to hide its content.
	 */
	ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
	if (ret < 0) {
		ERR("Unable to set permissions on %s", rundir);
		PERROR("chmod");
	}

	/* lttng client socket path */
	ret = chown(config.client_unix_sock_path.value, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s",
				config.client_unix_sock_path.value);
		PERROR("chown");
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s",
				kconsumer_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s",
				ustconsumer64_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 32-bit ustconsumer compat32 error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s",
				ustconsumer32_data.err_unix_sock_path);
		PERROR("chown");
	}

	DBG("All permissions are set");

	return ret;
}
/*
 * Create the lttng run directory needed for all global sockets and pipe.
 */
static int create_lttng_rundir(void)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", config.rundir.value);

	ret = mkdir(config.rundir.value, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", config.rundir.value);
			goto error;
		} else {
			ret = 0;
		}
	}

error:
	return ret;
}
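/*
 * Treating EEXIST as success makes the rundir creation idempotent across
 * daemon restarts; any other mkdir() failure is fatal.
 */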
/*
 * Setup sockets and directory needed by the consumerds' communication with the
 * session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data)
{
	int ret;
	char *path;

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		path = config.kconsumerd_path.value;
		break;
	case LTTNG_CONSUMER64_UST:
		path = config.consumerd64_path.value;
		break;
	case LTTNG_CONSUMER32_UST:
		path = config.consumerd32_path.value;
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
	if (ret < 0 && errno != EEXIST) {
		PERROR("mkdir");
		ERR("Failed to create %s", path);
		goto error;
	}
	ret = 0;

	if (is_root) {
		ret = chown(path, 0,
				utils_get_group_id(config.tracing_group_name.value));
		if (ret < 0) {
			ERR("Unable to set group on %s", path);
			PERROR("chown");
			goto error;
		}
	}

	/* Create the consumerd error unix socket */
	consumer_data->err_sock =
			lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s",
				consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	ret = utils_set_fd_cloexec(consumer_data->err_sock);
	if (ret < 0) {
		PERROR("utils_set_fd_cloexec");
		/* continue anyway */
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s",
				consumer_data->err_unix_sock_path);
		PERROR("chmod");
		goto error;
	}

error:
	return ret;
}
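/*
 * The consumer directory mode above (S_IRWXU | S_IRGRP | S_IXGRP) is 0750:
 * the tracing group, set by the chown() following the mkdir(), can traverse
 * and list the directory but not modify it.
 */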
/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	case SIGUSR1:
		CMM_STORE_SHARED(recv_child_signal, 1);
		break;
	default:
		break;
	}
}
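/*
 * recv_child_signal is written with CMM_STORE_SHARED() (a liburcu primitive)
 * because it is polled by the daemonizing parent through lttng_daemonize();
 * the annotated store keeps the flag update from being torn or optimized
 * away, and everything done here is async-signal-safe.
 */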
/*
 * Setup signal handler for:
 *   SIGINT, SIGTERM, SIGPIPE, SIGUSR1
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_mask = sigset;
	sa.sa_flags = 0;

	sa.sa_handler = sighandler;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	sa.sa_handler = SIG_IGN;
	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");

	return ret;
}
/*
 * Raise the open files limit. This daemon can open a large number of file
 * descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("failed to set open files limit");
	}
}
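/*
 * Raising rlim_max above the current hard limit requires CAP_SYS_RESOURCE,
 * which is why main() only calls set_ulimit() when running as root.
 */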
static int write_pidfile(void)
{
	return utils_create_pid_file(getpid(), config.pid_file_path.value);
}
/*
 * Create lockfile using the rundir and return its fd.
 */
static int create_lockfile(void)
{
	return utils_create_lock_file(config.lock_file_path.value);
}
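/*
 * For illustration only: a run-directory lock file of this kind is commonly
 * implemented by opening the file and taking a non-blocking exclusive lock,
 * so a second daemon instance fails fast while the fd is held for the
 * daemon's lifetime. The hypothetical sketch below is NOT the actual
 * utils_create_lock_file() implementation; the helper name and error
 * handling are assumptions (requires <fcntl.h>, <string.h>, <unistd.h>).
 */
#if 0 /* example sketch, not compiled */
static int example_create_lock_file(const char *filepath)
{
	int fd, ret;
	struct flock lock;

	fd = open(filepath, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR);
	if (fd < 0) {
		return -1;
	}

	memset(&lock, 0, sizeof(lock));
	lock.l_type = F_WRLCK;		/* exclusive write lock */
	lock.l_whence = SEEK_SET;	/* l_start/l_len of 0: whole file */

	ret = fcntl(fd, F_SETLK, &lock);
	if (ret < 0) {
		/* Presumably another daemon instance holds the lock. */
		close(fd);
		return -1;
	}

	/* Keep fd open; the lock is released when the process exits. */
	return fd;
}
#endif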
/*
 * Write agent TCP port using the rundir.
 */
static int write_agent_port(void)
{
	return utils_create_pid_file(config.agent_tcp_port,
			config.agent_port_file_path.value);
}
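/*
 * The agent port file reuses utils_create_pid_file(), which simply writes a
 * decimal integer to the given path; the value happens to be a TCP port
 * rather than a pid.
 */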
static int set_clock_plugin_env(void)
{
	int ret = 0;
	char *env_value = NULL;

	if (!config.lttng_ust_clock_plugin.value) {
		goto end;
	}

	ret = asprintf(&env_value, "LTTNG_UST_CLOCK_PLUGIN=%s",
			config.lttng_ust_clock_plugin.value);
	if (ret < 0) {
		PERROR("asprintf");
		goto end;
	}

	ret = putenv(env_value);
	if (ret) {
		free(env_value);
		PERROR("putenv of LTTNG_UST_CLOCK_PLUGIN");
		goto end;
	}

	DBG("Updated LTTNG_UST_CLOCK_PLUGIN environment variable to \"%s\"",
			config.lttng_ust_clock_plugin.value);
end:
	return ret;
}
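/*
 * Ownership note: on success, putenv() keeps env_value in the environment
 * rather than copying it, so the string must not be freed here; it is only
 * freed on the putenv() failure path above.
 */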
int main(int argc, char **argv)
{
	int ret = 0, retval = 0;
	void *status;
	const char *env_app_timeout;
	struct lttng_pipe *ust32_channel_monitor_pipe = NULL,
			*ust64_channel_monitor_pipe = NULL,
			*kernel_channel_monitor_pipe = NULL;
	bool notification_thread_running = false;

	init_kernel_workarounds();

	rcu_register_thread();

	if (set_signal_handler()) {
		retval = -1;
		goto exit_set_signal_handler;
	}

	page_size = sysconf(_SC_PAGESIZE);
	if (page_size < 0) {
		PERROR("sysconf _SC_PAGESIZE");
		page_size = LONG_MAX;
		WARN("Fallback page size to %ld", page_size);
	}

	ret = sessiond_config_init(&config);
	if (ret) {
		retval = -1;
		goto exit_set_signal_handler;
	}
	/*
	 * Parse arguments and load the daemon configuration file.
	 *
	 * We have an exit_options exit path to free memory reserved by
	 * set_options. This is needed because the rest of sessiond_cleanup()
	 * depends on ht_cleanup_thread, which depends on lttng_daemonize, which
	 * depends on set_options.
	 */
	if (set_options(argc, argv)) {
		retval = -1;
		goto exit_options;
	}

	/* Init config from environment variables. */
	sessiond_config_apply_env_config(&config);

	/*
	 * Resolve all paths received as arguments, configuration option, or
	 * through environment variable as absolute paths. This is necessary
	 * since daemonizing causes the sessiond's current working directory
	 * to change to "/".
	 */
	ret = sessiond_config_resolve_paths(&config);
	if (ret) {
		goto exit_options;
	}
	lttng_opt_verbose = config.verbose;
	lttng_opt_quiet = config.quiet;
	kconsumer_data.err_unix_sock_path =
			config.kconsumerd_err_unix_sock_path.value;
	kconsumer_data.cmd_unix_sock_path =
			config.kconsumerd_cmd_unix_sock_path.value;
	ustconsumer32_data.err_unix_sock_path =
			config.consumerd32_err_unix_sock_path.value;
	ustconsumer32_data.cmd_unix_sock_path =
			config.consumerd32_cmd_unix_sock_path.value;
	ustconsumer64_data.err_unix_sock_path =
			config.consumerd64_err_unix_sock_path.value;
	ustconsumer64_data.cmd_unix_sock_path =
			config.consumerd64_cmd_unix_sock_path.value;
	set_clock_plugin_env();

	sessiond_config_log(&config);
	if (config.daemonize || config.background) {
		int i;

		ret = lttng_daemonize(&child_ppid, &recv_child_signal,
				!config.background);
		if (ret < 0) {
			retval = -1;
			goto exit_options;
		}

		/*
		 * We are in the child. Make sure all other file descriptors are
		 * closed, in case we are called with more opened file
		 * descriptors than the standard ones.
		 */
		for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
			(void) close(i);
		}
	}

	if (run_as_create_worker(argv[0]) < 0) {
		goto exit_create_run_as_worker_cleanup;
	}
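	/*
	 * Closing descriptors from 3 up to sysconf(_SC_OPEN_MAX) above drops
	 * anything inherited from the invoking process; only the standard
	 * descriptors 0-2 survive the transition to daemon mode.
	 */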
	/*
	 * Starting from here, we can create threads. This needs to be after
	 * lttng_daemonize due to RCU.
	 */

	/*
	 * Initialize the health check subsystem. This call should set the
	 * appropriate time values.
	 */
	health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
	if (!health_sessiond) {
		PERROR("health_app_create error");
		retval = -1;
		goto exit_health_sessiond_cleanup;
	}

	/* Create thread to clean up RCU hash tables */
	if (init_ht_cleanup_thread(&ht_cleanup_thread)) {
		retval = -1;
		goto exit_ht_cleanup;
	}

	/* Create thread quit pipe */
	if (init_thread_quit_pipe()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

	if (create_lttng_rundir()) {
		retval = -1;
		goto exit_init_data;
	}
	if (is_root) {
		/* Create global run dir with root access */

		kernel_channel_monitor_pipe = lttng_pipe_open(0);
		if (!kernel_channel_monitor_pipe) {
			ERR("Failed to create kernel consumer channel monitor pipe");
			retval = -1;
			goto exit_init_data;
		}
		kconsumer_data.channel_monitor_pipe =
				lttng_pipe_release_writefd(
					kernel_channel_monitor_pipe);
		if (kconsumer_data.channel_monitor_pipe < 0) {
			retval = -1;
			goto exit_init_data;
		}
	}

	lockfile_fd = create_lockfile();
	if (lockfile_fd < 0) {
		retval = -1;
		goto exit_init_data;
	}

	/* Set consumer initial state */
	kernel_consumerd_state = CONSUMER_STOPPED;
	ust_consumerd_state = CONSUMER_STOPPED;
	ust32_channel_monitor_pipe = lttng_pipe_open(0);
	if (!ust32_channel_monitor_pipe) {
		ERR("Failed to create 32-bit user space consumer channel monitor pipe");
		retval = -1;
		goto exit_init_data;
	}
	ustconsumer32_data.channel_monitor_pipe = lttng_pipe_release_writefd(
			ust32_channel_monitor_pipe);
	if (ustconsumer32_data.channel_monitor_pipe < 0) {
		retval = -1;
		goto exit_init_data;
	}

	ust64_channel_monitor_pipe = lttng_pipe_open(0);
	if (!ust64_channel_monitor_pipe) {
		ERR("Failed to create 64-bit user space consumer channel monitor pipe");
		retval = -1;
		goto exit_init_data;
	}
	ustconsumer64_data.channel_monitor_pipe = lttng_pipe_release_writefd(
			ust64_channel_monitor_pipe);
	if (ustconsumer64_data.channel_monitor_pipe < 0) {
		retval = -1;
		goto exit_init_data;
	}
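	/*
	 * Each channel monitor pipe is split here: the write side is handed to
	 * the matching consumerd's data (consumer_data.channel_monitor_pipe),
	 * while the lttng_pipe object retains the read side, which is later
	 * acquired by the notification thread handle below.
	 */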
	/*
	 * See if a daemon already exists.
	 */
	if (check_existing_daemon()) {
		ERR("A session daemon is already running.");
		/*
		 * We do not goto exit because we must not cleanup() because a
		 * daemon is already running.
		 */
		retval = -1;
		goto exit_init_data;
	}

	/*
	 * Init UST app hash table. Alloc the hash table before this point
	 * since cleanup() can get called after it.
	 */
	if (ust_app_ht_alloc()) {
		ERR("Failed to allocate UST app hash table");
		retval = -1;
		goto exit_init_data;
	}

	/*
	 * Initialize the agent app hash table. We allocate the hash table here
	 * since cleanup() can get called after this point.
	 */
	if (agent_app_ht_alloc()) {
		ERR("Failed to allocate Agent app hash table");
		retval = -1;
		goto exit_init_data;
	}
	/*
	 * These actions must be executed as root. We do that *after* setting up
	 * the sockets path because we MUST make the check for another daemon
	 * using those paths *before* trying to set the kernel consumer sockets
	 * and init the kernel tracer.
	 */
	if (is_root) {
		if (set_consumer_sockets(&kconsumer_data)) {
			retval = -1;
			goto exit_init_data;
		}

		/* Setup kernel tracer */
		if (!config.no_kernel) {
			init_kernel_tracer();
			if (kernel_tracer_fd >= 0) {
				ret = syscall_init_table();
				if (ret < 0) {
					ERR("Unable to populate syscall table. "
							"Syscall tracing won't "
							"work for this session "
							"daemon.");
				}
			}
		}

		/* Set ulimit for open files */
		set_ulimit();
	}

	/* init lttng_fd tracking must be done after set_ulimit. */
	lttng_fd_init();
	if (set_consumer_sockets(&ustconsumer64_data)) {
		retval = -1;
		goto exit_init_data;
	}

	if (set_consumer_sockets(&ustconsumer32_data)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Setup the needed unix socket */
	if (init_daemon_socket()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Set credentials to socket */
	if (is_root && set_permissions(config.rundir.value)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (config.sig_parent) {
		ppid = getppid();
	}
	/* Setup the kernel pipe for waking up the kernel thread */
	if (is_root && !config.no_kernel) {
		if (utils_create_pipe_cloexec(kernel_poll_pipe)) {
			retval = -1;
			goto exit_init_data;
		}
	}

	/* Setup the thread apps communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_pipe)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Setup the thread apps notify communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_notify_pipe)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Initialize global buffer per UID and PID registry. */
	buffer_reg_init_uid_registry();
	buffer_reg_init_pid_registry();

	/* Init UST command queue. */
	cds_wfcq_init(&ust_cmd_queue.head, &ust_cmd_queue.tail);

	/*
	 * Get session list pointer. This pointer MUST NOT be free'd. This list
	 * is statically declared in session.c
	 */
	session_list_ptr = session_get_list();
	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		config.app_socket_timeout = atoi(env_app_timeout);
	} else {
		config.app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}

	ret = write_pidfile();
	if (ret) {
		ERR("Error in write_pidfile");
		retval = -1;
		goto exit_init_data;
	}

	ret = write_agent_port();
	if (ret) {
		ERR("Error in write_agent_port");
		retval = -1;
		goto exit_init_data;
	}
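	/*
	 * Caveat: atoi() reports no errors, so a non-numeric value in the
	 * timeout environment variable silently becomes 0 rather than falling
	 * back to DEFAULT_APP_SOCKET_RW_TIMEOUT.
	 */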
	/* Initialize communication library */
	lttcomm_init();
	/* Initialize TCP timeout values */
	lttcomm_inet_init();

	if (load_session_init_data(&load_info) < 0) {
		retval = -1;
		goto exit_init_data;
	}
	load_info->path = config.load_session_path.value;
	/* Create health-check thread. */
	ret = pthread_create(&health_thread, default_pthread_attr(),
			thread_manage_health, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create health");
		retval = -1;
		goto exit_health;
	}

	/* notification_thread_handle acquires the pipes' read side. */
	notification_thread_handle = notification_thread_handle_create(
			ust32_channel_monitor_pipe,
			ust64_channel_monitor_pipe,
			kernel_channel_monitor_pipe);
	if (!notification_thread_handle) {
		retval = -1;
		ERR("Failed to create notification thread shared data");
		stop_threads();
		goto exit_notification;
	}

	/* Create notification thread. */
	ret = pthread_create(&notification_thread, default_pthread_attr(),
			thread_notification, notification_thread_handle);
	if (ret) {
		errno = ret;
		PERROR("pthread_create notification");
		retval = -1;
		stop_threads();
		goto exit_notification;
	}
	notification_thread_running = true;
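	/*
	 * notification_thread_running is only set once pthread_create() has
	 * succeeded; the teardown path at the end of main() uses it to decide
	 * whether a quit command must be sent and the thread joined before the
	 * handle is destroyed.
	 */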
	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, default_pthread_attr(),
			thread_manage_clients, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create clients");
		retval = -1;
		stop_threads();
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, default_pthread_attr(),
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create dispatch");
		retval = -1;
		stop_threads();
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, default_pthread_attr(),
			thread_registration_apps, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create registration");
		retval = -1;
		stop_threads();
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, default_pthread_attr(),
			thread_manage_apps, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create apps");
		retval = -1;
		stop_threads();
		goto exit_apps;
	}

	/* Create thread to manage application notify socket */
	ret = pthread_create(&apps_notify_thread, default_pthread_attr(),
			ust_thread_manage_notify, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create notify");
		retval = -1;
		stop_threads();
		goto exit_apps_notify;
	}

	/* Create agent registration thread. */
	ret = pthread_create(&agent_reg_thread, default_pthread_attr(),
			agent_thread_manage_registration, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create agent");
		retval = -1;
		stop_threads();
		goto exit_agent_reg;
	}
	/* Don't start this thread if kernel tracing is not requested nor root */
	if (is_root && !config.no_kernel) {
		/* Create kernel thread to manage kernel event */
		ret = pthread_create(&kernel_thread, default_pthread_attr(),
				thread_manage_kernel, (void *) NULL);
		if (ret) {
			errno = ret;
			PERROR("pthread_create kernel");
			retval = -1;
			stop_threads();
			goto exit_kernel;
		}
	}

	/* Create session loading thread. */
	ret = pthread_create(&load_session_thread, default_pthread_attr(),
			thread_load_session, load_info);
	if (ret) {
		errno = ret;
		PERROR("pthread_create load_session_thread");
		retval = -1;
		stop_threads();
		goto exit_load_session;
	}
	/*
	 * This is where we start awaiting program completion (e.g. through
	 * signal that asks threads to teardown).
	 */

	ret = pthread_join(load_session_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join load_session_thread");
		retval = -1;
	}
exit_load_session:

	if (is_root && !config.no_kernel) {
		ret = pthread_join(kernel_thread, &status);
		if (ret) {
			errno = ret;
			PERROR("pthread_join");
			retval = -1;
		}
	}
exit_kernel:

	ret = pthread_join(agent_reg_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join agent");
		retval = -1;
	}
exit_agent_reg:

	ret = pthread_join(apps_notify_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join apps notify");
		retval = -1;
	}
exit_apps_notify:

	ret = pthread_join(apps_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join apps");
		retval = -1;
	}
exit_apps:

	ret = pthread_join(reg_apps_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join");
		retval = -1;
	}
exit_reg_apps:

	/*
	 * Join dispatch thread after joining reg_apps_thread to ensure
	 * we don't leak applications in the queue.
	 */
	ret = pthread_join(dispatch_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join");
		retval = -1;
	}
exit_dispatch:

	ret = pthread_join(client_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join");
		retval = -1;
	}

exit_client:
exit_notification:
	ret = pthread_join(health_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join health thread");
		retval = -1;
	}
exit_health:
exit_init_data:
	/*
	 * Wait for all pending call_rcu work to complete before tearing
	 * down data structures. call_rcu worker may be trying to
	 * perform lookups in those structures.
	 */
	rcu_barrier();
	/*
	 * sessiond_cleanup() is called when no other thread is running, except
	 * the ht_cleanup thread, which is needed to destroy the hash tables.
	 */
	rcu_thread_online();
	sessiond_cleanup();
	/*
	 * Ensure all prior call_rcu are done. call_rcu callbacks may push
	 * hash tables to the ht_cleanup thread. Therefore, we ensure that
	 * the queue is empty before shutting down the clean-up thread.
	 */
	rcu_barrier();

	/*
	 * The teardown of the notification system is performed after the
	 * session daemon's teardown in order to allow it to be notified
	 * of the active session and channels at the moment of the teardown.
	 */
	if (notification_thread_handle) {
		if (notification_thread_running) {
			notification_thread_command_quit(
					notification_thread_handle);
			ret = pthread_join(notification_thread, &status);
			if (ret) {
				errno = ret;
				PERROR("pthread_join notification thread");
				retval = -1;
			}
		}
		notification_thread_handle_destroy(notification_thread_handle);
	}

	rcu_thread_offline();
	rcu_unregister_thread();

	ret = fini_ht_cleanup_thread(&ht_cleanup_thread);
	if (ret) {
		retval = -1;
	}
	lttng_pipe_destroy(ust32_channel_monitor_pipe);
	lttng_pipe_destroy(ust64_channel_monitor_pipe);
	lttng_pipe_destroy(kernel_channel_monitor_pipe);
exit_ht_cleanup:

	health_app_destroy(health_sessiond);
exit_health_sessiond_cleanup:
exit_create_run_as_worker_cleanup:

exit_options:
	sessiond_cleanup_options();

exit_set_signal_handler: