#include <getopt.h>
#include <grp.h>
#include <limits.h>
+#include <paths.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
+#include <common/daemonize.h>
#include "lttng-sessiond.h"
#include "buffer-registry.h"
static const char *opt_pidfile;
static int opt_sig_parent;
static int opt_verbose_consumer;
-static int opt_daemon;
+static int opt_daemon, opt_background;
static int opt_no_kernel;
-static int is_root; /* Set to 1 if the daemon is running as root */
static pid_t ppid; /* Parent PID for --sig-parent option */
+static pid_t child_ppid; /* Internal parent PID used with daemonize. */
static char *rundir;
+static int lockfile_fd = -1;
+
+/* Set to 1 when a SIGUSR1 signal is received. */
+static int recv_child_signal;
/*
* Consumer daemon specific control data. Every value not initialized here is
/* JUL TCP port for registration. Used by the JUL thread. */
unsigned int jul_tcp_port = DEFAULT_JUL_TCP_PORT;
+/* Am I root or not. */
+int is_root; /* Set to 1 if the daemon is running as root */
+
+/*
+ * Whether sessiond is ready for commands/health check requests.
+ * NR_LTTNG_SESSIOND_READY must match the number of calls to
+ * lttng_sessiond_notify_ready().
+ */
+#define NR_LTTNG_SESSIOND_READY 2
+int lttng_sessiond_ready = NR_LTTNG_SESSIOND_READY;
+
+/* Notify parents that we are ready for cmd and health check */
+/*
+ * Notify parents that we are ready for cmd and health check.
+ *
+ * lttng_sessiond_ready starts at NR_LTTNG_SESSIOND_READY; each readiness
+ * point calls this once, and the call that brings the counter to zero
+ * performs the actual parent notifications.
+ */
+static
+void lttng_sessiond_notify_ready(void)
+{
+	if (uatomic_sub_return(&lttng_sessiond_ready, 1) == 0) {
+		/*
+		 * Notify parent pid that we are ready to accept command
+		 * for client side. This ppid is the one from the
+		 * external process that spawned us.
+		 */
+		if (opt_sig_parent) {
+			kill(ppid, SIGUSR1);
+		}
+
+		/*
+		 * Notify the parent of the fork() process that we are
+		 * ready.
+		 */
+		if (opt_daemon || opt_background) {
+			kill(child_ppid, SIGUSR1);
+		}
+	}
+}
+
static
void setup_consumerd_path(void)
{
}
}
+/*
+ * Generate the full lock file path using the rundir.
+ *
+ * Return the snprintf() return value thus a negative value is an error.
+ */
+/*
+ * Generate the full lock file path using the rundir.
+ *
+ * Return the snprintf() return value; thus a negative value is an error.
+ */
+static int generate_lock_file_path(char *path, size_t len)
+{
+	int ret;
+
+	/* Both the output buffer and the global rundir must be set. */
+	assert(path);
+	assert(rundir);
+
+	/* Build lockfile path from rundir. */
+	ret = snprintf(path, len, "%s/" DEFAULT_LTTNG_SESSIOND_LOCKFILE, rundir);
+	if (ret < 0) {
+		PERROR("snprintf lockfile path");
+	}
+	/*
+	 * NOTE(review): snprintf reports truncation with a non-negative
+	 * return (ret >= len), which this function treats as success —
+	 * confirm callers tolerate a truncated lockfile path.
+	 */
+
+	return ret;
+}
+
/*
* Cleanup the daemon
*/
DBG("Removing %s", path);
(void) unlink(path);
+ snprintf(path, PATH_MAX, "%s/%s", rundir,
+ DEFAULT_LTTNG_SESSIOND_JULPORT_FILE);
+ DBG("Removing %s", path);
+ (void) unlink(path);
+
/* kconsumerd */
snprintf(path, PATH_MAX,
DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
DBG("Removing directory %s", path);
(void) rmdir(path);
- free(rundir);
-
DBG("Cleaning up all sessions");
/* Destroy session list mutex */
close_consumer_sockets();
+
+ /*
+ * Cleanup lock file by deleting it and finally closing it which will
+ * release the file system lock.
+ */
+ if (lockfile_fd >= 0) {
+ char lockfile_path[PATH_MAX];
+
+ ret = generate_lock_file_path(lockfile_path, sizeof(lockfile_path));
+ if (ret > 0) {
+ ret = remove(lockfile_path);
+ if (ret < 0) {
+ PERROR("remove lock file");
+ }
+ ret = close(lockfile_fd);
+ if (ret < 0) {
+ PERROR("close lock file");
+ }
+ }
+ }
+
+ /*
+ * We do NOT rmdir rundir because there are other processes
+ * using it, for instance lttng-relayd, which can start in
+ * parallel with this teardown.
+ */
+
+ free(rundir);
+
/* <fun> */
DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
"Matthew, BEET driven development works!%c[%dm",
*/
lttng_poll_init(&events);
- if (testpoint(thread_manage_kernel)) {
+ if (testpoint(sessiond_thread_manage_kernel)) {
goto error_testpoint;
}
health_code_update();
- if (testpoint(thread_manage_kernel_before_loop)) {
+ if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
goto error_testpoint;
}
update_poll_flag = 0;
}
- DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));
+ DBG("Thread kernel polling");
/* Poll infinite value of time */
restart:
health_poll_entry();
ret = lttng_poll_wait(&events, -1);
+ DBG("Thread kernel return from poll on %d fds",
+ LTTNG_POLL_GETNB(&events));
health_poll_exit();
if (ret < 0) {
/*
health_code_update();
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
/* Thread quit pipe has been closed. Killing thread. */
ret = sessiond_check_thread_quit_pipe(pollfd, revents);
if (ret) {
/* Check for data on kernel pipe */
if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
- do {
- ret = read(kernel_poll_pipe[0], &tmp, 1);
- } while (ret < 0 && errno == EINTR);
+ (void) lttng_read(kernel_poll_pipe[0],
+ &tmp, 1);
/*
* Ret value is useless here, if this pipe gets any actions an
* update is required anyway.
*/
static void *thread_manage_consumer(void *data)
{
- int sock = -1, i, ret, pollfd, err = -1;
+ int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
uint32_t revents, nb_fd;
enum lttcomm_return_code code;
struct lttng_poll_event events;
restart:
health_poll_entry();
- if (testpoint(thread_manage_consumer)) {
+ if (testpoint(sessiond_thread_manage_consumer)) {
goto error;
}
health_code_update();
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
/* Thread quit pipe has been closed. Killing thread. */
ret = sessiond_check_thread_quit_pipe(pollfd, revents);
if (ret) {
}
health_code_update();
-
if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
/* Connect both socket, command and metadata. */
consumer_data->cmd_sock =
/* Infinite blocking call, waiting for transmission */
restart_poll:
while (1) {
+ health_code_update();
+
+ /* Exit the thread because the thread quit pipe has been triggered. */
+ if (should_quit) {
+ /* Not a health error. */
+ err = 0;
+ goto exit;
+ }
+
health_poll_entry();
ret = lttng_poll_wait(&events, -1);
health_poll_exit();
health_code_update();
- /* Thread quit pipe has been closed. Killing thread. */
- ret = sessiond_check_thread_quit_pipe(pollfd, revents);
- if (ret) {
- err = 0;
- goto exit;
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
}
+ /*
+ * Thread quit pipe has been triggered, flag that we should stop
+ * but continue the current loop to handle potential data from
+ * consumer.
+ */
+ should_quit = sessiond_check_thread_quit_pipe(pollfd, revents);
+
if (pollfd == sock) {
/* Event on the consumerd socket */
if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
ERR("Handling metadata request");
goto error;
}
- break;
- } else {
- ERR("Unknown pollfd");
- goto error;
}
+ /* No need for an else branch all FDs are tested prior. */
}
health_code_update();
}
}
consumer_data->cmd_sock = -1;
}
- if (*consumer_data->metadata_sock.fd_ptr >= 0) {
+ if (consumer_data->metadata_sock.fd_ptr &&
+ *consumer_data->metadata_sock.fd_ptr >= 0) {
ret = close(*consumer_data->metadata_sock.fd_ptr);
if (ret) {
PERROR("close");
}
}
-
if (sock >= 0) {
ret = close(sock);
if (ret) {
pthread_mutex_unlock(&consumer_data->lock);
/* Cleanup metadata socket mutex. */
- pthread_mutex_destroy(consumer_data->metadata_sock.lock);
- free(consumer_data->metadata_sock.lock);
-
+ if (consumer_data->metadata_sock.lock) {
+ pthread_mutex_destroy(consumer_data->metadata_sock.lock);
+ free(consumer_data->metadata_sock.lock);
+ }
lttng_poll_clean(&events);
error_poll:
if (err) {
static void *thread_manage_apps(void *data)
{
int i, ret, pollfd, err = -1;
+ ssize_t size_ret;
uint32_t revents, nb_fd;
struct lttng_poll_event events;
health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);
- if (testpoint(thread_manage_apps)) {
+ if (testpoint(sessiond_thread_manage_apps)) {
goto error_testpoint;
}
goto error;
}
- if (testpoint(thread_manage_apps_before_loop)) {
+ if (testpoint(sessiond_thread_manage_apps_before_loop)) {
goto error;
}
health_code_update();
while (1) {
- DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));
+ DBG("Apps thread polling");
/* Inifinite blocking call, waiting for transmission */
restart:
health_poll_entry();
ret = lttng_poll_wait(&events, -1);
+ DBG("Apps thread return from poll on %d fds",
+ LTTNG_POLL_GETNB(&events));
health_poll_exit();
if (ret < 0) {
/*
health_code_update();
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
/* Thread quit pipe has been closed. Killing thread. */
ret = sessiond_check_thread_quit_pipe(pollfd, revents);
if (ret) {
int sock;
/* Empty pipe */
- do {
- ret = read(apps_cmd_pipe[0], &sock, sizeof(sock));
- } while (ret < 0 && errno == EINTR);
- if (ret < 0 || ret < sizeof(sock)) {
+ size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
+ if (size_ret < sizeof(sock)) {
PERROR("read apps cmd pipe");
goto error;
}
}
DBG("Apps with sock %d added to poll set", sock);
-
- health_code_update();
-
- break;
}
} else {
/*
/* Socket closed on remote end. */
ust_app_unregister(pollfd);
- break;
}
}
*/
static int send_socket_to_thread(int fd, int sock)
{
- int ret;
+ ssize_t ret;
/*
* It's possible that the FD is set as invalid with -1 concurrently just
goto error;
}
- do {
- ret = write(fd, &sock, sizeof(sock));
- } while (ret < 0 && errno == EINTR);
- if (ret < 0 || ret != sizeof(sock)) {
+ ret = lttng_write(fd, &sock, sizeof(sock));
+ if (ret < sizeof(sock)) {
PERROR("write apps pipe %d", fd);
if (ret < 0) {
ret = -errno;
/* All good. Don't send back the write positive ret value. */
ret = 0;
error:
- return ret;
+ return (int) ret;
}
/*
uint32_t revents = LTTNG_POLL_GETEV(&events, i);
int pollfd = LTTNG_POLL_GETFD(&events, i);
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
&wait_queue->head, head) {
if (pollfd == wait_node->app->sock &&
health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);
+ if (testpoint(sessiond_thread_app_reg_dispatch)) {
+ goto error_testpoint;
+ }
+
health_code_update();
CDS_INIT_LIST_HEAD(&wait_queue.head);
if (ret < 0) {
PERROR("close ust sock dispatch %d", ust_cmd->sock);
}
- lttng_fd_put(1, LTTNG_FD_APPS);
+ lttng_fd_put(LTTNG_FD_APPS, 1);
free(ust_cmd);
goto error;
}
if (ret < 0) {
PERROR("close ust sock dispatch %d", ust_cmd->sock);
}
- lttng_fd_put(1, LTTNG_FD_APPS);
+ lttng_fd_put(LTTNG_FD_APPS, 1);
free(wait_node);
free(ust_cmd);
continue;
if (ret < 0) {
PERROR("close ust sock dispatch %d", ust_cmd->sock);
}
- lttng_fd_put(1, LTTNG_FD_APPS);
+ lttng_fd_put(LTTNG_FD_APPS, 1);
}
free(ust_cmd);
}
free(wait_node);
}
+error_testpoint:
DBG("Dispatch thread dying");
if (err) {
health_error();
health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);
- if (testpoint(thread_registration_apps)) {
+ if (testpoint(sessiond_thread_registration_apps)) {
goto error_testpoint;
}
revents = LTTNG_POLL_GETEV(&events, i);
pollfd = LTTNG_POLL_GETFD(&events, i);
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
/* Thread quit pipe has been closed. Killing thread. */
ret = sessiond_check_thread_quit_pipe(pollfd, revents);
if (ret) {
exit:
error:
- if (err) {
- health_error();
- ERR("Health error occurred in %s", __func__);
- }
-
/* Notify that the registration thread is gone */
notify_ust_apps(0);
error_create_poll:
error_testpoint:
DBG("UST Registration thread cleanup complete");
+ if (err) {
+ health_error();
+ ERR("Health error occurred in %s", __func__);
+ }
health_unregister(health_sessiond);
return NULL;
if (ret != 0) {
errno = ret;
if (ret == ETIMEDOUT) {
+ int pth_ret;
+
/*
* Call has timed out so we kill the kconsumerd_thread and return
* an error.
*/
ERR("Condition timed out. The consumer thread was never ready."
" Killing it");
- ret = pthread_cancel(consumer_data->thread);
- if (ret < 0) {
+ pth_ret = pthread_cancel(consumer_data->thread);
+ if (pth_ret < 0) {
PERROR("pthread_cancel consumer thread");
}
} else {
PERROR("pthread_cond_wait failed consumer thread");
}
+ /* Caller is expecting a negative value on failure. */
+ ret = -1;
goto error;
}
consumer_to_use = consumerd32_bin;
} else {
DBG("Could not find any valid consumerd executable");
+ ret = -EINVAL;
break;
}
DBG("Using kernel consumer at: %s", consumer_to_use);
- execl(consumer_to_use,
+ ret = execl(consumer_to_use,
"lttng-consumerd", verbosity, "-k",
"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
"--consumerd-err-sock", consumer_data->err_unix_sock_path,
if (consumerd64_libdir[0] != '\0') {
free(tmpnew);
}
- if (ret) {
- goto error;
- }
break;
}
case LTTNG_CONSUMER32_UST:
if (consumerd32_libdir[0] != '\0') {
free(tmpnew);
}
- if (ret) {
- goto error;
- }
break;
}
default:
exit(EXIT_FAILURE);
}
if (errno != 0) {
- PERROR("kernel start consumer exec");
+ PERROR("Consumer execl()");
}
+ /* Reaching this point, we got a failure on our execl(). */
exit(EXIT_FAILURE);
} else if (pid > 0) {
ret = pid;
consumer = session->kernel_session->consumer;
dir_name = DEFAULT_KERNEL_TRACE_DIR;
break;
+ case LTTNG_DOMAIN_JUL:
case LTTNG_DOMAIN_UST:
DBG3("Copying tracing session consumer output in UST session");
if (session->ust_session->consumer) {
assert(session->consumer);
switch (domain->type) {
+ case LTTNG_DOMAIN_JUL:
case LTTNG_DOMAIN_UST:
break;
default:
break;
case LTTNG_DOMAIN_JUL:
- {
- ret = LTTNG_ERR_UNKNOWN_DOMAIN;
- goto error;
- }
case LTTNG_DOMAIN_UST:
{
if (!ust_app_supported()) {
}
/* 32-bit */
+ pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
if (consumerd32_bin[0] != '\0' &&
ustconsumer32_data.pid == 0 &&
cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
}
case LTTNG_ENABLE_EVENT:
{
+ struct lttng_event_exclusion *exclusion = NULL;
+ struct lttng_filter_bytecode *bytecode = NULL;
+
+ /* Handle exclusion events and receive it from the client. */
+ if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
+ size_t count = cmd_ctx->lsm->u.enable.exclusion_count;
+
+ exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
+ (count * LTTNG_SYMBOL_NAME_LEN));
+ if (!exclusion) {
+ ret = LTTNG_ERR_EXCLUSION_NOMEM;
+ goto error;
+ }
+
+ DBG("Receiving var len exclusion event list from client ...");
+ exclusion->count = count;
+ ret = lttcomm_recv_unix_sock(sock, exclusion->names,
+ count * LTTNG_SYMBOL_NAME_LEN);
+ if (ret <= 0) {
+ DBG("Nothing recv() from client var len data... continuing");
+ *sock_error = 1;
+ free(exclusion);
+ ret = LTTNG_ERR_EXCLUSION_INVAL;
+ goto error;
+ }
+ }
+
+ /* Handle filter and get bytecode from client. */
+ if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
+ size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;
+
+ if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
+ ret = LTTNG_ERR_FILTER_INVAL;
+ free(exclusion);
+ goto error;
+ }
+
+ bytecode = zmalloc(bytecode_len);
+ if (!bytecode) {
+ free(exclusion);
+ ret = LTTNG_ERR_FILTER_NOMEM;
+ goto error;
+ }
+
+ /* Receive var. len. data */
+ DBG("Receiving var len filter's bytecode from client ...");
+ ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
+ if (ret <= 0) {
+ DBG("Nothing recv() from client car len data... continuing");
+ *sock_error = 1;
+ free(bytecode);
+ free(exclusion);
+ ret = LTTNG_ERR_FILTER_INVAL;
+ goto error;
+ }
+
+ if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
+ free(bytecode);
+ free(exclusion);
+ ret = LTTNG_ERR_FILTER_INVAL;
+ goto error;
+ }
+ }
+
ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
cmd_ctx->lsm->u.enable.channel_name,
- &cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
+ &cmd_ctx->lsm->u.enable.event, bytecode, exclusion,
+ kernel_poll_pipe[1]);
break;
}
case LTTNG_ENABLE_ALL_EVENT:
struct lttng_event *events;
ssize_t nb_events;
+ session_lock_list();
nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
+ session_unlock_list();
if (nb_events < 0) {
/* Return value is a negative lttng_error_code. */
ret = -nb_events;
struct lttng_event_field *fields;
ssize_t nb_fields;
+ session_lock_list();
nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
&fields);
+ session_unlock_list();
if (nb_fields < 0) {
/* Return value is a negative lttng_error_code. */
ret = -nb_fields;
case LTTNG_LIST_CHANNELS:
{
int nb_chan;
- struct lttng_channel *channels;
+ struct lttng_channel *channels = NULL;
nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
cmd_ctx->session, &channels);
cmd_ctx->lsm->u.reg.path, cdata);
break;
}
- case LTTNG_ENABLE_EVENT_WITH_FILTER:
- {
- struct lttng_filter_bytecode *bytecode;
-
- if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
- ret = LTTNG_ERR_FILTER_INVAL;
- goto error;
- }
- if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
- ret = LTTNG_ERR_FILTER_INVAL;
- goto error;
- }
- bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
- if (!bytecode) {
- ret = LTTNG_ERR_FILTER_NOMEM;
- goto error;
- }
- /* Receive var. len. data */
- DBG("Receiving var len data from client ...");
- ret = lttcomm_recv_unix_sock(sock, bytecode,
- cmd_ctx->lsm->u.enable.bytecode_len);
- if (ret <= 0) {
- DBG("Nothing recv() from client var len data... continuing");
- *sock_error = 1;
- ret = LTTNG_ERR_FILTER_INVAL;
- goto error;
- }
-
- if (bytecode->len + sizeof(*bytecode)
- != cmd_ctx->lsm->u.enable.bytecode_len) {
- free(bytecode);
- ret = LTTNG_ERR_FILTER_INVAL;
- goto error;
- }
-
- ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
- cmd_ctx->lsm->u.enable.channel_name,
- &cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
- break;
- }
case LTTNG_DATA_PENDING:
{
ret = cmd_data_pending(cmd_ctx->session);
goto error;
}
+ lttng_sessiond_notify_ready();
+
while (1) {
DBG("Health check ready");
revents = LTTNG_POLL_GETEV(&events, i);
pollfd = LTTNG_POLL_GETFD(&events, i);
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
/* Thread quit pipe has been closed. Killing thread. */
ret = sessiond_check_thread_quit_pipe(pollfd, revents);
if (ret) {
rcu_thread_online();
- reply.ret_code = 0;
+ memset(&reply, 0, sizeof(reply));
for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
/*
* health_check_state returns 0 if health is
health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);
- if (testpoint(thread_manage_clients)) {
- goto error_testpoint;
- }
-
health_code_update();
ret = lttcomm_listen_unix_sock(client_sock);
goto error;
}
- /*
- * Notify parent pid that we are ready to accept command for client side.
- */
- if (opt_sig_parent) {
- kill(ppid, SIGUSR1);
+ lttng_sessiond_notify_ready();
+
+ /* This testpoint is after we signal readiness to the parent. */
+ if (testpoint(sessiond_thread_manage_clients)) {
+ goto error;
}
- if (testpoint(thread_manage_clients_before_loop)) {
+ if (testpoint(sessiond_thread_manage_clients_before_loop)) {
goto error;
}
health_code_update();
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
/* Thread quit pipe has been closed. Killing thread. */
ret = sessiond_check_thread_quit_pipe(pollfd, revents);
if (ret) {
error_listen:
error_create_poll:
-error_testpoint:
unlink(client_unix_sock_path);
if (client_sock >= 0) {
ret = close(client_sock);
fprintf(stderr, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
fprintf(stderr, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
+ fprintf(stderr, " -b, --background Start as a daemon, keeping console open.\n");
fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
fprintf(stderr, " -V, --version Show version number.\n");
- fprintf(stderr, " -S, --sig-parent Send SIGCHLD to parent pid to notify readiness.\n");
+ fprintf(stderr, " -S, --sig-parent Send SIGUSR1 to parent pid to notify readiness.\n");
fprintf(stderr, " -q, --quiet No output at all.\n");
fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
fprintf(stderr, " -p, --pidfile FILE Write a pid to FILE name overriding the default value.\n");
{ "no-kernel", 0, 0, 'N' },
{ "pidfile", 1, 0, 'p' },
{ "jul-tcp-port", 1, 0, 'J' },
+ { "background", 0, 0, 'b' },
{ NULL, 0, 0, 0 }
};
while (1) {
int option_index = 0;
- c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:J:",
+ c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:J:b",
long_options, &option_index);
if (c == -1) {
break;
case 'd':
opt_daemon = 1;
break;
+ case 'b':
+ opt_background = 1;
+ break;
case 'g':
tracing_group_name = optarg;
break;
DBG("SIGTERM caught");
stop_threads();
break;
+ case SIGUSR1:
+ CMM_STORE_SHARED(recv_child_signal, 1);
+ break;
default:
break;
}
return ret;
}
- DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");
+ if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
+ PERROR("sigaction");
+ return ret;
+ }
+
+ DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
return ret;
}
return;
}
+/*
+ * Create lockfile using the rundir and return its fd.
+ */
+/*
+ * Create lockfile using the rundir and return its fd.
+ *
+ * On failure, the negative return value of the step that failed
+ * (path generation or lock-file creation) is propagated.
+ */
+static int create_lockfile(void)
+{
+	char lockfile_path[PATH_MAX];
+	int ret;
+
+	ret = generate_lock_file_path(lockfile_path, sizeof(lockfile_path));
+	if (ret >= 0) {
+		/* Path built successfully; take the file system lock. */
+		ret = utils_create_lock_file(lockfile_path);
+	}
+
+	return ret;
+}
+
+/*
+ * Write JUL TCP port using the rundir.
+ */
+static void write_julport(void)
+{
+ int ret;
+ char path[PATH_MAX];
+
+ assert(rundir);
+
+ ret = snprintf(path, sizeof(path), "%s/"
+ DEFAULT_LTTNG_SESSIOND_JULPORT_FILE, rundir);
+ if (ret < 0) {
+ PERROR("snprintf julport path");
+ goto error;
+ }
+
+ /*
+ * Create TCP JUL port file in rundir. Return value is of no importance.
+ * The execution will continue even though we are not able to write the
+ * file.
+ */
+ (void) utils_create_pid_file(jul_tcp_port, path);
+
+error:
+ return;
+}
+
/*
* main
*/
rcu_register_thread();
+ if ((ret = set_signal_handler()) < 0) {
+ goto error;
+ }
+
setup_consumerd_path();
page_size = sysconf(_SC_PAGESIZE);
}
/* Daemonize */
- if (opt_daemon) {
+ if (opt_daemon || opt_background) {
int i;
- /*
- * fork
- * child: setsid, close FD 0, 1, 2, chdir /
- * parent: exit (if fork is successful)
- */
- ret = daemon(0, 0);
+ ret = lttng_daemonize(&child_ppid, &recv_child_signal,
+ !opt_background);
if (ret < 0) {
- PERROR("daemon");
goto error;
}
+
/*
- * We are in the child. Make sure all other file
- * descriptors are closed, in case we are called with
- * more opened file descriptors than the standard ones.
+ * We are in the child. Make sure all other file descriptors are
+ * closed, in case we are called with more opened file descriptors than
+ * the standard ones.
*/
for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
(void) close(i);
if (is_root) {
rundir = strdup(DEFAULT_LTTNG_RUNDIR);
+ if (!rundir) {
+ ret = -ENOMEM;
+ goto error;
+ }
/* Create global run dir with root access */
ret = create_lttng_rundir(rundir);
}
}
+ lockfile_fd = create_lockfile();
+ if (lockfile_fd < 0) {
+ goto error;
+ }
+
/* Set consumer initial state */
kernel_consumerd_state = CONSUMER_STOPPED;
ust_consumerd_state = CONSUMER_STOPPED;
*/
ust_app_ht_alloc();
+ /* Initialize JUL domain subsystem. */
+ if ((ret = jul_init()) < 0) {
+ /* ENOMEM at this point. */
+ goto error;
+ }
+
/* After this point, we can safely call cleanup() with "goto exit" */
/*
goto exit;
}
- if ((ret = set_signal_handler()) < 0) {
- goto exit;
- }
-
/* Setup the needed unix socket */
if ((ret = init_daemon_socket()) < 0) {
goto exit;
}
write_pidfile();
+ write_julport();
/* Initialize communication library */
lttcomm_init();
goto exit_health_sessiond_cleanup;
}
- /* Create thread to manage the client socket */
+ /* Create thread to clean up RCU hash tables */
ret = pthread_create(&ht_cleanup_thread, NULL,
thread_ht_cleanup, (void *) NULL);
if (ret != 0) {
goto exit_ht_cleanup;
}
- /* Create thread to manage the client socket */
+ /* Create health-check thread */
ret = pthread_create(&health_thread, NULL,
thread_manage_health, (void *) NULL);
if (ret != 0) {
ret = pthread_create(&apps_notify_thread, NULL,
ust_thread_manage_notify, (void *) NULL);
if (ret != 0) {
- PERROR("pthread_create apps");
+ PERROR("pthread_create notify");
goto exit_apps_notify;
}
ret = pthread_create(&jul_reg_thread, NULL,
jul_thread_manage_registration, (void *) NULL);
if (ret != 0) {
- PERROR("pthread_create apps");
+ PERROR("pthread_create JUL");
goto exit_jul_reg;
}