diff --git a/src/bin/lttng-sessiond/main.c b/src/bin/lttng-sessiond/main.c
index ba35726b8..9c2fe4a3e 100644
--- a/src/bin/lttng-sessiond/main.c
+++ b/src/bin/lttng-sessiond/main.c
@@ -582,6 +582,34 @@ static int generate_lock_file_path(char *path, size_t len)
 	return ret;
 }
 
+/*
+ * Wait on consumer process termination.
+ *
+ * Needs to be called with the consumer data lock held, or from a context
+ * ensuring no concurrent access to the data (e.g.: cleanup).
+ */
+static void wait_consumer(struct consumer_data *consumer_data)
+{
+	pid_t ret;
+	int status;
+
+	if (consumer_data->pid <= 0) {
+		return;
+	}
+
+	DBG("Waiting for complete teardown of consumerd (PID: %d)",
+			consumer_data->pid);
+	ret = waitpid(consumer_data->pid, &status, 0);
+	if (ret == -1) {
+		PERROR("consumerd waitpid pid: %d", consumer_data->pid);
+	}
+	if (!WIFEXITED(status)) {
+		ERR("consumerd termination with error: %d",
+				WEXITSTATUS(status));
+	}
+	consumer_data->pid = 0;
+}
+
 /*
  * Cleanup the session daemon's data structures.
  */
@@ -676,6 +704,10 @@ static void sessiond_cleanup(void)
 		}
 	}
 
+	wait_consumer(&kconsumer_data);
+	wait_consumer(&ustconsumer64_data);
+	wait_consumer(&ustconsumer32_data);
+
 	DBG("Cleaning up all agent apps");
 	agent_app_ht_clean();
 
@@ -765,6 +797,8 @@ static void sessiond_cleanup_options(void)
 	free(kmod_probes_list);
 	free(kmod_extra_probes_list);
 
+	run_as_destroy_worker();
+
 	/* <fun> */
 	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
 		"Matthew, BEET driven development works!%c[%dm",
@@ -1129,31 +1163,33 @@ static void *thread_manage_kernel(void *data)
 			}
 
 			/* Check for data on kernel pipe */
-			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
-				(void) lttng_read(kernel_poll_pipe[0],
-						&tmp, 1);
-				/*
-				 * Ret value is useless here, if this pipe gets any actions an
-				 * update is required anyway.
-				 */
-				update_poll_flag = 1;
-				continue;
-			} else {
-				/*
-				 * New CPU detected by the kernel. Adding kernel stream to
-				 * kernel session and updating the kernel consumer
-				 */
-				if (revents & LPOLLIN) {
+			if (revents & LPOLLIN) {
+				if (pollfd == kernel_poll_pipe[0]) {
+					(void) lttng_read(kernel_poll_pipe[0],
+							&tmp, 1);
+					/*
+					 * Ret value is useless here, if this pipe gets any actions an
+					 * update is required anyway.
+					 */
+					update_poll_flag = 1;
+					continue;
+				} else {
+					/*
+					 * New CPU detected by the kernel. Adding kernel stream to
+					 * kernel session and updating the kernel consumer
+					 */
 					ret = update_kernel_stream(&kconsumer_data, pollfd);
 					if (ret < 0) {
 						continue;
 					}
 					break;
-					/*
-					 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
-					 * and unregister kernel stream at this point.
-					 */
 				}
+			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+				update_poll_flag = 1;
+				continue;
+			} else {
+				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+				goto error;
 			}
 		}
 	}
@@ -1212,6 +1248,9 @@ static void *thread_manage_consumer(void *data)
 
 	DBG("[thread] Manage consumer started");
 
+	rcu_register_thread();
+	rcu_thread_online();
+
 	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);
 
 	health_code_update();
@@ -1280,9 +1319,14 @@ restart:
 
 		/* Event on the registration socket */
 		if (pollfd == consumer_data->err_sock) {
-			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+			if (revents & LPOLLIN) {
+				continue;
+			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
 				ERR("consumer err socket poll error");
 				goto error;
+			} else {
+				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+				goto error;
 			}
 		}
 	}
@@ -1412,7 +1456,8 @@ restart_poll:
 
 			if (pollfd == sock) {
 				/* Event on the consumerd socket */
-				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
+						&& !(revents & LPOLLIN)) {
 					ERR("consumer err socket second poll error");
 					goto error;
 				}
@@ -1430,6 +1475,11 @@ restart_poll:
 				goto exit;
 			} else if (pollfd == consumer_data->metadata_fd) {
+				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
+						&& !(revents & LPOLLIN)) {
+					ERR("consumer err metadata socket second poll error");
+					goto error;
+				}
 				/* UST metadata requests */
 				ret = ust_consumer_metadata_request(
 						&consumer_data->metadata_sock);
@@ -1493,7 +1543,6 @@ error:
 	unlink(consumer_data->err_unix_sock_path);
 	unlink(consumer_data->cmd_unix_sock_path);
-	consumer_data->pid = 0;
 	pthread_mutex_unlock(&consumer_data->lock);
 
 	/* Cleanup metadata socket mutex. */
@@ -1510,6 +1559,9 @@ error_poll:
 	health_unregister(health_sessiond);
 	DBG("consumer thread cleanup completed");
 
+	rcu_thread_offline();
+	rcu_unregister_thread();
+
 	return NULL;
 }
@@ -1595,10 +1647,7 @@ static void *thread_manage_apps(void *data)
 
 			/* Inspect the apps cmd pipe */
 			if (pollfd == apps_cmd_pipe[0]) {
-				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-					ERR("Apps command pipe error");
-					goto error;
-				} else if (revents & LPOLLIN) {
+				if (revents & LPOLLIN) {
					int sock;
 
 					/* Empty pipe */
@@ -1611,9 +1660,8 @@ static void *thread_manage_apps(void *data)
 					health_code_update();
 
 					/*
-					 * We only monitor the error events of the socket. This
-					 * thread does not handle any incoming data from UST
-					 * (POLLIN).
+					 * Since this is a command socket (write then read),
+					 * we only monitor the error events of the socket.
 					 */
 					ret = lttng_poll_add(&events, sock,
 							LPOLLERR | LPOLLHUP | LPOLLRDHUP);
@@ -1622,6 +1670,12 @@
 					}
 
 					DBG("Apps with sock %d added to poll set", sock);
+				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+					ERR("Apps command pipe error");
+					goto error;
+				} else {
+					ERR("Unknown poll events %u for sock %d", revents, pollfd);
+					goto error;
 				}
 			} else {
 				/*
@@ -1637,6 +1691,9 @@ static void *thread_manage_apps(void *data)
 
 					/* Socket closed on remote end. */
 					ust_app_unregister(pollfd);
+				} else {
+					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+					goto error;
 				}
 			}
 
@@ -1778,6 +1835,9 @@ static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
 				ust_app_destroy(wait_node->app);
 				free(wait_node);
 				break;
+			} else {
+				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+				goto error;
 			}
 		}
 	}
@@ -1979,7 +2039,7 @@ static void *thread_dispatch_ust_registration(void *data)
 		 * Don't care about return value. Let the manage apps threads
 		 * handle app unregistration upon socket close.
 		 */
-		(void) ust_app_register_done(app->sock);
+		(void) ust_app_register_done(app);
 
 		/*
 		 * Even if the application socket has been closed, send the app
@@ -2021,6 +2081,22 @@ error:
 		free(wait_node);
 	}
 
+	/* Empty command queue. */
+	for (;;) {
+		/* Dequeue command for registration */
+		node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
+		if (node == NULL) {
+			break;
+		}
+		ust_cmd = caa_container_of(node, struct ust_command, node);
+		ret = close(ust_cmd->sock);
+		if (ret < 0) {
+			PERROR("close ust sock exit dispatch %d", ust_cmd->sock);
+		}
+		lttng_fd_put(LTTNG_FD_APPS, 1);
+		free(ust_cmd);
+	}
+
 error_testpoint:
 	DBG("Dispatch thread dying");
 	if (err) {
@@ -2122,10 +2198,7 @@ static void *thread_registration_apps(void *data)
 
 		/* Event on the registration socket */
 		if (pollfd == apps_sock) {
-			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-				ERR("Register apps socket poll error");
-				goto error;
-			} else if (revents & LPOLLIN) {
+			if (revents & LPOLLIN) {
 				sock = lttcomm_accept_unix_sock(apps_sock);
 				if (sock < 0) {
 					goto error;
@@ -2212,6 +2285,12 @@
 				 * barrier with the exchange in cds_wfcq_enqueue.
 				 */
 				futex_nto1_wake(&ust_cmd_queue.futex);
+			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+				ERR("Register apps socket poll error");
+				goto error;
+			} else {
+				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+				goto error;
 			}
 		}
 	}
@@ -2713,7 +2792,7 @@ static int copy_session_consumer(int domain, struct ltt_session *session)
 	 * domain.
 	 */
 	if (session->kernel_session->consumer) {
-		consumer_destroy_output(session->kernel_session->consumer);
+		consumer_output_put(session->kernel_session->consumer);
 	}
 	session->kernel_session->consumer =
 		consumer_copy_output(session->consumer);
@@ -2727,7 +2806,7 @@ static int copy_session_consumer(int domain, struct ltt_session *session)
 	case LTTNG_DOMAIN_UST:
 		DBG3("Copying tracing session consumer output in UST session");
 		if (session->ust_session->consumer) {
-			consumer_destroy_output(session->ust_session->consumer);
+			consumer_output_put(session->ust_session->consumer);
 		}
 		session->ust_session->consumer =
 			consumer_copy_output(session->consumer);
@@ -2847,7 +2926,7 @@ static int create_kernel_session(struct ltt_session *session)
 			session->kernel_session->consumer->dst.trace_path,
 			S_IRWXU | S_IRWXG, session->uid, session->gid);
 	if (ret < 0) {
-		if (ret != -EEXIST) {
+		if (errno != EEXIST) {
 			ERR("Trace directory creation error");
 			goto error;
 		}
@@ -2909,6 +2988,8 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
 
 	DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
 
+	assert(!rcu_read_ongoing());
+
 	*sock_error = 0;
 
 	switch (cmd_ctx->lsm->cmd_type) {
@@ -3968,6 +4049,7 @@ setup_error:
 		session_unlock_list();
 	}
 init_setup_error:
+	assert(!rcu_read_ongoing());
 	return ret;
 }
@@ -4083,9 +4165,14 @@ restart:
 
 		/* Event on the registration socket */
 		if (pollfd == sock) {
-			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+			if (revents & LPOLLIN) {
+				continue;
+			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
 				ERR("Health socket poll error");
 				goto error;
+			} else {
+				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+				goto error;
 			}
 		}
 	}
@@ -4260,9 +4347,14 @@ static void *thread_manage_clients(void *data)
 
 		/* Event on the registration socket */
 		if (pollfd == client_sock) {
-			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+			if (revents & LPOLLIN) {
+				continue;
+			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
 				ERR("Client socket poll error");
 				goto error;
+			} else {
+				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+				goto error;
 			}
 		}
 	}
@@ -5384,6 +5476,10 @@ int main(int argc, char **argv)
 		}
 	}
 
+	if (run_as_create_worker(argv[0]) < 0) {
+		goto exit_create_run_as_worker_cleanup;
+	}
+
 	/*
 	 * Starting from here, we can create threads. This needs to be after
 	 * lttng_daemonize due to RCU.
 	 */
@@ -5937,6 +6033,10 @@ exit_apps:
 	}
 
 exit_reg_apps:
+	/*
+	 * Join dispatch thread after joining reg_apps_thread to ensure
+	 * we don't leak applications in the queue.
+	 */
 	ret = pthread_join(dispatch_thread, &status);
 	if (ret) {
 		errno = ret;
@@ -5997,11 +6097,16 @@ exit_ht_cleanup_quit_pipe:
 
 	health_app_destroy(health_sessiond);
 exit_health_sessiond_cleanup:
+exit_create_run_as_worker_cleanup:
 exit_options:
+	/* Ensure all prior call_rcu callbacks are done. */
+	rcu_barrier();
+
 	sessiond_cleanup_options();
 
 exit_set_signal_handler:
+
 	if (!retval) {
 		exit(EXIT_SUCCESS);
 	} else {
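
Note on the recurring change above: nearly every hunk in this patch reorders
the poll-event handling so that LPOLLIN is checked before the error flags,
and any unexpected event mask now fails loudly instead of being silently
ignored. Below is a minimal, self-contained sketch of that discipline
against plain poll(2). This is not lttng-tools code: the LPOLL* constants
in the diff are lttng compat wrappers over the flags used here, and
dispatch_revents() is a hypothetical name.

/* Consume readable data first (a peer hangup can still leave buffered
 * bytes), handle hangup/error explicitly, and treat anything else as a
 * hard error. */
#define _GNU_SOURCE	/* for POLLRDHUP */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static int dispatch_revents(int fd, short revents)
{
	if (revents & POLLIN) {
		char buf[256];
		ssize_t len;

		/* Data first, even if POLLHUP is also set. */
		len = read(fd, buf, sizeof(buf));
		return len < 0 ? -1 : 0;
	}
	if (revents & (POLLERR | POLLHUP | POLLRDHUP)) {
		/* Peer closed or socket error: tear down this fd. */
		fprintf(stderr, "fd %d hung up (revents %#x)\n", fd, revents);
		return -1;
	}
	/* Unexpected mask: fail loudly, as the new ERR() paths do. */
	fprintf(stderr, "unexpected revents %#x for fd %d\n", revents, fd);
	return -1;
}

int main(void)
{
	struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN | POLLRDHUP };

	if (poll(&pfd, 1, -1) < 0) {
		perror("poll");
		return 1;
	}
	return dispatch_revents(pfd.fd, pfd.revents) < 0;
}

Checking LPOLLIN first also explains the "&& !(revents & LPOLLIN)" guards
added to the consumerd error and metadata socket checks: a hangup that
still has pending data must be drained before the fd is torn down.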
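
The new wait_consumer() helper is the other half of the teardown: the
session daemon now reaps each consumerd it spawned instead of leaving
zombies behind. A stand-alone sketch of the same waitpid() pattern follows
(wait_child() and the demo main() are hypothetical names, not lttng-tools
code). It also illustrates why the exit code must be extracted with
WEXITSTATUS(status), as corrected in the hunk above: waitpid() returns a
pid, not a status word.

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void wait_child(pid_t pid)
{
	pid_t ret;
	int status;

	if (pid <= 0) {
		return;		/* Child was never spawned. */
	}
	ret = waitpid(pid, &status, 0);
	if (ret == -1) {
		perror("waitpid");
		return;		/* status is not valid on failure. */
	}
	if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
		fprintf(stderr, "child %d exited with code %d\n",
				(int) pid, WEXITSTATUS(status));
	} else if (WIFSIGNALED(status)) {
		fprintf(stderr, "child %d killed by signal %d\n",
				(int) pid, WTERMSIG(status));
	}
}

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		_exit(3);	/* Child fails on purpose for the demo. */
	}
	wait_child(pid);
	return 0;
}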
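
Finally, the rcu_barrier() added before sessiond_cleanup_options() closes a
use-after-free window: structures freed by the cleanup path may still be
referenced by call_rcu() callbacks queued earlier. A minimal liburcu sketch
of that ordering, assuming the default memb flavour; struct node and
free_node() are illustrative, not sessiond types:

#define _LGPL_SOURCE
#include <stdlib.h>
#include <urcu.h>

struct node {
	int value;
	struct rcu_head rcu;	/* linkage for call_rcu() */
};

static void free_node(struct rcu_head *head)
{
	free(caa_container_of(head, struct node, rcu));
}

int main(void)
{
	struct node *n;

	rcu_register_thread();
	n = malloc(sizeof(*n));
	n->value = 42;
	/* Defer reclamation, as sessiond does for its hash table nodes. */
	call_rcu(&n->rcu, free_node);
	/* Wait for every previously queued callback to run before tearing
	 * down the state those callbacks may touch; this is what the diff
	 * does ahead of sessiond_cleanup_options(). */
	rcu_barrier();
	rcu_unregister_thread();
	return 0;
}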