X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=src%2Fbin%2Flttng-sessiond%2Fnotification-thread-events.cpp;h=d6c0d813550a21398a8e6e1987e4f2d437697c83;hb=3691d312bcf4dc2cc15f0ecb1f0e2fd2f34315b8;hp=279bc7c5a26aa797c08d800b38b16ae98aa04627;hpb=8ebf1688e54cb656c2a9d17d70a829664e1eaf6b;p=lttng-tools.git

diff --git a/src/bin/lttng-sessiond/notification-thread-events.cpp b/src/bin/lttng-sessiond/notification-thread-events.cpp
index 279bc7c5a..d6c0d8135 100644
--- a/src/bin/lttng-sessiond/notification-thread-events.cpp
+++ b/src/bin/lttng-sessiond/notification-thread-events.cpp
@@ -47,8 +47,8 @@
 #include "lttng-sessiond.hpp"
 #include "kernel.hpp"
 
-#define CLIENT_POLL_MASK_IN (LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP)
-#define CLIENT_POLL_MASK_IN_OUT (CLIENT_POLL_MASK_IN | LPOLLOUT)
+#define CLIENT_POLL_EVENTS_IN (LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP)
+#define CLIENT_POLL_EVENTS_IN_OUT (CLIENT_POLL_EVENTS_IN | LPOLLOUT)
 
 /* The tracers currently limit the capture size to PIPE_BUF (4kb on linux). */
 #define MAX_CAPTURE_SIZE (PIPE_BUF)
@@ -60,12 +60,6 @@ enum lttng_object_type {
 	LTTNG_OBJECT_TYPE_SESSION,
 };
 
-struct lttng_trigger_list_element {
-	/* No ownership of the trigger object is assumed. */
-	struct lttng_trigger *trigger;
-	struct cds_list_head node;
-};
-
 struct lttng_channel_trigger_list {
 	struct channel_key channel_key;
 	/* List of struct lttng_trigger_list_element. */
@@ -117,6 +111,13 @@ struct lttng_session_trigger_list {
 	struct rcu_head rcu_node;
 };
 
+namespace {
+struct lttng_trigger_list_element {
+	/* No ownership of the trigger object is assumed. */
+	struct lttng_trigger *trigger;
+	struct cds_list_head node;
+};
+
 struct lttng_trigger_ht_element {
 	struct lttng_trigger *trigger;
 	struct cds_lfht_node node;
@@ -140,6 +141,7 @@ struct channel_state_sample {
 	/* call_rcu delayed reclaim. */
 	struct rcu_head rcu_node;
 };
+} /* namespace */
 
 static unsigned long hash_channel_key(struct channel_key *key);
 static int evaluate_buffer_condition(const struct lttng_condition *condition,
@@ -3123,28 +3125,43 @@ end:
 	return 0;
 }
 
+static
+int pop_cmd_queue(struct notification_thread_handle *handle,
+		struct notification_thread_command **cmd)
+{
+	int ret;
+	uint64_t counter;
+
+	pthread_mutex_lock(&handle->cmd_queue.lock);
+	ret = lttng_read(handle->cmd_queue.event_fd, &counter, sizeof(counter));
+	if (ret != sizeof(counter)) {
+		ret = -1;
+		goto error_unlock;
+	}
+
+	*cmd = cds_list_first_entry(&handle->cmd_queue.list,
+			struct notification_thread_command, cmd_list_node);
+	cds_list_del(&((*cmd)->cmd_list_node));
+	ret = 0;
+
+error_unlock:
+	pthread_mutex_unlock(&handle->cmd_queue.lock);
+	return ret;
+}
+
 /* Returns 0 on success, 1 on exit requested, negative value on error. */
 int handle_notification_thread_command(
 		struct notification_thread_handle *handle,
 		struct notification_thread_state *state)
 {
 	int ret;
-	uint64_t counter;
 	struct notification_thread_command *cmd;
 
-	/* Read the event pipe to put it back into a quiescent state. */
-	ret = lttng_read(lttng_pipe_get_readfd(handle->cmd_queue.event_pipe), &counter,
-			sizeof(counter));
-	if (ret != sizeof(counter)) {
+	ret = pop_cmd_queue(handle, &cmd);
+	if (ret) {
 		goto error;
 	}
 
-	pthread_mutex_lock(&handle->cmd_queue.lock);
-	cmd = cds_list_first_entry(&handle->cmd_queue.list,
-			struct notification_thread_command, cmd_list_node);
-	cds_list_del(&cmd->cmd_list_node);
-	pthread_mutex_unlock(&handle->cmd_queue.lock);
-
 	DBG("Received `%s` command", notification_command_type_str(cmd->type));
 	switch (cmd->type) {
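The pop_cmd_queue() helper above pairs an eventfd with a mutex-protected command list: producers append a command and bump the counter, and the notification thread pops exactly one command per wake-up once the fd polls readable. Below is a minimal, self-contained sketch of that pattern. It is not LTTng code: std::mutex and std::deque stand in for the daemon's pthread mutex and liburcu list, the EFD_SEMAPHORE flag is an assumption of the sketch (the flags used to create cmd_queue.event_fd are not part of this diff), and command_queue, push_command and pop_command are illustrative names.

#include <cstdint>
#include <deque>
#include <mutex>
#include <sys/eventfd.h>
#include <unistd.h>

struct command {
	int type;
};

struct command_queue {
	/* One eventfd "post" per queued command (semaphore semantics assumed). */
	int event_fd = eventfd(0, EFD_CLOEXEC | EFD_SEMAPHORE);
	std::mutex lock;
	std::deque<command *> list;
};

/* Producer: any thread may queue a command and wake the poll loop. */
static bool push_command(command_queue &queue, command *cmd)
{
	const uint64_t counter = 1;
	const std::lock_guard<std::mutex> guard(queue.lock);

	queue.list.push_back(cmd);
	/* Makes event_fd readable, waking the thread that polls it. */
	return write(queue.event_fd, &counter, sizeof(counter)) ==
			static_cast<ssize_t>(sizeof(counter));
}

/*
 * Consumer: called once the poll loop reports event_fd as readable;
 * mirrors pop_cmd_queue() in the patch above.
 */
static command *pop_command(command_queue &queue)
{
	uint64_t counter;
	const std::lock_guard<std::mutex> guard(queue.lock);

	/* With EFD_SEMAPHORE, each read consumes a single post. */
	if (read(queue.event_fd, &counter, sizeof(counter)) !=
			static_cast<ssize_t>(sizeof(counter))) {
		return nullptr;
	}

	command *cmd = queue.list.front();
	queue.list.pop_front();
	return cmd;
}

Folding the eventfd read into the same critical section as the list pop, as the patch does, keeps the counter and the list in sync: a reader can no longer observe a counter decrement without also removing the matching command.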
@@ -3379,9 +3396,9 @@ int handle_notification_thread_client_connect(
 		goto error;
 	}
 
+	client->communication.current_poll_events = CLIENT_POLL_EVENTS_IN;
 	ret = lttng_poll_add(&state->events, client->socket,
-			LPOLLIN | LPOLLERR |
-			LPOLLHUP | LPOLLRDHUP);
+			client->communication.current_poll_events);
 	if (ret < 0) {
 		ERR("Failed to add notification channel client socket to poll set");
 		ret = 0;
@@ -3515,6 +3532,18 @@ int handle_notification_thread_trigger_unregister_all(
 	return error_occurred ? -1 : 0;
 }
 
+static
+bool client_has_outbound_data_left(
+		const struct notification_client *client)
+{
+	const struct lttng_payload_view pv = lttng_payload_view_from_payload(
+			&client->communication.outbound.payload, 0, -1);
+	const bool has_data = pv.buffer.size != 0;
+	const bool has_fds = lttng_payload_view_get_fd_handle_count(&pv);
+
+	return has_data || has_fds;
+}
+
 static
 int client_handle_transmission_status(
 		struct notification_client *client,
@@ -3525,24 +3554,51 @@ int client_handle_transmission_status(
 
 	switch (transmission_status) {
 	case CLIENT_TRANSMISSION_STATUS_COMPLETE:
-		ret = lttng_poll_mod(&state->events, client->socket,
-				CLIENT_POLL_MASK_IN);
-		if (ret) {
-			goto end;
-		}
-
-		break;
 	case CLIENT_TRANSMISSION_STATUS_QUEUED:
+	{
+		int current_poll_events;
+		int new_poll_events;
 		/*
 		 * We want to be notified whenever there is buffer space
-		 * available to send the rest of the payload.
+		 * available to send the rest of the payload if we are
+		 * waiting to send data to the client.
+		 *
+		 * The state of the outbound queue being sampled here is
+		 * fine since:
+		 *   - it is okay to wake up "for nothing" in case we see
+		 *     that data is left, but another thread succeeds in
+		 *     flushing it before us when handling the client "out"
+		 *     event. We will simply stop monitoring that event the
+		 *     next time it wakes us up and we see no data left to
+		 *     be sent,
+		 *   - if another thread fails to flush the entire client
+		 *     outgoing queue, it will issue a "communication update"
+		 *     command and cause the client's (e)poll mask to be
+		 *     re-evaluated.
+		 *
+		 * The situation we seek to avoid would be to disable the
+		 * monitoring of "out" client events indefinitely when there is
+		 * data to be sent, which can't happen because of the
+		 * aforementioned "communication update" mechanism.
 		 */
-		ret = lttng_poll_mod(&state->events, client->socket,
-				CLIENT_POLL_MASK_IN_OUT);
-		if (ret) {
-			goto end;
+		pthread_mutex_lock(&client->lock);
+		current_poll_events = client->communication.current_poll_events;
+		new_poll_events = client_has_outbound_data_left(client) ?
+				CLIENT_POLL_EVENTS_IN_OUT :
+				CLIENT_POLL_EVENTS_IN;
+		client->communication.current_poll_events = new_poll_events;
+		pthread_mutex_unlock(&client->lock);
+
+		/* Update the monitored event set only if it changed. */
+		if (current_poll_events != new_poll_events) {
+			ret = lttng_poll_mod(&state->events, client->socket,
+					new_poll_events);
+			if (ret) {
+				goto end;
+			}
 		}
+		break;
+	}
 	case CLIENT_TRANSMISSION_STATUS_FAIL:
 		ret = notification_thread_client_disconnect(client, state);
 		if (ret) {
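The merged COMPLETE/QUEUED case above does two things: it samples the outbound queue under the client lock to choose the desired event set, and it calls lttng_poll_mod() only when that set differs from what is already registered. The following hedged sketch expresses the same idea against raw epoll(7); the daemon actually goes through its lttng_poll wrapper, and client_state and update_client_poll_mask are illustrative names, not LTTng API.

#include <cstdint>
#include <mutex>
#include <sys/epoll.h>

struct client_state {
	int socket;
	std::mutex lock;
	uint32_t current_poll_events; /* Mask currently registered with epoll. */
	bool has_outbound_data;       /* Only sampled while holding 'lock'. */
};

static int update_client_poll_mask(int epoll_fd, client_state &client)
{
	uint32_t previous, wanted;

	{
		const std::lock_guard<std::mutex> guard(client.lock);

		previous = client.current_poll_events;
		wanted = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLRDHUP;
		if (client.has_outbound_data) {
			/* Also wake up when the socket can accept more data. */
			wanted |= EPOLLOUT;
		}

		client.current_poll_events = wanted;
	}

	if (previous == wanted) {
		/* Skip the redundant epoll_ctl() system call. */
		return 0;
	}

	struct epoll_event event = {};

	event.events = wanted;
	event.data.fd = client.socket;
	return epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client.socket, &event);
}

Caching current_poll_events is what makes the early return possible: a spurious "communication update" command then costs a mutex acquisition instead of a system call, and a stale sample is harmless for the reasons given in the comment above.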
@@ -3682,18 +3738,6 @@ error:
 	return CLIENT_TRANSMISSION_STATUS_ERROR;
 }
 
-static
-bool client_has_outbound_data_left(
-		const struct notification_client *client)
-{
-	const struct lttng_payload_view pv = lttng_payload_view_from_payload(
-			&client->communication.outbound.payload, 0, -1);
-	const bool has_data = pv.buffer.size != 0;
-	const bool has_fds = lttng_payload_view_get_fd_handle_count(&pv);
-
-	return has_data || has_fds;
-}
-
 /* Client lock must _not_ be held by the caller. */
 static
 int client_send_command_reply(struct notification_client *client,
@@ -4102,7 +4146,47 @@ int handle_notification_thread_client_out(
 	}
 
 	pthread_mutex_lock(&client->lock);
-	transmission_status = client_flush_outgoing_queue(client);
+	if (!client_has_outbound_data_left(client)) {
+		/*
+		 * A client "out" event can be received when no payload is left
+		 * to send under some circumstances.
+		 *
+		 * Many threads can flush a client's outgoing queue and, if they
+		 * had to queue their message (socket was full), will use the
+		 * "communication update" command to signal the (e)poll thread
+		 * to monitor for space being made available in the socket.
+		 *
+		 * Commands are sent over an internal pipe serviced by the same
+		 * thread as the client sockets.
+		 *
+		 * When space is made available in the socket, there is a race
+		 * between the (e)poll thread and the other threads that may
+		 * wish to use the client's socket to flush its outgoing queue.
+		 *
+		 * A non-(e)poll thread may attempt (and succeed in) flushing
+		 * the queue before the (e)poll thread gets a chance to service
+		 * the client's "out" event.
+		 *
+		 * In this situation, the (e)poll thread processing the client
+		 * "out" event will see an empty payload: there is nothing to
+		 * do except stop monitoring the client's "out" events.
+		 *
+		 * Note that this thread is the (e)poll thread, so it can modify
+		 * the (e)poll mask directly without using a communication
+		 * update command. Other threads that flush the outgoing queue
+		 * will use the "communication update" command to wake up this
+		 * thread and force it to monitor "out" events.
+		 *
+		 * When other threads succeed in emptying the outgoing queue,
+		 * they don't need to update the (e)poll mask: if the "out"
+		 * event is monitored, it will fire once and the (e)poll
+		 * thread will reach this condition, causing the event to
+		 * stop being monitored.
+		 */
+		transmission_status = CLIENT_TRANSMISSION_STATUS_COMPLETE;
+	} else {
+		transmission_status = client_flush_outgoing_queue(client);
+	}
 	pthread_mutex_unlock(&client->lock);
 
 	ret = client_handle_transmission_status(
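The comment above describes a benign race: any thread may drain a client's outgoing queue, so by the time the (e)poll thread services an "out" event there may be nothing left to send. A compressed sketch of the two paths involved follows; the names are illustrative and std::deque stands in for the client's outbound lttng_payload.

#include <deque>
#include <mutex>
#include <string>

enum class transmission_status { complete, queued, fail };

struct client {
	std::mutex lock;
	std::deque<std::string> outbound; /* Pending, unsent messages. */
};

/* Called with 'lock' held by any thread pushing data to the client. */
static transmission_status flush_outgoing_queue(client &c)
{
	/* ... write() as much of c.outbound as the socket accepts ... */
	return c.outbound.empty() ? transmission_status::complete :
			transmission_status::queued;
}

/* (E)poll thread: the client socket reported an "out" event. */
static transmission_status handle_client_out(client &c)
{
	const std::lock_guard<std::mutex> guard(c.lock);

	if (c.outbound.empty()) {
		/*
		 * Another thread flushed everything between the "out" event
		 * firing and this thread servicing it: report completion so
		 * the caller stops monitoring "out" events.
		 */
		return transmission_status::complete;
	}

	return flush_outgoing_queue(c);
}

Reporting CLIENT_TRANSMISSION_STATUS_COMPLETE for the already-empty case feeds back into client_handle_transmission_status() above, which then drops LPOLLOUT from the monitored set; since this runs on the (e)poll thread itself, no extra "communication update" command is needed.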