/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
10 #include "health-sessiond.hpp"
11 #include "manage-consumer.hpp"
12 #include "testpoint.hpp"
14 #include "ust-consumer.hpp"
17 #include <common/pipe.hpp>
18 #include <common/utils.hpp>

struct thread_notifiers {
	struct lttng_pipe *quit_pipe;
	struct consumer_data *consumer_data;
	sem_t ready;
	int initialization_result;
};
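
/*
 * Start-up handshake: the management thread records its outcome in
 * 'initialization_result' and posts 'ready'; the launching thread blocks
 * on the semaphore before reading the result.
 */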

static void mark_thread_as_ready(struct thread_notifiers *notifiers)
{
	DBG("Marking consumer management thread as ready");
	notifiers->initialization_result = 0;
	sem_post(&notifiers->ready);
}

static void mark_thread_initialization_as_failed(struct thread_notifiers *notifiers)
{
	ERR("Consumer management thread entering error state");
	notifiers->initialization_result = -1;
	sem_post(&notifiers->ready);
}

static void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
{
	DBG("Waiting for consumer management thread to be ready");
	sem_wait(&notifiers->ready);
	DBG("Consumer management thread is ready");
}

/*
 * This thread manages the consumer errors sent back to the session daemon.
 */
static void *thread_consumer_management(void *data)
{
	int sock = -1, i, ret, err = -1, should_quit = 0;
	int nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	struct consumer_data *consumer_data = notifiers->consumer_data;
	const auto thread_quit_pipe_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);
	struct consumer_socket *cmd_socket_wrapper = nullptr;
66 DBG("[thread] Manage consumer started");
68 rcu_register_thread();
71 health_register(the_health_sessiond
, HEALTH_SESSIOND_TYPE_CONSUMER
);

	/*
	 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and
	 * the metadata_sock. Nothing more will be added to this poll set.
	 */
	ret = lttng_poll_create(&events, 3, LTTNG_CLOEXEC);
	if (ret < 0) {
		mark_thread_initialization_as_failed(notifiers);
		goto error_poll;
	}

	ret = lttng_poll_add(&events, thread_quit_pipe_fd, LPOLLIN);
	if (ret < 0) {
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
	health_poll_entry();

	if (testpoint(sessiond_thread_manage_consumer)) {
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}

	nb_fd = ret;
	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		const auto revents = LTTNG_POLL_GETEV(&events, i);
		const auto pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		/* Activity on thread quit pipe, exiting. */
		if (pollfd == thread_quit_pipe_fd) {
			DBG("Activity on thread quit pipe");
			err = 0;
			mark_thread_initialization_as_failed(notifiers);
			goto exit;
		} else if (pollfd == consumer_data->err_sock) {
			/* Event on the registration socket */
			if (revents & LPOLLIN) {
				continue;
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				mark_thread_initialization_as_failed(notifiers);
				goto error;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				mark_thread_initialization_as_failed(notifiers);
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code, sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}

	health_code_update();
	if (code != LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		ERR("consumer error when waiting for SOCK_READY : %s",
		    lttcomm_get_readable_code((lttcomm_return_code) -code));
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}
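
	/*
	 * Note that both connections below target cmd_unix_sock_path: the
	 * metadata socket is simply a second connection to the consumer's
	 * command socket.
	 */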
	/* Connect both command and metadata sockets. */
	consumer_data->cmd_sock = lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
	consumer_data->metadata_fd = lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
	if (consumer_data->cmd_sock < 0 || consumer_data->metadata_fd < 0) {
		PERROR("consumer connect cmd socket");
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}

	consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;

	/* Create metadata socket lock. */
	consumer_data->metadata_sock.lock = zmalloc<pthread_mutex_t>();
	if (consumer_data->metadata_sock.lock == nullptr) {
		PERROR("zmalloc pthread mutex");
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}
	pthread_mutex_init(consumer_data->metadata_sock.lock, nullptr);
201 DBG("Consumer command socket ready (fd: %d)", consumer_data
->cmd_sock
);
202 DBG("Consumer metadata socket ready (fd: %d)", consumer_data
->metadata_fd
);

	/*
	 * Remove the consumerd error sock since we've established a connection.
	 */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}

	/* Add metadata socket that is successfully connected. */
	ret = lttng_poll_add(&events, consumer_data->metadata_fd, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}
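
	/*
	 * The poll set now holds its final three members: the quit pipe, the
	 * accepted error socket and the metadata socket, matching the size
	 * passed to lttng_poll_create() above.
	 */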

	health_code_update();

	/*
	 * Transfer the write-end of the channel monitoring pipe to the consumer
	 * by issuing a SET_CHANNEL_MONITOR_PIPE command.
	 */
	cmd_socket_wrapper = consumer_allocate_socket(&consumer_data->cmd_sock);
	if (!cmd_socket_wrapper) {
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}
	cmd_socket_wrapper->lock = &consumer_data->lock;

	pthread_mutex_lock(cmd_socket_wrapper->lock);
	ret = consumer_init(cmd_socket_wrapper, the_sessiond_uuid);
	if (ret) {
		ERR("Failed to send sessiond uuid to consumer daemon");
		mark_thread_initialization_as_failed(notifiers);
		pthread_mutex_unlock(cmd_socket_wrapper->lock);
		goto error;
	}
	pthread_mutex_unlock(cmd_socket_wrapper->lock);

	ret = consumer_send_channel_monitor_pipe(cmd_socket_wrapper,
						 consumer_data->channel_monitor_pipe);
	if (ret) {
		mark_thread_initialization_as_failed(notifiers);
		goto error;
	}

	/* Discard the socket wrapper as it is no longer needed. */
	consumer_destroy_socket(cmd_socket_wrapper);
	cmd_socket_wrapper = nullptr;

	/* The thread is completely initialized, signal that it is ready. */
	mark_thread_as_ready(notifiers);
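
	/*
	 * From this point on, failures are reported through the thread's
	 * regular error path rather than through the initialization result.
	 */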

	/* Infinite blocking call, waiting for transmission */
	while (true) {
		health_code_update();

		/* Exit the thread because the thread quit pipe has been triggered. */
		if (should_quit) {
			/* Not a health error. */
			err = 0;
			goto exit;
		}

		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			const auto revents = LTTNG_POLL_GETEV(&events, i);
			const auto pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/*
			 * Thread quit pipe has been triggered, flag that we should stop
			 * but continue the current loop to handle potential data from
			 * the consumer.
			 */
			if (pollfd == thread_quit_pipe_fd) {
				should_quit = 1;
			} else if (pollfd == sock) {
				/* Event on the consumerd socket */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP) &&
				    !(revents & LPOLLIN)) {
					ERR("consumer err socket second poll error");
					goto error;
				}
				health_code_update();
				/* Wait for any kconsumerd error */
				ret = lttcomm_recv_unix_sock(
					sock, &code, sizeof(enum lttcomm_return_code));
				if (ret <= 0) {
					ERR("consumer closed the command socket");
					goto error;
				}

				ERR("consumer return code : %s",
				    lttcomm_get_readable_code((lttcomm_return_code) -code));

				goto exit;
			} else if (pollfd == consumer_data->metadata_fd) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP) &&
				    !(revents & LPOLLIN)) {
					ERR("consumer err metadata socket second poll error");
					goto error;
				}
				/* UST metadata requests */
				ret = ust_consumer_metadata_request(&consumer_data->metadata_sock);
				if (ret < 0) {
					ERR("Handling metadata request");
					goto error;
				}
			}
			/* No need for an else branch all FDs are tested prior. */
		}

		health_code_update();
	}

exit:
error:
	/*
	 * We lock here because we are about to close the sockets and some other
	 * thread might be using them, so take exclusive access, which will
	 * abort all other consumer commands issued by other threads.
	 */
	pthread_mutex_lock(&consumer_data->lock);

	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&the_kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
		   consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&the_ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		abort();
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->err_sock = -1;
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->cmd_sock = -1;
	}
	if (consumer_data->metadata_sock.fd_ptr && *consumer_data->metadata_sock.fd_ptr >= 0) {
		ret = close(*consumer_data->metadata_sock.fd_ptr);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	pthread_mutex_unlock(&consumer_data->lock);

	/* Cleanup metadata socket mutex. */
	if (consumer_data->metadata_sock.lock) {
		pthread_mutex_destroy(consumer_data->metadata_sock.lock);
		free(consumer_data->metadata_sock.lock);
	}

	lttng_poll_clean(&events);

	if (cmd_socket_wrapper) {
		consumer_destroy_socket(cmd_socket_wrapper);
	}
400 ERR("Health error occurred in %s", __func__
);
402 health_unregister(the_health_sessiond
);
403 DBG("consumer thread cleanup completed");
405 rcu_thread_offline();
406 rcu_unregister_thread();

static bool shutdown_consumer_management_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);

	return notify_thread_pipe(write_fd) == 1;
}

static void cleanup_consumer_management_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;

	lttng_pipe_destroy(notifiers->quit_pipe);
	free(notifiers);
}
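
/*
 * Launch the consumer management thread and block until it reports its
 * initialization result; true is returned only on successful start-up.
 */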
bool launch_consumer_management_thread(struct consumer_data *consumer_data)
{
	struct lttng_pipe *quit_pipe;
	struct thread_notifiers *notifiers = nullptr;
	struct lttng_thread *thread;

	notifiers = zmalloc<thread_notifiers>();
	if (!notifiers) {
		goto error_alloc;
	}

	quit_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!quit_pipe) {
		goto error;
	}
	notifiers->quit_pipe = quit_pipe;
	notifiers->consumer_data = consumer_data;
	sem_init(&notifiers->ready, 0, 0);

	thread = lttng_thread_create("Consumer management",
				     thread_consumer_management,
				     shutdown_consumer_management_thread,
				     cleanup_consumer_management_thread,
				     notifiers);
	if (!thread) {
		goto error;
	}

	wait_until_thread_is_ready(notifiers);
	lttng_thread_put(thread);
	return notifiers->initialization_result == 0;
error:
	cleanup_consumer_management_thread(notifiers);
error_alloc:
	return false;
}