/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */
#include "health-sessiond.hpp"
#include "kernel-consumer.hpp"
#include "manage-kernel.hpp"
#include "testpoint.hpp"

#include <common/pipe.hpp>
#include <common/urcu.hpp>
#include <common/utils.hpp>

#include <stdlib.h>
/*
 * State shared between the kernel management thread and its
 * shutdown/cleanup callbacks (passed around as the opaque 'data' pointer).
 */
struct thread_notifiers {
	/* Written to by the shutdown callback to make the thread exit. */
	struct lttng_pipe *quit_pipe;
	/* Read end of the pipe used to wake the thread on kernel channel changes. */
	int kernel_poll_pipe_read_fd;
};
32 * Update the kernel poll set of all channel fd available over all tracing
33 * session. Add the wakeup pipe at the end of the set.
35 static int update_kernel_poll(struct lttng_poll_event
*events
)
38 struct ltt_kernel_channel
*channel
;
39 struct ltt_session
*session
;
41 DBG("Updating kernel poll set");
43 const auto list_lock
= lttng::sessiond::lock_session_list();
44 const struct ltt_session_list
*session_list
= session_get_list();
46 cds_list_for_each_entry (session
, &session_list
->head
, list
) {
47 if (!session_get(session
)) {
50 session_lock(session
);
51 if (session
->kernel_session
== nullptr) {
52 session_unlock(session
);
57 cds_list_for_each_entry (
58 channel
, &session
->kernel_session
->channel_list
.head
, list
) {
59 /* Add channel fd to the kernel poll set */
60 ret
= lttng_poll_add(events
, channel
->fd
, LPOLLIN
| LPOLLRDNORM
);
62 session_unlock(session
);
66 DBG("Channel fd %d added to kernel set", channel
->fd
);
68 session_unlock(session
);
76 * Find the channel fd from 'fd' over all tracing session. When found, check
77 * for new channel stream and send those stream fds to the kernel consumer.
79 * Useful for CPU hotplug feature.
81 static int update_kernel_stream(int fd
)
84 struct ltt_session
*session
;
85 struct ltt_kernel_session
*ksess
;
86 struct ltt_kernel_channel
*channel
;
88 DBG("Updating kernel streams for channel fd %d", fd
);
90 const auto list_lock
= lttng::sessiond::lock_session_list();
91 const struct ltt_session_list
*session_list
= session_get_list();
93 cds_list_for_each_entry (session
, &session_list
->head
, list
) {
94 if (!session_get(session
)) {
98 session_lock(session
);
99 if (session
->kernel_session
== nullptr) {
100 session_unlock(session
);
101 session_put(session
);
105 ksess
= session
->kernel_session
;
107 cds_list_for_each_entry (channel
, &ksess
->channel_list
.head
, list
) {
108 struct lttng_ht_iter iter
;
109 struct consumer_socket
*socket
;
111 if (channel
->fd
!= fd
) {
114 DBG("Channel found, updating kernel streams");
115 ret
= kernel_open_channel_stream(channel
);
119 /* Update the stream global counter */
120 ksess
->stream_count_global
+= ret
;
123 * Have we already sent fds to the consumer? If yes, it
124 * means that tracing is started so it is safe to send
125 * our updated stream fds.
127 if (ksess
->consumer_fds_sent
!= 1 || ksess
->consumer
== nullptr) {
133 const lttng::urcu::read_lock_guard read_lock
;
135 cds_lfht_for_each_entry (
136 ksess
->consumer
->socks
->ht
, &iter
.iter
, socket
, node
.node
) {
137 pthread_mutex_lock(socket
->lock
);
138 ret
= kernel_consumer_send_channel_streams(
142 session
->output_traces
? 1 : 0);
143 pthread_mutex_unlock(socket
->lock
);
151 session_unlock(session
);
152 session_put(session
);
158 session_unlock(session
);
159 session_put(session
);
164 * This thread manage event coming from the kernel.
166 * Features supported in this thread:
169 static void *thread_kernel_management(void *data
)
171 int ret
, i
, update_poll_flag
= 1, err
= -1;
174 struct lttng_poll_event events
;
175 struct thread_notifiers
*notifiers
= (thread_notifiers
*) data
;
176 const auto thread_quit_pipe_fd
= lttng_pipe_get_readfd(notifiers
->quit_pipe
);
178 DBG("[thread] Thread manage kernel started");
180 health_register(the_health_sessiond
, HEALTH_SESSIOND_TYPE_KERNEL
);
183 * This first step of the while is to clean this structure which could free
184 * non NULL pointers so initialize it before the loop.
186 lttng_poll_init(&events
);
188 if (testpoint(sessiond_thread_manage_kernel
)) {
189 goto error_testpoint
;
192 health_code_update();
194 if (testpoint(sessiond_thread_manage_kernel_before_loop
)) {
195 goto error_testpoint
;
199 health_code_update();
201 if (update_poll_flag
== 1) {
202 /* Clean events object. We are about to populate it again. */
203 lttng_poll_clean(&events
);
205 ret
= lttng_poll_create(&events
, 2, LTTNG_CLOEXEC
);
207 goto error_poll_create
;
210 ret
= lttng_poll_add(&events
, notifiers
->kernel_poll_pipe_read_fd
, LPOLLIN
);
215 ret
= lttng_poll_add(&events
, thread_quit_pipe_fd
, LPOLLIN
);
220 /* This will add the available kernel channel if any. */
221 ret
= update_kernel_poll(&events
);
225 update_poll_flag
= 0;
228 DBG("Thread kernel polling");
230 /* Poll infinite value of time */
233 ret
= lttng_poll_wait(&events
, -1);
234 DBG("Thread kernel return from poll on %d fds", LTTNG_POLL_GETNB(&events
));
238 * Restart interrupted system call.
240 if (errno
== EINTR
) {
244 } else if (ret
== 0) {
245 /* Should not happen since timeout is infinite */
246 ERR("Return value of poll is 0 with an infinite timeout.\n"
247 "This should not have happened! Continuing...");
253 for (i
= 0; i
< nb_fd
; i
++) {
254 /* Fetch once the poll data */
255 const auto revents
= LTTNG_POLL_GETEV(&events
, i
);
256 const auto pollfd
= LTTNG_POLL_GETFD(&events
, i
);
258 health_code_update();
260 /* Activity on thread quit pipe, exiting. */
261 if (pollfd
== thread_quit_pipe_fd
) {
262 DBG("Activity on thread quit pipe");
267 /* Check for data on kernel pipe */
268 if (revents
& LPOLLIN
) {
269 if (pollfd
== notifiers
->kernel_poll_pipe_read_fd
) {
271 notifiers
->kernel_poll_pipe_read_fd
, &tmp
, 1);
273 * Ret value is useless here, if this pipe gets any actions
274 * an update is required anyway.
276 update_poll_flag
= 1;
280 * New CPU detected by the kernel. Adding kernel stream to
281 * kernel session and updating the kernel consumer
283 ret
= update_kernel_stream(pollfd
);
289 } else if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
290 update_poll_flag
= 1;
293 ERR("Unexpected poll events %u for sock %d", revents
, pollfd
);
301 lttng_poll_clean(&events
);
306 ERR("Health error occurred in %s", __func__
);
307 WARN("Kernel thread died unexpectedly. "
308 "Kernel tracing can continue but CPU hotplug is disabled.");
310 health_unregister(the_health_sessiond
);
311 DBG("Kernel thread dying");
315 static bool shutdown_kernel_management_thread(void *data
)
317 struct thread_notifiers
*notifiers
= (thread_notifiers
*) data
;
318 const int write_fd
= lttng_pipe_get_writefd(notifiers
->quit_pipe
);
320 return notify_thread_pipe(write_fd
) == 1;
323 static void cleanup_kernel_management_thread(void *data
)
325 struct thread_notifiers
*notifiers
= (thread_notifiers
*) data
;
327 lttng_pipe_destroy(notifiers
->quit_pipe
);
331 bool launch_kernel_management_thread(int kernel_poll_pipe_read_fd
)
333 struct lttng_pipe
*quit_pipe
;
334 struct thread_notifiers
*notifiers
= nullptr;
335 struct lttng_thread
*thread
;
337 notifiers
= zmalloc
<thread_notifiers
>();
341 quit_pipe
= lttng_pipe_open(FD_CLOEXEC
);
345 notifiers
->quit_pipe
= quit_pipe
;
346 notifiers
->kernel_poll_pipe_read_fd
= kernel_poll_pipe_read_fd
;
348 thread
= lttng_thread_create("Kernel management",
349 thread_kernel_management
,
350 shutdown_kernel_management_thread
,
351 cleanup_kernel_management_thread
,
356 lttng_thread_put(thread
);
359 cleanup_kernel_management_thread(notifiers
);