/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
#include "health-sessiond.hpp"
#include "kernel-consumer.hpp"
#include "manage-kernel.hpp"
#include "testpoint.hpp"

#include <common/pipe.hpp>
#include <common/urcu.hpp>
#include <common/utils.hpp>

#include <errno.h>
#include <pthread.h>
/*
 * State shared between the kernel-management thread and its
 * shutdown/cleanup callbacks. Owned by launch_kernel_management_thread()
 * and released by cleanup_kernel_management_thread().
 */
struct thread_notifiers {
	/* Written to by the shutdown callback to ask the thread to exit. */
	struct lttng_pipe *quit_pipe;
	/* Read end of the pipe used to wake the thread on kernel events. */
	int kernel_poll_pipe_read_fd;
};
30 * Update the kernel poll set of all channel fd available over all tracing
31 * session. Add the wakeup pipe at the end of the set.
33 static int update_kernel_poll(struct lttng_poll_event
*events
)
36 struct ltt_kernel_channel
*channel
;
37 struct ltt_session
*session
;
38 const struct ltt_session_list
*session_list
= session_get_list();
40 DBG("Updating kernel poll set");
43 cds_list_for_each_entry (session
, &session_list
->head
, list
) {
44 if (!session_get(session
)) {
47 session_lock(session
);
48 if (session
->kernel_session
== nullptr) {
49 session_unlock(session
);
54 cds_list_for_each_entry (
55 channel
, &session
->kernel_session
->channel_list
.head
, list
) {
56 /* Add channel fd to the kernel poll set */
57 ret
= lttng_poll_add(events
, channel
->fd
, LPOLLIN
| LPOLLRDNORM
);
59 session_unlock(session
);
63 DBG("Channel fd %d added to kernel set", channel
->fd
);
65 session_unlock(session
);
68 session_unlock_list();
73 session_unlock_list();
78 * Find the channel fd from 'fd' over all tracing session. When found, check
79 * for new channel stream and send those stream fds to the kernel consumer.
81 * Useful for CPU hotplug feature.
83 static int update_kernel_stream(int fd
)
86 struct ltt_session
*session
;
87 struct ltt_kernel_session
*ksess
;
88 struct ltt_kernel_channel
*channel
;
89 const struct ltt_session_list
*session_list
= session_get_list();
91 DBG("Updating kernel streams for channel fd %d", fd
);
94 cds_list_for_each_entry (session
, &session_list
->head
, list
) {
95 if (!session_get(session
)) {
99 session_lock(session
);
100 if (session
->kernel_session
== nullptr) {
101 session_unlock(session
);
102 session_put(session
);
106 ksess
= session
->kernel_session
;
108 cds_list_for_each_entry (channel
, &ksess
->channel_list
.head
, list
) {
109 struct lttng_ht_iter iter
;
110 struct consumer_socket
*socket
;
112 if (channel
->fd
!= fd
) {
115 DBG("Channel found, updating kernel streams");
116 ret
= kernel_open_channel_stream(channel
);
120 /* Update the stream global counter */
121 ksess
->stream_count_global
+= ret
;
124 * Have we already sent fds to the consumer? If yes, it
125 * means that tracing is started so it is safe to send
126 * our updated stream fds.
128 if (ksess
->consumer_fds_sent
!= 1 || ksess
->consumer
== nullptr) {
134 lttng::urcu::read_lock_guard read_lock
;
136 cds_lfht_for_each_entry (
137 ksess
->consumer
->socks
->ht
, &iter
.iter
, socket
, node
.node
) {
138 pthread_mutex_lock(socket
->lock
);
139 ret
= kernel_consumer_send_channel_streams(
143 session
->output_traces
? 1 : 0);
144 pthread_mutex_unlock(socket
->lock
);
152 session_unlock(session
);
153 session_put(session
);
155 session_unlock_list();
159 session_unlock(session
);
160 session_put(session
);
161 session_unlock_list();
166 * This thread manage event coming from the kernel.
168 * Features supported in this thread:
171 static void *thread_kernel_management(void *data
)
173 int ret
, i
, update_poll_flag
= 1, err
= -1;
176 struct lttng_poll_event events
;
177 struct thread_notifiers
*notifiers
= (thread_notifiers
*) data
;
178 const auto thread_quit_pipe_fd
= lttng_pipe_get_readfd(notifiers
->quit_pipe
);
180 DBG("[thread] Thread manage kernel started");
182 health_register(the_health_sessiond
, HEALTH_SESSIOND_TYPE_KERNEL
);
185 * This first step of the while is to clean this structure which could free
186 * non NULL pointers so initialize it before the loop.
188 lttng_poll_init(&events
);
190 if (testpoint(sessiond_thread_manage_kernel
)) {
191 goto error_testpoint
;
194 health_code_update();
196 if (testpoint(sessiond_thread_manage_kernel_before_loop
)) {
197 goto error_testpoint
;
201 health_code_update();
203 if (update_poll_flag
== 1) {
204 /* Clean events object. We are about to populate it again. */
205 lttng_poll_clean(&events
);
207 ret
= lttng_poll_create(&events
, 2, LTTNG_CLOEXEC
);
209 goto error_poll_create
;
212 ret
= lttng_poll_add(&events
, notifiers
->kernel_poll_pipe_read_fd
, LPOLLIN
);
217 ret
= lttng_poll_add(&events
, thread_quit_pipe_fd
, LPOLLIN
);
222 /* This will add the available kernel channel if any. */
223 ret
= update_kernel_poll(&events
);
227 update_poll_flag
= 0;
230 DBG("Thread kernel polling");
232 /* Poll infinite value of time */
235 ret
= lttng_poll_wait(&events
, -1);
236 DBG("Thread kernel return from poll on %d fds", LTTNG_POLL_GETNB(&events
));
240 * Restart interrupted system call.
242 if (errno
== EINTR
) {
246 } else if (ret
== 0) {
247 /* Should not happen since timeout is infinite */
248 ERR("Return value of poll is 0 with an infinite timeout.\n"
249 "This should not have happened! Continuing...");
255 for (i
= 0; i
< nb_fd
; i
++) {
256 /* Fetch once the poll data */
257 const auto revents
= LTTNG_POLL_GETEV(&events
, i
);
258 const auto pollfd
= LTTNG_POLL_GETFD(&events
, i
);
260 health_code_update();
262 /* Activity on thread quit pipe, exiting. */
263 if (pollfd
== thread_quit_pipe_fd
) {
264 DBG("Activity on thread quit pipe");
269 /* Check for data on kernel pipe */
270 if (revents
& LPOLLIN
) {
271 if (pollfd
== notifiers
->kernel_poll_pipe_read_fd
) {
273 notifiers
->kernel_poll_pipe_read_fd
, &tmp
, 1);
275 * Ret value is useless here, if this pipe gets any actions
276 * an update is required anyway.
278 update_poll_flag
= 1;
282 * New CPU detected by the kernel. Adding kernel stream to
283 * kernel session and updating the kernel consumer
285 ret
= update_kernel_stream(pollfd
);
291 } else if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
292 update_poll_flag
= 1;
295 ERR("Unexpected poll events %u for sock %d", revents
, pollfd
);
303 lttng_poll_clean(&events
);
308 ERR("Health error occurred in %s", __func__
);
309 WARN("Kernel thread died unexpectedly. "
310 "Kernel tracing can continue but CPU hotplug is disabled.");
312 health_unregister(the_health_sessiond
);
313 DBG("Kernel thread dying");
317 static bool shutdown_kernel_management_thread(void *data
)
319 struct thread_notifiers
*notifiers
= (thread_notifiers
*) data
;
320 const int write_fd
= lttng_pipe_get_writefd(notifiers
->quit_pipe
);
322 return notify_thread_pipe(write_fd
) == 1;
325 static void cleanup_kernel_management_thread(void *data
)
327 struct thread_notifiers
*notifiers
= (thread_notifiers
*) data
;
329 lttng_pipe_destroy(notifiers
->quit_pipe
);
333 bool launch_kernel_management_thread(int kernel_poll_pipe_read_fd
)
335 struct lttng_pipe
*quit_pipe
;
336 struct thread_notifiers
*notifiers
= nullptr;
337 struct lttng_thread
*thread
;
339 notifiers
= zmalloc
<thread_notifiers
>();
343 quit_pipe
= lttng_pipe_open(FD_CLOEXEC
);
347 notifiers
->quit_pipe
= quit_pipe
;
348 notifiers
->kernel_poll_pipe_read_fd
= kernel_poll_pipe_read_fd
;
350 thread
= lttng_thread_create("Kernel management",
351 thread_kernel_management
,
352 shutdown_kernel_management_thread
,
353 cleanup_kernel_management_thread
,
358 lttng_thread_put(thread
);
361 cleanup_kernel_management_thread(notifiers
);