clang-tidy: apply suggested fixes
[lttng-tools.git] / src / bin / lttng-sessiond / manage-kernel.cpp
1 /*
2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10 #include "health-sessiond.hpp"
11 #include "kernel-consumer.hpp"
12 #include "kernel.hpp"
13 #include "manage-kernel.hpp"
14 #include "testpoint.hpp"
15 #include "thread.hpp"
16 #include "utils.hpp"
17
18 #include <common/pipe.hpp>
19 #include <common/urcu.hpp>
20 #include <common/utils.hpp>
21
22 #include <fcntl.h>
23
namespace {
/*
 * State shared between the kernel management thread and its shutdown and
 * cleanup callbacks, allocated by launch_kernel_management_thread().
 */
struct thread_notifiers {
	/* Quit pipe owned by this structure; destroyed by the cleanup callback. */
	struct lttng_pipe *quit_pipe;
	/* Read end of the pipe signalling kernel channel updates; not owned. */
	int kernel_poll_pipe_read_fd;
};
} /* namespace */
30
31 /*
32 * Update the kernel poll set of all channel fd available over all tracing
33 * session. Add the wakeup pipe at the end of the set.
34 */
35 static int update_kernel_poll(struct lttng_poll_event *events)
36 {
37 int ret;
38 struct ltt_kernel_channel *channel;
39 struct ltt_session *session;
40
41 DBG("Updating kernel poll set");
42
43 const auto list_lock = lttng::sessiond::lock_session_list();
44 const struct ltt_session_list *session_list = session_get_list();
45
46 cds_list_for_each_entry (session, &session_list->head, list) {
47 if (!session_get(session)) {
48 continue;
49 }
50 session_lock(session);
51 if (session->kernel_session == nullptr) {
52 session_unlock(session);
53 session_put(session);
54 continue;
55 }
56
57 cds_list_for_each_entry (
58 channel, &session->kernel_session->channel_list.head, list) {
59 /* Add channel fd to the kernel poll set */
60 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
61 if (ret < 0) {
62 session_unlock(session);
63 session_put(session);
64 return -1;
65 }
66 DBG("Channel fd %d added to kernel set", channel->fd);
67 }
68 session_unlock(session);
69 session_put(session);
70 }
71
72 return 0;
73 }
74
75 /*
76 * Find the channel fd from 'fd' over all tracing session. When found, check
77 * for new channel stream and send those stream fds to the kernel consumer.
78 *
79 * Useful for CPU hotplug feature.
80 */
81 static int update_kernel_stream(int fd)
82 {
83 int ret = 0;
84 struct ltt_session *session;
85 struct ltt_kernel_session *ksess;
86 struct ltt_kernel_channel *channel;
87
88 DBG("Updating kernel streams for channel fd %d", fd);
89
90 const auto list_lock = lttng::sessiond::lock_session_list();
91 const struct ltt_session_list *session_list = session_get_list();
92
93 cds_list_for_each_entry (session, &session_list->head, list) {
94 if (!session_get(session)) {
95 continue;
96 }
97
98 session_lock(session);
99 if (session->kernel_session == nullptr) {
100 session_unlock(session);
101 session_put(session);
102 continue;
103 }
104
105 ksess = session->kernel_session;
106
107 cds_list_for_each_entry (channel, &ksess->channel_list.head, list) {
108 struct lttng_ht_iter iter;
109 struct consumer_socket *socket;
110
111 if (channel->fd != fd) {
112 continue;
113 }
114 DBG("Channel found, updating kernel streams");
115 ret = kernel_open_channel_stream(channel);
116 if (ret < 0) {
117 goto error;
118 }
119 /* Update the stream global counter */
120 ksess->stream_count_global += ret;
121
122 /*
123 * Have we already sent fds to the consumer? If yes, it
124 * means that tracing is started so it is safe to send
125 * our updated stream fds.
126 */
127 if (ksess->consumer_fds_sent != 1 || ksess->consumer == nullptr) {
128 ret = -1;
129 goto error;
130 }
131
132 {
133 const lttng::urcu::read_lock_guard read_lock;
134
135 cds_lfht_for_each_entry (
136 ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
137 pthread_mutex_lock(socket->lock);
138 ret = kernel_consumer_send_channel_streams(
139 socket,
140 channel,
141 ksess,
142 session->output_traces ? 1 : 0);
143 pthread_mutex_unlock(socket->lock);
144 if (ret < 0) {
145 goto error;
146 }
147 }
148 }
149 }
150
151 session_unlock(session);
152 session_put(session);
153 }
154
155 return ret;
156
157 error:
158 session_unlock(session);
159 session_put(session);
160 return ret;
161 }
162
163 /*
164 * This thread manage event coming from the kernel.
165 *
166 * Features supported in this thread:
167 * -) CPU Hotplug
168 */
169 static void *thread_kernel_management(void *data)
170 {
171 int ret, i, update_poll_flag = 1, err = -1;
172 uint32_t nb_fd;
173 char tmp;
174 struct lttng_poll_event events;
175 struct thread_notifiers *notifiers = (thread_notifiers *) data;
176 const auto thread_quit_pipe_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);
177
178 DBG("[thread] Thread manage kernel started");
179
180 health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);
181
182 /*
183 * This first step of the while is to clean this structure which could free
184 * non NULL pointers so initialize it before the loop.
185 */
186 lttng_poll_init(&events);
187
188 if (testpoint(sessiond_thread_manage_kernel)) {
189 goto error_testpoint;
190 }
191
192 health_code_update();
193
194 if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
195 goto error_testpoint;
196 }
197
198 while (true) {
199 health_code_update();
200
201 if (update_poll_flag == 1) {
202 /* Clean events object. We are about to populate it again. */
203 lttng_poll_clean(&events);
204
205 ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
206 if (ret < 0) {
207 goto error_poll_create;
208 }
209
210 ret = lttng_poll_add(&events, notifiers->kernel_poll_pipe_read_fd, LPOLLIN);
211 if (ret < 0) {
212 goto error;
213 }
214
215 ret = lttng_poll_add(&events, thread_quit_pipe_fd, LPOLLIN);
216 if (ret < 0) {
217 goto error;
218 }
219
220 /* This will add the available kernel channel if any. */
221 ret = update_kernel_poll(&events);
222 if (ret < 0) {
223 goto error;
224 }
225 update_poll_flag = 0;
226 }
227
228 DBG("Thread kernel polling");
229
230 /* Poll infinite value of time */
231 restart:
232 health_poll_entry();
233 ret = lttng_poll_wait(&events, -1);
234 DBG("Thread kernel return from poll on %d fds", LTTNG_POLL_GETNB(&events));
235 health_poll_exit();
236 if (ret < 0) {
237 /*
238 * Restart interrupted system call.
239 */
240 if (errno == EINTR) {
241 goto restart;
242 }
243 goto error;
244 } else if (ret == 0) {
245 /* Should not happen since timeout is infinite */
246 ERR("Return value of poll is 0 with an infinite timeout.\n"
247 "This should not have happened! Continuing...");
248 continue;
249 }
250
251 nb_fd = ret;
252
253 for (i = 0; i < nb_fd; i++) {
254 /* Fetch once the poll data */
255 const auto revents = LTTNG_POLL_GETEV(&events, i);
256 const auto pollfd = LTTNG_POLL_GETFD(&events, i);
257
258 health_code_update();
259
260 /* Activity on thread quit pipe, exiting. */
261 if (pollfd == thread_quit_pipe_fd) {
262 DBG("Activity on thread quit pipe");
263 err = 0;
264 goto exit;
265 }
266
267 /* Check for data on kernel pipe */
268 if (revents & LPOLLIN) {
269 if (pollfd == notifiers->kernel_poll_pipe_read_fd) {
270 (void) lttng_read(
271 notifiers->kernel_poll_pipe_read_fd, &tmp, 1);
272 /*
273 * Ret value is useless here, if this pipe gets any actions
274 * an update is required anyway.
275 */
276 update_poll_flag = 1;
277 continue;
278 } else {
279 /*
280 * New CPU detected by the kernel. Adding kernel stream to
281 * kernel session and updating the kernel consumer
282 */
283 ret = update_kernel_stream(pollfd);
284 if (ret < 0) {
285 continue;
286 }
287 break;
288 }
289 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
290 update_poll_flag = 1;
291 continue;
292 } else {
293 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
294 goto error;
295 }
296 }
297 }
298
299 exit:
300 error:
301 lttng_poll_clean(&events);
302 error_poll_create:
303 error_testpoint:
304 if (err) {
305 health_error();
306 ERR("Health error occurred in %s", __func__);
307 WARN("Kernel thread died unexpectedly. "
308 "Kernel tracing can continue but CPU hotplug is disabled.");
309 }
310 health_unregister(the_health_sessiond);
311 DBG("Kernel thread dying");
312 return nullptr;
313 }
314
315 static bool shutdown_kernel_management_thread(void *data)
316 {
317 struct thread_notifiers *notifiers = (thread_notifiers *) data;
318 const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
319
320 return notify_thread_pipe(write_fd) == 1;
321 }
322
323 static void cleanup_kernel_management_thread(void *data)
324 {
325 struct thread_notifiers *notifiers = (thread_notifiers *) data;
326
327 lttng_pipe_destroy(notifiers->quit_pipe);
328 free(notifiers);
329 }
330
331 bool launch_kernel_management_thread(int kernel_poll_pipe_read_fd)
332 {
333 struct lttng_pipe *quit_pipe;
334 struct thread_notifiers *notifiers = nullptr;
335 struct lttng_thread *thread;
336
337 notifiers = zmalloc<thread_notifiers>();
338 if (!notifiers) {
339 goto error_alloc;
340 }
341 quit_pipe = lttng_pipe_open(FD_CLOEXEC);
342 if (!quit_pipe) {
343 goto error;
344 }
345 notifiers->quit_pipe = quit_pipe;
346 notifiers->kernel_poll_pipe_read_fd = kernel_poll_pipe_read_fd;
347
348 thread = lttng_thread_create("Kernel management",
349 thread_kernel_management,
350 shutdown_kernel_management_thread,
351 cleanup_kernel_management_thread,
352 notifiers);
353 if (!thread) {
354 goto error;
355 }
356 lttng_thread_put(thread);
357 return true;
358 error:
359 cleanup_kernel_management_thread(notifiers);
360 error_alloc:
361 return false;
362 }
This page took 0.036719 seconds and 4 git commands to generate.