Use compiler-agnostic defines to silence warning
[lttng-tools.git] / src / bin / lttng-sessiond / manage-kernel.cpp
... / ...
CommitLineData
1/*
2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10#include "health-sessiond.hpp"
11#include "kernel-consumer.hpp"
12#include "kernel.hpp"
13#include "manage-kernel.hpp"
14#include "testpoint.hpp"
15#include "thread.hpp"
16#include "utils.hpp"
17
18#include <common/make-unique-wrapper.hpp>
19#include <common/pipe.hpp>
20#include <common/pthread-lock.hpp>
21#include <common/urcu.hpp>
22#include <common/utils.hpp>
23
24#include <fcntl.h>
25
namespace {
/*
 * State shared between the kernel management thread and its shutdown/cleanup
 * hooks. Allocated in launch_kernel_management_thread() and released by
 * cleanup_kernel_management_thread().
 */
struct thread_notifiers {
	/* Pipe used to ask the thread to quit; owned by this structure. */
	struct lttng_pipe *quit_pipe;
	/*
	 * Read end of the pipe on which kernel channel updates are
	 * signalled; not owned (borrowed from the caller of
	 * launch_kernel_management_thread()).
	 */
	int kernel_poll_pipe_read_fd;
};
} /* namespace */
32
33/*
34 * Update the kernel poll set of all channel fd available over all tracing
35 * session. Add the wakeup pipe at the end of the set.
36 */
37static int update_kernel_poll(struct lttng_poll_event *events)
38{
39 int ret;
40
41 DBG("Updating kernel poll set");
42
43 const auto list_lock = lttng::sessiond::lock_session_list();
44 const struct ltt_session_list *session_list = session_get_list();
45
46 for (auto *session : lttng::urcu::list_iteration_adapter<ltt_session, &ltt_session::list>(
47 session_list->head)) {
48 if (!session_get(session)) {
49 continue;
50 }
51
52 session_lock(session);
53 if (session->kernel_session == nullptr) {
54 session_unlock(session);
55 session_put(session);
56 continue;
57 }
58
59 for (auto *channel : lttng::urcu::list_iteration_adapter<ltt_kernel_channel,
60 &ltt_kernel_channel::list>(
61 session->kernel_session->channel_list.head)) {
62 /* Add channel fd to the kernel poll set */
63 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
64 if (ret < 0) {
65 session_unlock(session);
66 session_put(session);
67 return -1;
68 }
69
70 DBG("Channel fd %d added to kernel set", channel->fd);
71 }
72
73 session_unlock(session);
74 session_put(session);
75 }
76
77 return 0;
78}
79
80/*
81 * Find the channel fd from 'fd' over all tracing session. When found, check
82 * for new channel stream and send those stream fds to the kernel consumer.
83 *
84 * Useful for CPU hotplug feature.
85 */
static int update_kernel_stream(int fd)
{
	int ret = 0;

	DBG("Updating kernel streams for channel fd %d", fd);

	/* Hold the session list lock for the whole walk. */
	const auto list_lock = lttng::sessiond::lock_session_list();
	const struct ltt_session_list *session_list = session_get_list();

	for (auto *raw_session_ptr :
	     lttng::urcu::list_iteration_adapter<ltt_session, &ltt_session::list>(
		     session_list->head)) {
		ltt_kernel_session *ksess;

		/*
		 * Pin and lock the session; the reference and lock are
		 * released automatically when `session` goes out of scope,
		 * which makes the early returns below safe.
		 */
		const auto session = [raw_session_ptr]() {
			session_get(raw_session_ptr);
			raw_session_ptr->lock();
			return ltt_session::make_locked_ref(*raw_session_ptr);
		}();

		if (session->kernel_session == nullptr) {
			continue;
		}

		ksess = session->kernel_session;

		for (auto *channel : lttng::urcu::list_iteration_adapter<ltt_kernel_channel,
									 &ltt_kernel_channel::list>(
			     ksess->channel_list.head)) {
			/* Only act on the channel matching the woken-up fd. */
			if (channel->fd != fd) {
				continue;
			}
			DBG("Channel found, updating kernel streams");
			/* On success, returns the number of streams opened. */
			ret = kernel_open_channel_stream(channel);
			if (ret < 0) {
				return ret;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;

			/*
			 * Have we already sent fds to the consumer? If yes, it
			 * means that tracing is started so it is safe to send
			 * our updated stream fds.
			 */
			if (ksess->consumer_fds_sent != 1 || ksess->consumer == nullptr) {
				return -1;
			}

			/* Push the new streams to every consumer socket. */
			for (auto *socket :
			     lttng::urcu::lfht_iteration_adapter<consumer_socket,
								 decltype(consumer_socket::node),
								 &consumer_socket::node>(
				     *ksess->consumer->socks->ht)) {
				/* Serialize access to the consumer socket. */
				const lttng::pthread::lock_guard socket_lock(*socket->lock);

				ret = kernel_consumer_send_channel_streams(
					socket, channel, ksess, session->output_traces ? 1 : 0);
				if (ret < 0) {
					return ret;
				}
			}
		}
	}

	return ret;
}
153
154/*
155 * This thread manage event coming from the kernel.
156 *
157 * Features supported in this thread:
158 * -) CPU Hotplug
159 */
160static void *thread_kernel_management(void *data)
161{
162 int ret, i, update_poll_flag = 1, err = -1;
163 uint32_t nb_fd;
164 char tmp;
165 struct lttng_poll_event events;
166 struct thread_notifiers *notifiers = (thread_notifiers *) data;
167 const auto thread_quit_pipe_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);
168
169 DBG("[thread] Thread manage kernel started");
170
171 health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);
172
173 /*
174 * This first step of the while is to clean this structure which could free
175 * non NULL pointers so initialize it before the loop.
176 */
177 lttng_poll_init(&events);
178
179 if (testpoint(sessiond_thread_manage_kernel)) {
180 goto error_testpoint;
181 }
182
183 health_code_update();
184
185 if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
186 goto error_testpoint;
187 }
188
189 while (true) {
190 health_code_update();
191
192 if (update_poll_flag == 1) {
193 /* Clean events object. We are about to populate it again. */
194 lttng_poll_clean(&events);
195
196 ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
197 if (ret < 0) {
198 goto error_poll_create;
199 }
200
201 ret = lttng_poll_add(&events, notifiers->kernel_poll_pipe_read_fd, LPOLLIN);
202 if (ret < 0) {
203 goto error;
204 }
205
206 ret = lttng_poll_add(&events, thread_quit_pipe_fd, LPOLLIN);
207 if (ret < 0) {
208 goto error;
209 }
210
211 /* This will add the available kernel channel if any. */
212 ret = update_kernel_poll(&events);
213 if (ret < 0) {
214 goto error;
215 }
216 update_poll_flag = 0;
217 }
218
219 DBG("Thread kernel polling");
220
221 /* Poll infinite value of time */
222 restart:
223 health_poll_entry();
224 ret = lttng_poll_wait(&events, -1);
225 DBG("Thread kernel return from poll on %d fds", LTTNG_POLL_GETNB(&events));
226 health_poll_exit();
227 if (ret < 0) {
228 /*
229 * Restart interrupted system call.
230 */
231 if (errno == EINTR) {
232 goto restart;
233 }
234 goto error;
235 } else if (ret == 0) {
236 /* Should not happen since timeout is infinite */
237 ERR("Return value of poll is 0 with an infinite timeout.\n"
238 "This should not have happened! Continuing...");
239 continue;
240 }
241
242 nb_fd = ret;
243
244 for (i = 0; i < nb_fd; i++) {
245 /* Fetch once the poll data */
246 const auto revents = LTTNG_POLL_GETEV(&events, i);
247 const auto pollfd = LTTNG_POLL_GETFD(&events, i);
248
249 health_code_update();
250
251 /* Activity on thread quit pipe, exiting. */
252 if (pollfd == thread_quit_pipe_fd) {
253 DBG("Activity on thread quit pipe");
254 err = 0;
255 goto exit;
256 }
257
258 /* Check for data on kernel pipe */
259 if (revents & LPOLLIN) {
260 if (pollfd == notifiers->kernel_poll_pipe_read_fd) {
261 (void) lttng_read(
262 notifiers->kernel_poll_pipe_read_fd, &tmp, 1);
263 /*
264 * Ret value is useless here, if this pipe gets any actions
265 * an update is required anyway.
266 */
267 update_poll_flag = 1;
268 continue;
269 } else {
270 /*
271 * New CPU detected by the kernel. Adding kernel stream to
272 * kernel session and updating the kernel consumer
273 */
274 ret = update_kernel_stream(pollfd);
275 if (ret < 0) {
276 continue;
277 }
278 break;
279 }
280 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
281 update_poll_flag = 1;
282 continue;
283 } else {
284 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
285 goto error;
286 }
287 }
288 }
289
290exit:
291error:
292 lttng_poll_clean(&events);
293error_poll_create:
294error_testpoint:
295 if (err) {
296 health_error();
297 ERR("Health error occurred in %s", __func__);
298 WARN("Kernel thread died unexpectedly. "
299 "Kernel tracing can continue but CPU hotplug is disabled.");
300 }
301 health_unregister(the_health_sessiond);
302 DBG("Kernel thread dying");
303 return nullptr;
304}
305
306static bool shutdown_kernel_management_thread(void *data)
307{
308 struct thread_notifiers *notifiers = (thread_notifiers *) data;
309 const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
310
311 return notify_thread_pipe(write_fd) == 1;
312}
313
314static void cleanup_kernel_management_thread(void *data)
315{
316 struct thread_notifiers *notifiers = (thread_notifiers *) data;
317
318 lttng_pipe_destroy(notifiers->quit_pipe);
319 free(notifiers);
320}
321
322bool launch_kernel_management_thread(int kernel_poll_pipe_read_fd)
323{
324 struct lttng_pipe *quit_pipe;
325 struct thread_notifiers *notifiers = nullptr;
326 struct lttng_thread *thread;
327
328 notifiers = zmalloc<thread_notifiers>();
329 if (!notifiers) {
330 goto error_alloc;
331 }
332 quit_pipe = lttng_pipe_open(FD_CLOEXEC);
333 if (!quit_pipe) {
334 goto error;
335 }
336 notifiers->quit_pipe = quit_pipe;
337 notifiers->kernel_poll_pipe_read_fd = kernel_poll_pipe_read_fd;
338
339 thread = lttng_thread_create("Kernel management",
340 thread_kernel_management,
341 shutdown_kernel_management_thread,
342 cleanup_kernel_management_thread,
343 notifiers);
344 if (!thread) {
345 goto error;
346 }
347 lttng_thread_put(thread);
348 return true;
349error:
350 cleanup_kernel_management_thread(notifiers);
351error_alloc:
352 return false;
353}
This page took 0.026158 seconds and 5 git commands to generate.