/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#include <common/pipe.hpp>
#include <common/utils.hpp>

#include "manage-kernel.hpp"
#include "testpoint.hpp"
#include "health-sessiond.hpp"
#include "utils.hpp"
#include "thread.hpp"
#include "kernel.hpp"
#include "kernel-consumer.hpp"

namespace {
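/*
 * Notifiers shared between the thread entry point and its shutdown/cleanup
 * callbacks: the quit pipe used to wake the thread on shutdown and the read
 * end of the kernel poll pipe it monitors.
 */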
struct thread_notifiers {
	struct lttng_pipe *quit_pipe;
	int kernel_poll_pipe_read_fd;
};
} /* namespace */

/*
 * Update the kernel poll set with all the channel fds available across all
 * tracing sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_kernel_channel *channel;
	struct ltt_session *session;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Updating kernel poll set");

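	/*
	 * Iterate on every session under the session list lock; each session is
	 * referenced and locked individually while its kernel channel fds are
	 * added to the poll set.
	 */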
	session_lock_list();
	cds_list_for_each_entry(session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			session_put(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				session_put(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
		session_put(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}

/*
 * Find the channel matching 'fd' across all tracing sessions. When found,
 * check for new channel streams and send those stream fds to the kernel
 * consumer.
 *
 * Useful for the CPU hotplug feature.
 */
static int update_kernel_stream(int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			session_put(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel,
				&ksess->channel_list.head, list) {
			struct lttng_ht_iter iter;
			struct consumer_socket *socket;

			if (channel->fd != fd) {
				continue;
			}
			DBG("Channel found, updating kernel streams");
			ret = kernel_open_channel_stream(channel);
			if (ret < 0) {
				goto error;
			}
			/* Update the global stream counter. */
			ksess->stream_count_global += ret;

			/*
			 * Have we already sent fds to the consumer? If so, tracing
			 * has started and it is safe to send our updated stream fds.
			 */
			if (ksess->consumer_fds_sent != 1
					|| ksess->consumer == NULL) {
				ret = -1;
				goto error;
			}

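			/*
			 * Push the newly opened stream(s) to every consumer socket of
			 * the session's kernel consumer output.
			 */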
			rcu_read_lock();
			cds_lfht_for_each_entry(ksess->consumer->socks->ht,
					&iter.iter, socket, node.node) {
				pthread_mutex_lock(socket->lock);
				ret = kernel_consumer_send_channel_streams(socket,
						channel, ksess,
						session->output_traces ? 1 : 0);
				pthread_mutex_unlock(socket->lock);
				if (ret < 0) {
					rcu_read_unlock();
					goto error;
				}
			}
			rcu_read_unlock();
		}
		session_unlock(session);
		session_put(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_put(session);
	session_unlock_list();
	return ret;
}

/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU hotplug
 */
static void *thread_kernel_management(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const int quit_pipe_read_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);

	DBG("[thread] Thread manage kernel started");

	health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * The first step of the while loop cleans this structure, which could
	 * free non-NULL pointers, so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

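	/*
	 * update_poll_flag starts at 1 so the first iteration builds the poll
	 * set; it is raised again whenever the kernel notification pipe fires or
	 * a channel fd reports an error or hang-up, forcing a rebuild.
	 */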
	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events,
					notifiers->kernel_poll_pipe_read_fd,
					LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			ret = lttng_poll_add(&events,
					quit_pipe_read_fd,
					LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channels, if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling");

		/* Poll with an infinite timeout. */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Thread kernel return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since the timeout is infinite. */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
					"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

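		/*
		 * Handle every ready fd: the quit pipe, the kernel notification
		 * pipe, or a kernel channel fd signalling CPU hotplug activity.
		 */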
		for (i = 0; i < nb_fd; i++) {
			/* Fetch the poll data once. */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (pollfd == quit_pipe_read_fd) {
				err = 0;
				goto exit;
			}

			/* Check for data on the kernel pipe. */
			if (revents & LPOLLIN) {
				if (pollfd == notifiers->kernel_poll_pipe_read_fd) {
					(void) lttng_read(notifiers->kernel_poll_pipe_read_fd,
							&tmp, 1);
					/*
					 * The return value is not checked here; if this
					 * pipe sees any activity, an update is required
					 * anyway.
					 */
					update_poll_flag = 1;
					continue;
				} else {
					/*
					 * A new CPU was detected by the kernel. Add the
					 * kernel streams to the kernel session and update
					 * the kernel consumer.
					 */
					ret = update_kernel_stream(pollfd);
					if (ret < 0) {
						continue;
					}
					break;
				}
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				update_poll_flag = 1;
				continue;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

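/*
 * Only the quit pipe path sets 'err' to 0; any other exit leaves it at -1 and
 * is reported to the health subsystem below.
 */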
exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(the_health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}

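/*
 * Shutdown callback: wake the kernel management thread by writing to its quit
 * pipe. A single byte written means the notification was delivered.
 */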
static bool shutdown_kernel_management_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);

	return notify_thread_pipe(write_fd) == 1;
}

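/*
 * Cleanup callback: destroy the quit pipe and release the notifiers. Also
 * invoked directly on the launch error path.
 */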
static void cleanup_kernel_management_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;

	lttng_pipe_destroy(notifiers->quit_pipe);
	free(notifiers);
}

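/*
 * Create and launch the "Kernel management" thread. The caller provides the
 * read end of the kernel poll pipe. Returns true on success; on failure, the
 * notifiers and quit pipe are cleaned up and false is returned.
 */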
bool launch_kernel_management_thread(int kernel_poll_pipe_read_fd)
{
	struct lttng_pipe *quit_pipe;
	struct thread_notifiers *notifiers = NULL;
	struct lttng_thread *thread;

	notifiers = zmalloc<thread_notifiers>();
	if (!notifiers) {
		goto error_alloc;
	}
	quit_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!quit_pipe) {
		goto error;
	}
	notifiers->quit_pipe = quit_pipe;
	notifiers->kernel_poll_pipe_read_fd = kernel_poll_pipe_read_fd;

	thread = lttng_thread_create("Kernel management",
			thread_kernel_management,
			shutdown_kernel_management_thread,
			cleanup_kernel_management_thread,
			notifiers);
	if (!thread) {
		goto error;
	}
	lttng_thread_put(thread);
	return true;
error:
	cleanup_kernel_management_thread(notifiers);
error_alloc:
	return false;
}