/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE
#include "kernel-consumer.hpp"

#include <common/buffer-view.hpp>
#include <common/common.hpp>
#include <common/compat/endian.hpp>
#include <common/compat/fcntl.hpp>
#include <common/consumer/consumer-stream.hpp>
#include <common/consumer/consumer-timer.hpp>
#include <common/consumer/consumer.hpp>
#include <common/consumer/metadata-bucket.hpp>
#include <common/index/index.hpp>
#include <common/kernel-ctl/kernel-ctl.hpp>
#include <common/optional.hpp>
#include <common/pipe.hpp>
#include <common/relayd/relayd.hpp>
#include <common/sessiond-comm/relayd.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/utils.hpp>

#include <bin/lttng-consumerd/health-consumerd.hpp>
#include <inttypes.h>
#include <poll.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

extern struct lttng_consumer_global_data the_consumer_data;
extern int consumer_poll_timeout;

/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	int ret = 0;
	int infd = stream->wait_fd;

	ret = kernctl_snapshot(infd);
	/*
	 * -EAGAIN is not an error, it just means that there is no data to
	 * be read.
	 */
	if (ret != 0 && ret != -EAGAIN) {
		PERROR("Getting sub-buffer snapshot.");
	}

	return ret;
}

/*
 * Sample consumed and produced positions for a specific fd.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_kconsumer_sample_snapshot_positions(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);

	return kernctl_snapshot_sample_positions(stream->wait_fd);
}

/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream, unsigned long *pos)
{
	int ret;
	int infd = stream->wait_fd;

	ret = kernctl_snapshot_get_produced(infd, pos);
	if (ret != 0) {
		PERROR("kernctl_snapshot_get_produced");
	}

	return ret;
}

/*
 * Get the consumed position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream, unsigned long *pos)
{
	int ret;
	int infd = stream->wait_fd;

	ret = kernctl_snapshot_get_consumed(infd, pos);
	if (ret != 0) {
		PERROR("kernctl_snapshot_get_consumed");
	}

	return ret;
}

static int get_current_subbuf_addr(struct lttng_consumer_stream *stream, const char **addr)
{
	int ret;
	unsigned long mmap_offset;
	const char *mmap_base = (const char *) stream->mmap_base;

	ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
	if (ret < 0) {
		PERROR("Failed to get mmap read offset");
		goto error;
	}

	*addr = mmap_base + mmap_offset;
error:
	return ret;
}

/*
 * Take a snapshot of all the streams of a channel.
 * RCU read-side lock must be held across this function to ensure existence of
 * channel.
 *
 * Returns 0 on success, < 0 on error
 */
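/*
 * Implementation note: for each stream of the channel, the code below
 * flushes the ring buffer, samples the produced and consumed positions,
 * then rewinds the consumed position with consumer_get_consume_start_pos()
 * so that at most nb_packets_per_stream packets are captured. Each
 * sub-buffer in that range is read through the stream's mmap base and
 * written either to the relay daemon (data length) or to a local tracefile
 * (padded length).
 */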
static int lttng_kconsumer_snapshot_channel(struct lttng_consumer_channel *channel,
					    uint64_t key,
					    char *path,
					    uint64_t relayd_id,
					    uint64_t nb_packets_per_stream)
{
	int ret;
	struct lttng_consumer_stream *stream;

	DBG("Kernel consumer snapshot channel %" PRIu64, key);

	/* Prevent channel modifications while we perform the snapshot. */
	pthread_mutex_lock(&channel->lock);

	rcu_read_lock();

	/* Splice is not supported yet for channel snapshot. */
	if (channel->output != CONSUMER_CHANNEL_MMAP) {
		ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",
		    channel->name);
		ret = -1;
		goto end;
	}

	cds_list_for_each_entry (stream, &channel->streams.head, send_node) {
		unsigned long consumed_pos, produced_pos;

		health_code_update();

		/*
		 * Lock stream because we are about to change its state.
		 */
		pthread_mutex_lock(&stream->lock);

		LTTNG_ASSERT(channel->trace_chunk);
		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
			/*
			 * Can't happen barring an internal error as the channel
			 * holds a reference to the trace chunk.
			 */
			ERR("Failed to acquire reference to channel's trace chunk");
			ret = -1;
			goto end_unlock;
		}
		LTTNG_ASSERT(!stream->trace_chunk);
		stream->trace_chunk = channel->trace_chunk;

		/*
		 * Assign the received relayd ID so we can use it for streaming. The streams
		 * are not visible to anyone so it is OK to change it.
		 */
		stream->net_seq_idx = relayd_id;
		channel->relayd_id = relayd_id;
		if (relayd_id != (uint64_t) -1ULL) {
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				ERR("sending stream to relayd");
				goto error_close_stream_output;
			}
		} else {
			ret = consumer_stream_create_output_files(stream, false);
			if (ret < 0) {
				goto error_close_stream_output;
			}
			DBG("Kernel consumer snapshot stream (%" PRIu64 ")", stream->key);
		}

		ret = kernctl_buffer_flush_empty(stream->wait_fd);
		if (ret < 0) {
			/*
			 * Doing a buffer flush which does not take into
			 * account empty packets. This is not perfect
			 * for stream intersection, but required as a
			 * fall-back when "flush_empty" is not
			 * implemented by lttng-modules.
			 */
			ret = kernctl_buffer_flush(stream->wait_fd);
			if (ret < 0) {
				ERR("Failed to flush kernel stream");
				goto error_close_stream_output;
			}
			goto end_unlock;
		}

		ret = lttng_kconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking kernel snapshot");
			goto error_close_stream_output;
		}

		ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced kernel snapshot position");
			goto error_close_stream_output;
		}

		ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumerd kernel snapshot position");
			goto error_close_stream_output;
		}

		consumed_pos = consumer_get_consume_start_pos(
			consumed_pos, produced_pos, nb_packets_per_stream, stream->max_sb_size);

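		/*
		 * Note: the sampled positions are free-running byte counters;
		 * comparing their signed difference (rather than the raw values)
		 * keeps the loop condition below meaningful even if the counters
		 * wrap around.
		 */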
		while ((long) (consumed_pos - produced_pos) < 0) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();
			DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos);

			ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("kernctl_get_subbuf snapshot");
					goto error_close_stream_output;
				}
				DBG("Kernel consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			ret = kernctl_get_subbuf_size(stream->wait_fd, &len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

			subbuf_view = lttng_buffer_view_init(subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
				stream, &subbuf_view, padded_len - len);
			/*
			 * We write the padded len in local tracefiles but the data len
			 * when using a relay. Display the error but continue processing
			 * to try to release the subbuffer.
			 */
			if (relayd_id != (uint64_t) -1ULL) {
				if (read_len != len) {
					ERR("Error sending to the relay (ret: %zd != len: %lu)",
					    read_len,
					    len);
				}
			} else {
				if (read_len != padded_len) {
					ERR("Error writing to tracefile (ret: %zd != len: %lu)",
					    read_len,
					    padded_len);
				}
			}

			ret = kernctl_put_subbuf(stream->wait_fd);
			if (ret < 0) {
				ERR("Snapshot kernctl_put_subbuf");
				goto error_close_stream_output;
			}
			consumed_pos += stream->max_sb_size;
		}

		consumer_stream_close_output(stream);
		pthread_mutex_unlock(&stream->lock);
	}

	/* All good! */
	ret = 0;
	goto end;

error_put_subbuf:
	ret = kernctl_put_subbuf(stream->wait_fd);
	if (ret < 0) {
		ERR("Snapshot kernctl_put_subbuf error path");
	}
error_close_stream_output:
	consumer_stream_close_output(stream);
end_unlock:
	pthread_mutex_unlock(&stream->lock);
end:
	rcu_read_unlock();
	pthread_mutex_unlock(&channel->lock);
	return ret;
}

/*
 * Read the whole metadata available for a snapshot.
 * RCU read-side lock must be held across this function to ensure existence of
 * metadata_channel.
 *
 * Returns 0 on success, < 0 on error
 */
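/*
 * Implementation note: the metadata stream is drained by calling
 * lttng_consumer_read_subbuffer() in a loop until it reports that no data
 * is left; the output (relayd stream or local file) is then closed and the
 * metadata stream destroyed.
 */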
static int lttng_kconsumer_snapshot_metadata(struct lttng_consumer_channel *metadata_channel,
					     uint64_t key,
					     char *path,
					     uint64_t relayd_id,
					     struct lttng_consumer_local_data *ctx)
{
	int ret, use_relayd = 0;
	ssize_t ret_read;
	struct lttng_consumer_stream *metadata_stream;

	LTTNG_ASSERT(ctx);

	DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s", key, path);

	rcu_read_lock();

	metadata_stream = metadata_channel->metadata_stream;
	LTTNG_ASSERT(metadata_stream);

	metadata_stream->read_subbuffer_ops.lock(metadata_stream);
	LTTNG_ASSERT(metadata_channel->trace_chunk);
	LTTNG_ASSERT(metadata_stream->trace_chunk);

	/* Flag once that we have a valid relayd for the stream. */
	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	if (use_relayd) {
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_snapshot;
		}
	} else {
		ret = consumer_stream_create_output_files(metadata_stream, false);
		if (ret < 0) {
			goto error_snapshot;
		}
	}

	do {
		health_code_update();

		ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
		if (ret_read < 0) {
			ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)", ret_read);
			ret = ret_read;
			goto error_snapshot;
		}
	} while (ret_read > 0);

	if (use_relayd) {
		close_relayd_stream(metadata_stream);
		metadata_stream->net_seq_idx = (uint64_t) -1ULL;
	} else {
		if (metadata_stream->out_fd >= 0) {
			ret = close(metadata_stream->out_fd);
			if (ret < 0) {
				PERROR("Kernel consumer snapshot metadata close out_fd");
				/*
				 * Don't go on error here since the snapshot was successful at this
				 * point but somehow the close failed.
				 */
			}
			metadata_stream->out_fd = -1;
			lttng_trace_chunk_put(metadata_stream->trace_chunk);
			metadata_stream->trace_chunk = nullptr;
		}
	}

	ret = 0;
error_snapshot:
	metadata_stream->read_subbuffer_ops.unlock(metadata_stream);
	consumer_stream_destroy(metadata_stream, nullptr);
	metadata_channel->metadata_stream = nullptr;
	rcu_read_unlock();
	return ret;
}

/*
 * Receive a command from the session daemon and process it.
 *
 * Returns 1 on success, otherwise a negative value or 0.
 */
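/*
 * Implementation note: a fixed-size lttcomm_consumer_msg is first received
 * from the session daemon socket; most commands then perform one or more
 * consumer_send_status_msg() handshakes before any file descriptor or
 * payload exchange. Jumping to end_nosignal reports success (1), while
 * error_fatal returns -1, which issues a consumer stop.
 */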
int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
			     int sock,
			     struct pollfd *consumer_sockpoll)
{
	int ret_func;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;

	health_code_update();

	{
		ssize_t ret_recv;

		ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
		if (ret_recv != sizeof(msg)) {
			if (ret_recv > 0) {
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
				ret_recv = -1;
			}
			return ret_recv;
		}
	}

	health_code_update();

	/* Deprecated command */
	LTTNG_ASSERT(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* relayd needs RCU read-side protection */
	rcu_read_lock();

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		uint32_t major = msg.u.relayd_sock.major;
		uint32_t minor = msg.u.relayd_sock.minor;
		enum lttcomm_sock_proto protocol =
			(enum lttcomm_sock_proto) msg.u.relayd_sock.relayd_socket_protocol;

		/* Session daemon status messages are handled in the following call. */
		consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
					   msg.u.relayd_sock.type,
					   ctx,
					   sock,
					   consumer_sockpoll,
					   msg.u.relayd_sock.session_id,
					   msg.u.relayd_sock.relayd_session_id,
					   major,
					   minor,
					   protocol);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_ADD_CHANNEL:
	{
		struct lttng_consumer_channel *new_channel;
		int ret_send_status, ret_add_channel = 0;
		const uint64_t chunk_id = msg.u.channel.chunk_id.value;

		health_code_update();

		/* First send a status message before receiving the fds. */
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_fatal;
		}

		health_code_update();

		DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key);
		new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
							msg.u.channel.session_id,
							msg.u.channel.chunk_id.is_set ? &chunk_id :
											nullptr,
							msg.u.channel.pathname,
							msg.u.channel.name,
							msg.u.channel.relayd_id,
							msg.u.channel.output,
							msg.u.channel.tracefile_size,
							msg.u.channel.tracefile_count,
							0,
							msg.u.channel.monitor,
							msg.u.channel.live_timer_interval,
							msg.u.channel.is_live,
							nullptr,
							nullptr);
		if (new_channel == nullptr) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			goto end_nosignal;
		}
		new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams;
		switch (msg.u.channel.output) {
		case LTTNG_EVENT_SPLICE:
			new_channel->output = CONSUMER_CHANNEL_SPLICE;
			break;
		case LTTNG_EVENT_MMAP:
			new_channel->output = CONSUMER_CHANNEL_MMAP;
			break;
		default:
			ERR("Channel output unknown %d", msg.u.channel.output);
			goto end_nosignal;
		}

		/* Translate and save channel type. */
		switch (msg.u.channel.type) {
		case CONSUMER_CHANNEL_TYPE_DATA:
		case CONSUMER_CHANNEL_TYPE_METADATA:
			new_channel->type = (consumer_channel_type) msg.u.channel.type;
			break;
		default:
			abort();
			goto end_nosignal;
		};

		health_code_update();

		if (ctx->on_recv_channel != nullptr) {
			int ret_recv_channel = ctx->on_recv_channel(new_channel);
			if (ret_recv_channel == 0) {
				ret_add_channel = consumer_add_channel(new_channel, ctx);
			} else if (ret_recv_channel < 0) {
				goto end_nosignal;
			}
		} else {
			ret_add_channel = consumer_add_channel(new_channel, ctx);
		}
		if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA && !ret_add_channel) {
			int monitor_start_ret;

			DBG("Consumer starting monitor timer");
			consumer_timer_live_start(new_channel, msg.u.channel.live_timer_interval);
			monitor_start_ret = consumer_timer_monitor_start(
				new_channel, msg.u.channel.monitor_timer_interval);
			if (monitor_start_ret < 0) {
				ERR("Starting channel monitoring timer failed");
				goto end_nosignal;
			}
		}

		health_code_update();

		/* If we received an error in add_channel, we need to report it. */
		if (ret_add_channel < 0) {
			ret_send_status = consumer_send_status_msg(sock, ret_add_channel);
			if (ret_send_status < 0) {
				goto error_fatal;
			}
			goto end_nosignal;
		}

		goto end_nosignal;
	}
	case LTTNG_CONSUMER_ADD_STREAM:
	{
		int fd;
		struct lttng_pipe *stream_pipe;
		struct lttng_consumer_stream *new_stream;
		struct lttng_consumer_channel *channel;
		int alloc_ret = 0;
		int ret_send_status, ret_poll, ret_get_max_subbuf_size;
		ssize_t ret_pipe_write, ret_recv;

		/*
		 * Get stream's channel reference. Needed when adding the stream to the
		 * global hash table.
		 */
		channel = consumer_find_channel(msg.u.stream.channel_key);
		if (!channel) {
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		}

		health_code_update();

		/* First send a status message before receiving the fds. */
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_add_stream_fatal;
		}

		health_code_update();

		if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
			/* Channel was not found. */
			goto error_add_stream_nosignal;
		}

		/* Blocking call */
		health_poll_entry();
		ret_poll = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret_poll) {
			goto error_add_stream_fatal;
		}

		health_code_update();

		/* Get stream file descriptor from socket */
		ret_recv = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
		if (ret_recv != sizeof(fd)) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
			ret_func = ret_recv;
			goto end;
		}

		health_code_update();

		/*
		 * Send status code to session daemon only if the recv works. If the
		 * above recv() failed, the session daemon is notified through the
		 * error socket and the teardown is eventually done.
		 */
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_add_stream_nosignal;
		}

		health_code_update();

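		/*
		 * Lock ordering: the channel lock is taken before creating the
		 * stream and the stream lock right after creation; both are
		 * released before the stream is made globally visible through
		 * the data or metadata pipe.
		 */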
		pthread_mutex_lock(&channel->lock);
		new_stream = consumer_stream_create(channel,
						    channel->key,
						    fd,
						    channel->name,
						    channel->relayd_id,
						    channel->session_id,
						    channel->trace_chunk,
						    msg.u.stream.cpu,
						    &alloc_ret,
						    channel->type,
						    channel->monitor);
		if (new_stream == nullptr) {
			switch (alloc_ret) {
			case -ENOMEM:
			case -EINVAL:
			default:
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
				break;
			}
			pthread_mutex_unlock(&channel->lock);
			goto error_add_stream_nosignal;
		}

		new_stream->wait_fd = fd;
		ret_get_max_subbuf_size =
			kernctl_get_max_subbuf_size(new_stream->wait_fd, &new_stream->max_sb_size);
		if (ret_get_max_subbuf_size < 0) {
			pthread_mutex_unlock(&channel->lock);
			ERR("Failed to get kernel maximal subbuffer size");
			goto error_add_stream_nosignal;
		}

		consumer_stream_update_channel_attributes(new_stream, channel);

		/*
		 * We've just assigned the channel to the stream so increment the
		 * refcount right now. We don't need to increment the refcount for
		 * streams in no monitor mode because we handle their cleanup manually.
		 * It is very important to make sure there is NO prior
		 * consumer_del_stream() call or else the refcount will be unbalanced.
		 */
		if (channel->monitor) {
			uatomic_inc(&new_stream->chan->refcount);
		}

		/*
		 * The buffer flush is done on the session daemon side for the kernel
		 * so no need for the stream "hangup_flush_done" variable to be
		 * tracked. This is important for a kernel stream since we don't rely
		 * on the flush state of the stream to read data. It's not the case for
		 * user space tracing.
		 */
		new_stream->hangup_flush_done = 0;

		health_code_update();

		pthread_mutex_lock(&new_stream->lock);
		if (ctx->on_recv_stream) {
			int ret_recv_stream = ctx->on_recv_stream(new_stream);
			if (ret_recv_stream < 0) {
				pthread_mutex_unlock(&new_stream->lock);
				pthread_mutex_unlock(&channel->lock);
				consumer_stream_free(new_stream);
				goto error_add_stream_nosignal;
			}
		}
		health_code_update();

		if (new_stream->metadata_flag) {
			channel->metadata_stream = new_stream;
		}

		/* Do not monitor this stream. */
		if (!channel->monitor) {
			DBG("Kernel consumer add stream %s in no monitor mode with "
			    "relayd id %" PRIu64,
			    new_stream->name,
			    new_stream->net_seq_idx);
			cds_list_add(&new_stream->send_node, &channel->streams.head);
			pthread_mutex_unlock(&new_stream->lock);
			pthread_mutex_unlock(&channel->lock);
			goto end_add_stream;
		}

		/* Send stream to relayd if the stream has an ID. */
		if (new_stream->net_seq_idx != (uint64_t) -1ULL) {
			int ret_send_relayd_stream;

			ret_send_relayd_stream =
				consumer_send_relayd_stream(new_stream, new_stream->chan->pathname);
			if (ret_send_relayd_stream < 0) {
				pthread_mutex_unlock(&new_stream->lock);
				pthread_mutex_unlock(&channel->lock);
				consumer_stream_free(new_stream);
				goto error_add_stream_nosignal;
			}

			/*
			 * If adding an extra stream to an already
			 * existing channel (e.g. cpu hotplug), we need
			 * to send the "streams_sent" command to relayd.
			 */
			if (channel->streams_sent_to_relayd) {
				int ret_send_relayd_streams_sent;

				ret_send_relayd_streams_sent =
					consumer_send_relayd_streams_sent(new_stream->net_seq_idx);
				if (ret_send_relayd_streams_sent < 0) {
					pthread_mutex_unlock(&new_stream->lock);
					pthread_mutex_unlock(&channel->lock);
					goto error_add_stream_nosignal;
				}
			}
		}
		pthread_mutex_unlock(&new_stream->lock);
		pthread_mutex_unlock(&channel->lock);

		/* Get the right pipe where the stream will be sent. */
		if (new_stream->metadata_flag) {
			consumer_add_metadata_stream(new_stream);
			stream_pipe = ctx->consumer_metadata_pipe;
		} else {
			consumer_add_data_stream(new_stream);
			stream_pipe = ctx->consumer_data_pipe;
		}

		/* Visible to other threads */
		new_stream->globally_visible = 1;

		health_code_update();

		ret_pipe_write = lttng_pipe_write(
			stream_pipe, &new_stream, sizeof(new_stream)); /* NOLINT sizeof used on a pointer. */
		if (ret_pipe_write < 0) {
			ERR("Consumer write %s stream to pipe %d",
			    new_stream->metadata_flag ? "metadata" : "data",
			    lttng_pipe_get_writefd(stream_pipe));
			if (new_stream->metadata_flag) {
				consumer_del_stream_for_metadata(new_stream);
			} else {
				consumer_del_stream_for_data(new_stream);
			}
			goto error_add_stream_nosignal;
		}

		DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64,
		    new_stream->name,
		    fd,
		    new_stream->chan->pathname,
		    new_stream->relayd_stream_id);
	end_add_stream:
		break;
	error_add_stream_nosignal:
		goto end_nosignal;
	error_add_stream_fatal:
		goto error_fatal;
	}
	case LTTNG_CONSUMER_STREAMS_SENT:
	{
		struct lttng_consumer_channel *channel;
		int ret_send_status;

		/*
		 * Get stream's channel reference. Needed when adding the stream to the
		 * global hash table.
		 */
		channel = consumer_find_channel(msg.u.sent_streams.channel_key);
		if (!channel) {
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			ERR("Unable to find channel key %" PRIu64, msg.u.sent_streams.channel_key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		}

		health_code_update();

		/*
		 * Send status code to session daemon.
		 */
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_streams_sent_nosignal;
		}

		health_code_update();

		/*
		 * We should not send this message if we don't monitor the
		 * streams in this channel.
		 */
		if (!channel->monitor) {
			goto end_error_streams_sent;
		}

		health_code_update();
		/* Send stream to relayd if the stream has an ID. */
		if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) {
			int ret_send_relay_streams;

			ret_send_relay_streams =
				consumer_send_relayd_streams_sent(msg.u.sent_streams.net_seq_idx);
			if (ret_send_relay_streams < 0) {
				goto error_streams_sent_nosignal;
			}
			channel->streams_sent_to_relayd = true;
		}
	end_error_streams_sent:
		break;
	error_streams_sent_nosignal:
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		rcu_read_unlock();
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;
		int ret_send_status;

		DBG("Kernel consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == nullptr) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		}

		/*
		 * Each relayd socket pair has a refcount of streams attached to it
		 * which tells if the relayd is still active or not depending on the
		 * refcount value.
		 *
		 * This will set the destroy flag of the relayd object and destroy it
		 * if the refcount reaches zero when called.
		 *
		 * The destroy can happen either here or when a stream fd hangs up.
		 */
		if (relayd) {
			consumer_flag_relayd_for_destroy(relayd);
		}

		health_code_update();

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_fatal;
		}

		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int32_t ret_data_pending;
		uint64_t id = msg.u.data_pending.session_id;
		ssize_t ret_send;

		DBG("Kernel consumer data pending command for id %" PRIu64, id);

		ret_data_pending = consumer_data_pending(id);

		health_code_update();

		/* Send back returned value to session daemon */
		ret_send =
			lttcomm_send_unix_sock(sock, &ret_data_pending, sizeof(ret_data_pending));
		if (ret_send < 0) {
			PERROR("send data pending ret code");
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		struct lttng_consumer_channel *channel;
		uint64_t key = msg.u.snapshot_channel.key;
		int ret_send_status;

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			if (msg.u.snapshot_channel.metadata == 1) {
				int ret_snapshot;

				ret_snapshot = lttng_kconsumer_snapshot_metadata(
					channel,
					key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					ctx);
				if (ret_snapshot < 0) {
					ERR("Snapshot metadata failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			} else {
				int ret_snapshot;

				ret_snapshot = lttng_kconsumer_snapshot_channel(
					channel,
					key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					msg.u.snapshot_channel.nb_packets_per_stream);
				if (ret_snapshot < 0) {
					ERR("Snapshot channel failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			}
		}
		health_code_update();

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		break;
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;
		struct lttng_consumer_channel *channel;
		int ret_send_status;

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("Kernel consumer destroy channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		}

		health_code_update();

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_destroy_channel;
		}

		health_code_update();

		/* Stop right now if no channel was found. */
		if (!channel) {
			goto end_destroy_channel;
		}

		/*
		 * This command should ONLY be issued for a channel with streams set
		 * in no monitor mode.
		 */
		LTTNG_ASSERT(!channel->monitor);

		/*
		 * The refcount should ALWAYS be 0 in the case of a channel in no
		 * monitor mode.
		 */
		LTTNG_ASSERT(!uatomic_sub_return(&channel->refcount, 1));

		consumer_del_channel(channel);
	end_destroy_channel:
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DISCARDED_EVENTS:
	{
		ssize_t ret;
		uint64_t count;
		struct lttng_consumer_channel *channel;
		uint64_t id = msg.u.discarded_events.session_id;
		uint64_t key = msg.u.discarded_events.channel_key;

		DBG("Kernel consumer discarded events command for session id %" PRIu64
		    ", channel key %" PRIu64,
		    id,
		    key);

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("Kernel consumer discarded events channel %" PRIu64 " not found", key);
			count = 0;
		} else {
			count = channel->discarded_events;
		}

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
		if (ret < 0) {
			PERROR("send discarded events");
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_LOST_PACKETS:
	{
		ssize_t ret;
		uint64_t count;
		struct lttng_consumer_channel *channel;
		uint64_t id = msg.u.lost_packets.session_id;
		uint64_t key = msg.u.lost_packets.channel_key;

		DBG("Kernel consumer lost packets command for session id %" PRIu64
		    ", channel key %" PRIu64,
		    id,
		    key);

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("Kernel consumer lost packets channel %" PRIu64 " not found", key);
			count = 0;
		} else {
			count = channel->lost_packets;
		}

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
		if (ret < 0) {
			PERROR("send lost packets");
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
	{
		int channel_monitor_pipe;
		int ret_send_status, ret_set_channel_monitor_pipe;
		ssize_t ret_recv;

		ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		/* Successfully received the command's type. */
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			goto error_fatal;
		}

		ret_recv = lttcomm_recv_fds_unix_sock(sock, &channel_monitor_pipe, 1);
		if (ret_recv != sizeof(channel_monitor_pipe)) {
			ERR("Failed to receive channel monitor pipe");
			goto error_fatal;
		}

		DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
		ret_set_channel_monitor_pipe =
			consumer_timer_thread_set_channel_monitor_pipe(channel_monitor_pipe);
		if (!ret_set_channel_monitor_pipe) {
			int flags;
			int ret_fcntl;

			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			/* Set the pipe as non-blocking. */
			ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0);
			if (ret_fcntl == -1) {
				PERROR("fcntl get flags of the channel monitoring pipe");
				goto error_fatal;
			}
			flags = ret_fcntl;

			ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL, flags | O_NONBLOCK);
			if (ret_fcntl == -1) {
				PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
				goto error_fatal;
			}
			DBG("Channel monitor pipe set as non-blocking");
		} else {
			ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
		}
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			goto error_fatal;
		}
		break;
	}
	case LTTNG_CONSUMER_ROTATE_CHANNEL:
	{
		struct lttng_consumer_channel *channel;
		uint64_t key = msg.u.rotate_channel.key;
		int ret_send_status;

		DBG("Consumer rotate channel %" PRIu64, key);

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			/*
			 * Sample the rotate position of all the streams in this channel.
			 */
			int ret_rotate_channel;

			ret_rotate_channel = lttng_consumer_rotate_channel(
				channel, key, msg.u.rotate_channel.relayd_id);
			if (ret_rotate_channel < 0) {
				ERR("Rotate channel failed");
				ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
			}

			health_code_update();
		}

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_rotate_channel;
		}
		if (channel) {
			/* Rotate the streams that are ready right now. */
			int ret_rotate;

			ret_rotate = lttng_consumer_rotate_ready_streams(channel, key);
			if (ret_rotate < 0) {
				ERR("Rotate ready streams failed");
			}
		}
		break;
	error_rotate_channel:
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_CLEAR_CHANNEL:
	{
		struct lttng_consumer_channel *channel;
		uint64_t key = msg.u.clear_channel.key;
		int ret_send_status;

		channel = consumer_find_channel(key);
		if (!channel) {
			DBG("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			int ret_clear_channel;

			ret_clear_channel = lttng_consumer_clear_channel(channel);
			if (ret_clear_channel) {
				ERR("Clear channel failed");
				ret_code = (lttcomm_return_code) ret_clear_channel;
			}

			health_code_update();
		}

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}

		break;
	}
	case LTTNG_CONSUMER_INIT:
	{
		int ret_send_status;
		lttng_uuid sessiond_uuid;

		std::copy(std::begin(msg.u.init.sessiond_uuid),
			  std::end(msg.u.init.sessiond_uuid),
			  sessiond_uuid.begin());

		ret_code = lttng_consumer_init_command(ctx, sessiond_uuid);
		health_code_update();
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		break;
	}
	case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
	{
		const struct lttng_credentials credentials = {
			.uid = LTTNG_OPTIONAL_INIT_VALUE(
				msg.u.create_trace_chunk.credentials.value.uid),
			.gid = LTTNG_OPTIONAL_INIT_VALUE(
				msg.u.create_trace_chunk.credentials.value.gid),
		};
		const bool is_local_trace = !msg.u.create_trace_chunk.relayd_id.is_set;
		const uint64_t relayd_id = msg.u.create_trace_chunk.relayd_id.value;
		const char *chunk_override_name = *msg.u.create_trace_chunk.override_name ?
			msg.u.create_trace_chunk.override_name :
			nullptr;
		struct lttng_directory_handle *chunk_directory_handle = nullptr;

		/*
		 * The session daemon will only provide a chunk directory file
		 * descriptor for local traces.
		 */
		if (is_local_trace) {
			int chunk_dirfd;
			int ret_send_status;
			ssize_t ret_recv;

			/* Acknowledge the reception of the command. */
			ret_send_status = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
			if (ret_send_status < 0) {
				/* Somehow, the session daemon is not responding anymore. */
				goto end_nosignal;
			}

			ret_recv = lttcomm_recv_fds_unix_sock(sock, &chunk_dirfd, 1);
			if (ret_recv != sizeof(chunk_dirfd)) {
				ERR("Failed to receive trace chunk directory file descriptor");
				goto error_fatal;
			}

			DBG("Received trace chunk directory fd (%d)", chunk_dirfd);
			chunk_directory_handle =
				lttng_directory_handle_create_from_dirfd(chunk_dirfd);
			if (!chunk_directory_handle) {
				ERR("Failed to initialize chunk directory handle from directory file descriptor");
				if (close(chunk_dirfd)) {
					PERROR("Failed to close chunk directory file descriptor");
				}
				goto error_fatal;
			}
		}

		ret_code = lttng_consumer_create_trace_chunk(
			!is_local_trace ? &relayd_id : nullptr,
			msg.u.create_trace_chunk.session_id,
			msg.u.create_trace_chunk.chunk_id,
			(time_t) msg.u.create_trace_chunk.creation_timestamp,
			chunk_override_name,
			msg.u.create_trace_chunk.credentials.is_set ? &credentials : nullptr,
			chunk_directory_handle);
		lttng_directory_handle_put(chunk_directory_handle);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
	{
		enum lttng_trace_chunk_command_type close_command =
			(lttng_trace_chunk_command_type) msg.u.close_trace_chunk.close_command.value;
		const uint64_t relayd_id = msg.u.close_trace_chunk.relayd_id.value;
		struct lttcomm_consumer_close_trace_chunk_reply reply;
		char path[LTTNG_PATH_MAX];
		ssize_t ret_send;

		ret_code = lttng_consumer_close_trace_chunk(
			msg.u.close_trace_chunk.relayd_id.is_set ? &relayd_id : nullptr,
			msg.u.close_trace_chunk.session_id,
			msg.u.close_trace_chunk.chunk_id,
			(time_t) msg.u.close_trace_chunk.close_timestamp,
			msg.u.close_trace_chunk.close_command.is_set ? &close_command : nullptr,
			path);
		reply.ret_code = ret_code;
		reply.path_length = strlen(path) + 1;
		ret_send = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
		if (ret_send != sizeof(reply)) {
			goto error_fatal;
		}
		ret_send = lttcomm_send_unix_sock(sock, path, reply.path_length);
		if (ret_send != reply.path_length) {
			goto error_fatal;
		}
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
	{
		const uint64_t relayd_id = msg.u.trace_chunk_exists.relayd_id.value;

		ret_code = lttng_consumer_trace_chunk_exists(
			msg.u.trace_chunk_exists.relayd_id.is_set ? &relayd_id : nullptr,
			msg.u.trace_chunk_exists.session_id,
			msg.u.trace_chunk_exists.chunk_id);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS:
	{
		const uint64_t key = msg.u.open_channel_packets.key;
		struct lttng_consumer_channel *channel = consumer_find_channel(key);

		if (channel) {
			pthread_mutex_lock(&channel->lock);
			ret_code = lttng_consumer_open_channel_packets(channel);
			pthread_mutex_unlock(&channel->lock);
		} else {
			WARN("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		}

		health_code_update();
		goto end_msg_sessiond;
	}
	default:
		goto end_nosignal;
	}

end_nosignal:
	/*
	 * Return 1 to indicate success since the 0 value can be a socket
	 * shutdown during the recv() or send() call.
	 */
	ret_func = 1;
	goto end;
error_fatal:
	/* This will issue a consumer stop. */
	ret_func = -1;
	goto end;
end_msg_sessiond:
	/*
	 * The returned value here is not useful since either way we'll return 1 to
	 * the caller because the session daemon socket management is done
	 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
	 */
	{
		int ret_send_status;

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			goto error_fatal;
		}
	}

	ret_func = 1;

end:
	health_code_update();
	rcu_read_unlock();
	return ret_func;
}

/*
 * Sync metadata, meaning request it from the session daemon and take a
 * snapshot so that the metadata thread can consume it.
 *
 * Metadata stream lock MUST be acquired.
 */
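/*
 * Implementation note: kernctl_snapshot() reports the absence of new
 * metadata through errno (EAGAIN), which is mapped below to
 * SYNC_METADATA_STATUS_NO_DATA; any other failure is reported as
 * SYNC_METADATA_STATUS_ERROR.
 */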
28ab034a | 1401 | enum sync_metadata_status lttng_kconsumer_sync_metadata(struct lttng_consumer_stream *metadata) |
94d49140 JD |
1402 | { |
1403 | int ret; | |
577eea73 | 1404 | enum sync_metadata_status status; |
94d49140 | 1405 | |
a0377dfe | 1406 | LTTNG_ASSERT(metadata); |
94d49140 JD |
1407 | |
1408 | ret = kernctl_buffer_flush(metadata->wait_fd); | |
1409 | if (ret < 0) { | |
1410 | ERR("Failed to flush kernel stream"); | |
577eea73 | 1411 | status = SYNC_METADATA_STATUS_ERROR; |
94d49140 JD |
1412 | goto end; |
1413 | } | |
1414 | ||
1415 | ret = kernctl_snapshot(metadata->wait_fd); | |
1416 | if (ret < 0) { | |
577eea73 JG |
1417 | if (errno == EAGAIN) { |
1418 | /* No new metadata, exit. */ | |
1419 | DBG("Sync metadata, no new kernel metadata"); | |
1420 | status = SYNC_METADATA_STATUS_NO_DATA; | |
1421 | } else { | |
94d49140 | 1422 | ERR("Sync metadata, taking kernel snapshot failed."); |
577eea73 | 1423 | status = SYNC_METADATA_STATUS_ERROR; |
94d49140 | 1424 | } |
577eea73 JG |
1425 | } else { |
1426 | status = SYNC_METADATA_STATUS_NEW_DATA; | |
94d49140 JD |
1427 | } |
1428 | ||
1429 | end: | |
577eea73 | 1430 | return status; |
94d49140 | 1431 | } |
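/*
 * Illustrative sketch of the caller contract documented above (an assumption
 * added for documentation purposes, not an excerpt from this tree; the
 * metadata_stream variable is hypothetical):
 *
 *	pthread_mutex_lock(&metadata_stream->lock);
 *	switch (lttng_kconsumer_sync_metadata(metadata_stream)) {
 *	case SYNC_METADATA_STATUS_NEW_DATA:
 *		// Metadata was flushed and snapshotted; it can now be consumed.
 *		break;
 *	case SYNC_METADATA_STATUS_NO_DATA:
 *		// Nothing new to consume.
 *		break;
 *	case SYNC_METADATA_STATUS_ERROR:
 *		// Flush or snapshot failed.
 *		break;
 *	}
 *	pthread_mutex_unlock(&metadata_stream->lock);
 */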
309167d2 | 1432 | |
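/*
 * Sample the sub-buffer properties shared by data and metadata streams:
 * the sub-buffer size and its padded size.
 */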
28ab034a JG |
1433 | static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream, |
1434 | struct stream_subbuffer *subbuf) | |
fb83fe64 JD |
1435 | { |
1436 | int ret; | |
fb83fe64 | 1437 | |
28ab034a | 1438 | ret = kernctl_get_subbuf_size(stream->wait_fd, &subbuf->info.data.subbuf_size); |
6f9449c2 | 1439 | if (ret) { |
fb83fe64 JD |
1440 | goto end; |
1441 | } | |
fb83fe64 | 1442 | |
28ab034a JG |
1443 | ret = kernctl_get_padded_subbuf_size(stream->wait_fd, |
1444 | &subbuf->info.data.padded_subbuf_size); | |
6f9449c2 | 1445 | if (ret) { |
fb83fe64 JD |
1446 | goto end; |
1447 | } | |
fb83fe64 JD |
1448 | |
1449 | end: | |
1450 | return ret; | |
1451 | } | |
1452 | ||
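/*
 * Metadata sub-buffers carry, in addition to the common sizes, a metadata
 * version used to detect a metadata regeneration.
 */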
28ab034a JG |
1453 | static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream, |
1454 | struct stream_subbuffer *subbuf) | |
93ec662e JD |
1455 | { |
1456 | int ret; | |
93ec662e | 1457 | |
6f9449c2 JG |
1458 | ret = extract_common_subbuffer_info(stream, subbuf); |
1459 | if (ret) { | |
93ec662e JD |
1460 | goto end; |
1461 | } | |
1462 | ||
28ab034a | 1463 | ret = kernctl_get_metadata_version(stream->wait_fd, &subbuf->info.metadata.version); |
6f9449c2 | 1464 | if (ret) { |
93ec662e JD |
1465 | goto end; |
1466 | } | |
1467 | ||
93ec662e JD |
1468 | end: |
1469 | return ret; | |
1470 | } | |
1471 | ||
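/*
 * Data sub-buffers expose everything needed to describe a packet:
 * packet and content sizes, begin/end timestamps, discarded event count,
 * stream id and, when the tracer supports it, the packet sequence number
 * and stream instance id.
 */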
28ab034a JG |
1472 | static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream, |
1473 | struct stream_subbuffer *subbuf) | |
d41f73b7 | 1474 | { |
6f9449c2 | 1475 | int ret; |
d41f73b7 | 1476 | |
6f9449c2 JG |
1477 | ret = extract_common_subbuffer_info(stream, subbuf); |
1478 | if (ret) { | |
1479 | goto end; | |
1480 | } | |
309167d2 | 1481 | |
28ab034a | 1482 | ret = kernctl_get_packet_size(stream->wait_fd, &subbuf->info.data.packet_size); |
6f9449c2 JG |
1483 | if (ret < 0) { |
1484 | PERROR("Failed to get sub-buffer packet size"); | |
1485 | goto end; | |
1486 | } | |
02d02e31 | 1487 | |
28ab034a | 1488 | ret = kernctl_get_content_size(stream->wait_fd, &subbuf->info.data.content_size); |
6f9449c2 JG |
1489 | if (ret < 0) { |
1490 | PERROR("Failed to get sub-buffer content size"); | |
1491 | goto end; | |
d41f73b7 MD |
1492 | } |
1493 | ||
28ab034a | 1494 | ret = kernctl_get_timestamp_begin(stream->wait_fd, &subbuf->info.data.timestamp_begin); |
6f9449c2 JG |
1495 | if (ret < 0) { |
1496 | PERROR("Failed to get sub-buffer begin timestamp"); | |
1497 | goto end; | |
1d4dfdef DG |
1498 | } |
1499 | ||
28ab034a | 1500 | ret = kernctl_get_timestamp_end(stream->wait_fd, &subbuf->info.data.timestamp_end); |
6f9449c2 JG |
1501 | if (ret < 0) { |
1502 | PERROR("Failed to get sub-buffer end timestamp"); | |
1503 | goto end; | |
1504 | } | |
1505 | ||
28ab034a | 1506 | ret = kernctl_get_events_discarded(stream->wait_fd, &subbuf->info.data.events_discarded); |
6f9449c2 JG |
1507 | if (ret) { |
1508 | PERROR("Failed to get sub-buffer events discarded count"); | |
1509 | goto end; | |
1510 | } | |
1511 | ||
1512 | ret = kernctl_get_sequence_number(stream->wait_fd, | |
28ab034a | 1513 | &subbuf->info.data.sequence_number.value); |
6f9449c2 JG |
1514 | if (ret) { |
1515 | /* May not be supported by older LTTng-modules. */ | |
1516 | if (ret != -ENOTTY) { | |
1517 | PERROR("Failed to get sub-buffer sequence number"); | |
1518 | goto end; | |
fb83fe64 | 1519 | } |
1c20f0e2 | 1520 | } else { |
6f9449c2 | 1521 | subbuf->info.data.sequence_number.is_set = true; |
309167d2 JD |
1522 | } |
1523 | ||
28ab034a | 1524 | ret = kernctl_get_stream_id(stream->wait_fd, &subbuf->info.data.stream_id); |
6f9449c2 JG |
1525 | if (ret < 0) { |
1526 | PERROR("Failed to get stream id"); | |
1527 | goto end; | |
1528 | } | |
1d4dfdef | 1529 | |
28ab034a | 1530 | ret = kernctl_get_instance_id(stream->wait_fd, &subbuf->info.data.stream_instance_id.value); |
6f9449c2 JG |
1531 | if (ret) { |
1532 | /* May not be supported by older LTTng-modules. */ | |
1533 | if (ret != -ENOTTY) { | |
1534 | PERROR("Failed to get stream instance id"); | |
1535 | goto end; | |
1d4dfdef | 1536 | } |
6f9449c2 JG |
1537 | } else { |
1538 | subbuf->info.data.stream_instance_id.is_set = true; | |
1539 | } | |
1540 | end: | |
1541 | return ret; | |
1542 | } | |
47e81c02 | 1543 | |
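/*
 * Reserve the next sub-buffer for reading and sample its properties through
 * the stream's extract_subbuffer_info operation.
 */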
28ab034a JG |
1544 | static enum get_next_subbuffer_status get_subbuffer_common(struct lttng_consumer_stream *stream, |
1545 | struct stream_subbuffer *subbuffer) | |
6f9449c2 JG |
1546 | { |
1547 | int ret; | |
b6797c8e | 1548 | enum get_next_subbuffer_status status; |
6f9449c2 JG |
1549 | |
1550 | ret = kernctl_get_next_subbuf(stream->wait_fd); | |
b6797c8e JG |
1551 | switch (ret) { |
1552 | case 0: | |
1553 | status = GET_NEXT_SUBBUFFER_STATUS_OK; | |
1554 | break; | |
1555 | case -ENODATA: | |
28ab034a | 1556 | case -EAGAIN: |
6e5e3c51 MD |
1557 | /* |
1558 | * The caller only expects -ENODATA when there is no data to | |
1559 | * read, but the kernel tracer returns -EAGAIN when there is | |
1560 | * currently no data for a non-finalized stream, and -ENODATA | |
1561 | * when there is no data for a finalized stream. Those can be | |
1562 | * combined into a -ENODATA return value. | |
1563 | */ | |
b6797c8e JG |
1564 | status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA; |
1565 | goto end; | |
1566 | default: | |
1567 | status = GET_NEXT_SUBBUFFER_STATUS_ERROR; | |
6f9449c2 JG |
1568 | goto end; |
1569 | } | |
1570 | ||
28ab034a | 1571 | ret = stream->read_subbuffer_ops.extract_subbuffer_info(stream, subbuffer); |
b6797c8e JG |
1572 | if (ret) { |
1573 | status = GET_NEXT_SUBBUFFER_STATUS_ERROR; | |
1574 | } | |
6f9449c2 | 1575 | end: |
b6797c8e | 1576 | return status; |
6f9449c2 | 1577 | } |
128708c3 | 1578 | |
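/*
 * Splice output: the sub-buffer is exposed through the stream's file
 * descriptor rather than through a mapped address.
 */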
28ab034a JG |
1579 | static enum get_next_subbuffer_status |
1580 | get_next_subbuffer_splice(struct lttng_consumer_stream *stream, struct stream_subbuffer *subbuffer) | |
6f9449c2 | 1581 | { |
28ab034a | 1582 | const enum get_next_subbuffer_status status = get_subbuffer_common(stream, subbuffer); |
1d4dfdef | 1583 | |
b6797c8e | 1584 | if (status != GET_NEXT_SUBBUFFER_STATUS_OK) { |
6f9449c2 JG |
1585 | goto end; |
1586 | } | |
1d4dfdef | 1587 | |
6f9449c2 JG |
1588 | subbuffer->buffer.fd = stream->wait_fd; |
1589 | end: | |
b6797c8e | 1590 | return status; |
6f9449c2 | 1591 | } |
fd424d99 | 1592 | |
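/*
 * Mmap output: the sub-buffer is exposed as a buffer view over the
 * memory-mapped ring buffer.
 */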
28ab034a JG |
1593 | static enum get_next_subbuffer_status get_next_subbuffer_mmap(struct lttng_consumer_stream *stream, |
1594 | struct stream_subbuffer *subbuffer) | |
6f9449c2 JG |
1595 | { |
1596 | int ret; | |
b6797c8e | 1597 | enum get_next_subbuffer_status status; |
6f9449c2 JG |
1598 | const char *addr; |
1599 | ||
b6797c8e JG |
1600 | status = get_subbuffer_common(stream, subbuffer); |
1601 | if (status != GET_NEXT_SUBBUFFER_STATUS_OK) { | |
6f9449c2 | 1602 | goto end; |
128708c3 | 1603 | } |
6f9449c2 JG |
1604 | |
1605 | ret = get_current_subbuf_addr(stream, &addr); | |
1606 | if (ret) { | |
b6797c8e | 1607 | status = GET_NEXT_SUBBUFFER_STATUS_ERROR; |
6f9449c2 | 1608 | goto end; |
d41f73b7 | 1609 | } |
6f9449c2 | 1610 | |
28ab034a JG |
1611 | subbuffer->buffer.buffer = |
1612 | lttng_buffer_view_init(addr, 0, subbuffer->info.data.padded_subbuf_size); | |
6f9449c2 | 1613 | end: |
b6797c8e | 1614 | return status; |
6f9449c2 JG |
1615 | } |
1616 | ||
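/*
 * Reserve the next metadata sub-buffer while also sampling whether its
 * contents leave the metadata in a coherent (parseable) state.
 */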
28ab034a JG |
1617 | static enum get_next_subbuffer_status |
1618 | get_next_subbuffer_metadata_check(struct lttng_consumer_stream *stream, | |
1619 | struct stream_subbuffer *subbuffer) | |
f5ba75b4 JG |
1620 | { |
1621 | int ret; | |
1622 | const char *addr; | |
1623 | bool coherent; | |
b6797c8e | 1624 | enum get_next_subbuffer_status status; |
f5ba75b4 | 1625 | |
28ab034a | 1626 | ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd, &coherent); |
f5ba75b4 JG |
1627 | if (ret) { |
1628 | goto end; | |
1629 | } | |
1630 | ||
28ab034a | 1631 | ret = stream->read_subbuffer_ops.extract_subbuffer_info(stream, subbuffer); |
f5ba75b4 JG |
1632 | if (ret) { |
1633 | goto end; | |
1634 | } | |
1635 | ||
1636 | LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent); | |
1637 | ||
1638 | ret = get_current_subbuf_addr(stream, &addr); | |
1639 | if (ret) { | |
1640 | goto end; | |
1641 | } | |
1642 | ||
28ab034a JG |
1643 | subbuffer->buffer.buffer = |
1644 | lttng_buffer_view_init(addr, 0, subbuffer->info.data.padded_subbuf_size); | |
f5ba75b4 | 1645 | DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s", |
28ab034a JG |
1646 | subbuffer->info.metadata.padded_subbuf_size, |
1647 | coherent ? "true" : "false"); | |
f5ba75b4 | 1648 | end: |
6e5e3c51 MD |
1649 | /* |
1650 | * The caller only expects -ENODATA when there is no data to read, but | |
1651 | * the kernel tracer returns -EAGAIN when there is currently no data | |
1652 | * for a non-finalized stream, and -ENODATA when there is no data for a | |
1653 | * finalized stream. Those can be combined into a -ENODATA return value. | |
1654 | */ | |
b6797c8e JG |
1655 | switch (ret) { |
1656 | case 0: | |
1657 | status = GET_NEXT_SUBBUFFER_STATUS_OK; | |
1658 | break; | |
1659 | case -ENODATA: | |
1660 | case -EAGAIN: | |
1668 | status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA; | |
1669 | break; | |
1670 | default: | |
1671 | status = GET_NEXT_SUBBUFFER_STATUS_ERROR; | |
1672 | break; | |
6e5e3c51 MD |
1673 | } |
1674 | ||
b6797c8e | 1675 | return status; |
f5ba75b4 JG |
1676 | } |
1677 | ||
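/*
 * Release a consumed sub-buffer back to the tracer.
 */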
28ab034a JG |
1678 | static int put_next_subbuffer(struct lttng_consumer_stream *stream, |
1679 | struct stream_subbuffer *subbuffer __attribute__((unused))) | |
6f9449c2 JG |
1680 | { |
1681 | const int ret = kernctl_put_next_subbuf(stream->wait_fd); | |
1682 | ||
1683 | if (ret) { | |
1684 | if (ret == -EFAULT) { | |
1685 | PERROR("Error unreserving sub-buffer");
1686 | } else if (ret == -EIO) { | |
d41f73b7 | 1687 | /* Should never happen with newer LTTng versions */ |
6f9449c2 | 1688 | PERROR("Reader has been pushed by the writer, last sub-buffer corrupted"); |
d41f73b7 | 1689 | } |
d41f73b7 MD |
1690 | } |
1691 | ||
6f9449c2 JG |
1692 | return ret; |
1693 | } | |
1c20f0e2 | 1694 | |
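/*
 * Probe the tracer for the "get next sub-buffer with metadata check"
 * operation by invoking it: -ENOTTY means it is unknown to this version of
 * lttng-modules. A successful probe reserves a sub-buffer, which must be
 * released immediately.
 */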
28ab034a | 1695 | static bool is_get_next_check_metadata_available(int tracer_fd) |
f5ba75b4 | 1696 | { |
cd9adb8b | 1697 | const int ret = kernctl_get_next_subbuf_metadata_check(tracer_fd, nullptr); |
741e787b JG |
1698 | const bool available = ret != -ENOTTY; |
1699 | ||
1700 | if (ret == 0) { | |
1701 | /* get succeeded, make sure to put the subbuffer. */ | |
1702 | kernctl_put_subbuf(tracer_fd); | |
1703 | } | |
1704 | ||
1705 | return available; | |
f5ba75b4 JG |
1706 | } |
1707 | ||
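/*
 * Broadcast on the stream's metadata rendez-vous point so that threads
 * waiting for metadata consumption can make progress.
 */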
28ab034a JG |
1708 | static int signal_metadata(struct lttng_consumer_stream *stream, |
1709 | struct lttng_consumer_local_data *ctx __attribute__((unused))) | |
091441eb MD |
1710 | { |
1711 | ASSERT_LOCKED(stream->metadata_rdv_lock); | |
1712 | return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0; | |
1713 | } | |
1714 | ||
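/*
 * Wire the per-stream read callbacks: the sub-buffer accessor is chosen
 * according to the channel's output type (mmap or splice), the metadata
 * coherency check is used for live metadata streams when the tracer
 * supports it, and the matching sub-buffer info extraction and live beacon
 * callbacks are installed.
 */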
28ab034a | 1715 | static int lttng_kconsumer_set_stream_ops(struct lttng_consumer_stream *stream) |
6f9449c2 | 1716 | { |
f5ba75b4 JG |
1717 | int ret = 0; |
1718 | ||
1719 | if (stream->metadata_flag && stream->chan->is_live) { | |
1720 | DBG("Attempting to enable metadata bucketization for live consumers"); | |
1721 | if (is_get_next_check_metadata_available(stream->wait_fd)) { | |
1722 | DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached"); | |
1723 | stream->read_subbuffer_ops.get_next_subbuffer = | |
28ab034a JG |
1724 | get_next_subbuffer_metadata_check; |
1725 | ret = consumer_stream_enable_metadata_bucketization(stream); | |
f5ba75b4 JG |
1726 | if (ret) { |
1727 | goto end; | |
1728 | } | |
1729 | } else { | |
1730 | /* | |
1731 | * The kernel tracer version is too old to indicate | |
1732 | * when the metadata stream has reached a "coherent" | |
1733 | * (parseable) point. | |
1734 | * | |
1735 | * This means that a live viewer may see an incoherent | |
1736 | * sequence of metadata and fail to parse it. | |
1737 | */ | |
1738 | WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream"); | |
1739 | metadata_bucket_destroy(stream->metadata_bucket); | |
cd9adb8b | 1740 | stream->metadata_bucket = nullptr; |
f5ba75b4 | 1741 | } |
091441eb MD |
1742 | |
1743 | stream->read_subbuffer_ops.on_sleep = signal_metadata; | |
f5ba75b4 JG |
1744 | } |
1745 | ||
1746 | if (!stream->read_subbuffer_ops.get_next_subbuffer) { | |
1747 | if (stream->chan->output == CONSUMER_CHANNEL_MMAP) { | |
28ab034a | 1748 | stream->read_subbuffer_ops.get_next_subbuffer = get_next_subbuffer_mmap; |
f5ba75b4 | 1749 | } else { |
28ab034a | 1750 | stream->read_subbuffer_ops.get_next_subbuffer = get_next_subbuffer_splice; |
f5ba75b4 | 1751 | } |
94d49140 JD |
1752 | } |
1753 | ||
6f9449c2 | 1754 | if (stream->metadata_flag) { |
28ab034a | 1755 | stream->read_subbuffer_ops.extract_subbuffer_info = extract_metadata_subbuffer_info; |
6f9449c2 | 1756 | } else { |
28ab034a | 1757 | stream->read_subbuffer_ops.extract_subbuffer_info = extract_data_subbuffer_info; |
6f9449c2 | 1758 | if (stream->chan->is_live) { |
28ab034a | 1759 | stream->read_subbuffer_ops.send_live_beacon = consumer_flush_kernel_index; |
6f9449c2 | 1760 | } |
309167d2 JD |
1761 | } |
1762 | ||
6f9449c2 | 1763 | stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer; |
f5ba75b4 JG |
1764 | end: |
1765 | return ret; | |
d41f73b7 MD |
1766 | } |
1767 | ||
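/*
 * Prepare a newly received stream: create its output files when tracing to
 * a local trace chunk, map the ring buffer when mmap output is used, and
 * set up the read callbacks.
 */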
1768 | int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream) | |
1769 | { | |
1770 | int ret; | |
ffe60014 | 1771 | |
a0377dfe | 1772 | LTTNG_ASSERT(stream); |
ffe60014 | 1773 | |
2bba9e53 | 1774 | /* |
d2956687 JG |
1775 | * Don't create anything if this is set for streaming or if there is |
1776 | * no current trace chunk on the parent channel. | |
2bba9e53 | 1777 | */ |
d2956687 | 1778 | if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor && |
28ab034a | 1779 | stream->chan->trace_chunk) { |
d2956687 JG |
1780 | ret = consumer_stream_create_output_files(stream, true); |
1781 | if (ret) { | |
fe4477ee JD |
1782 | goto error; |
1783 | } | |
ffe60014 | 1784 | } |
d41f73b7 | 1785 | |
d41f73b7 MD |
1786 | if (stream->output == LTTNG_EVENT_MMAP) { |
1787 | /* get the len of the mmap region */ | |
1788 | unsigned long mmap_len; | |
1789 | ||
1790 | ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len); | |
1791 | if (ret != 0) { | |
ffe60014 | 1792 | PERROR("kernctl_get_mmap_len"); |
d41f73b7 MD |
1793 | goto error_close_fd; |
1794 | } | |
1795 | stream->mmap_len = (size_t) mmap_len; | |
1796 | ||
28ab034a | 1797 | stream->mmap_base = |
cd9adb8b | 1798 | mmap(nullptr, stream->mmap_len, PROT_READ, MAP_PRIVATE, stream->wait_fd, 0); |
d41f73b7 | 1799 | if (stream->mmap_base == MAP_FAILED) { |
ffe60014 | 1800 | PERROR("Error mmapping");
d41f73b7 MD |
1801 | ret = -1; |
1802 | goto error_close_fd; | |
1803 | } | |
1804 | } | |
1805 | ||
f5ba75b4 JG |
1806 | ret = lttng_kconsumer_set_stream_ops(stream); |
1807 | if (ret) { | |
1808 | goto error_close_fd; | |
1809 | } | |
6f9449c2 | 1810 | |
d41f73b7 MD |
1811 | /* we return 0 to let the library handle the FD internally */ |
1812 | return 0; | |
1813 | ||
1814 | error_close_fd: | |
2f225ce2 | 1815 | if (stream->out_fd >= 0) { |
d41f73b7 MD |
1816 | int err; |
1817 | ||
1818 | err = close(stream->out_fd); | |
a0377dfe | 1819 | LTTNG_ASSERT(!err); |
2f225ce2 | 1820 | stream->out_fd = -1; |
d41f73b7 MD |
1821 | } |
1822 | error: | |
1823 | return ret; | |
1824 | } | |
1825 | ||
ca22feea DG |
1826 | /* |
1827 | * Check if data is still being extracted from the buffers for a specific | |
4e9a4686 DG |
1828 | * stream. The consumer data lock and the stream lock MUST be acquired before
1829 | * calling this function.
ca22feea | 1830 | * |
6d805429 | 1831 | * Return 1 if the traced data is still being read, else 0, meaning that the
ca22feea DG |
1832 | * data is available for trace viewer reading. |
1833 | */ | |
6d805429 | 1834 | int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream) |
ca22feea DG |
1835 | { |
1836 | int ret; | |
1837 | ||
a0377dfe | 1838 | LTTNG_ASSERT(stream); |
ca22feea | 1839 | |
873b9e9a MD |
1840 | if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) { |
1841 | ret = 0; | |
1842 | goto end; | |
1843 | } | |
1844 | ||
ca22feea DG |
1845 | ret = kernctl_get_next_subbuf(stream->wait_fd); |
1846 | if (ret == 0) { | |
1847 | /* There is still data so let's put back this subbuffer. */ | |
1848 | ret = kernctl_put_subbuf(stream->wait_fd); | |
a0377dfe | 1849 | LTTNG_ASSERT(ret == 0); |
28ab034a | 1850 | ret = 1; /* Data is pending */ |
4e9a4686 | 1851 | goto end; |
ca22feea DG |
1852 | } |
1853 | ||
6d805429 DG |
1854 | /* Data is NOT pending; it is ready to be read. */
1855 | ret = 0; | |
ca22feea | 1856 | |
6efae65e DG |
1857 | end: |
1858 | return ret; | |
ca22feea | 1859 | } |
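/*
 * Illustrative sketch of the locking contract documented above
 * lttng_kconsumer_data_pending() (assumed caller code, not an excerpt from
 * this tree):
 *
 *	pthread_mutex_lock(&the_consumer_data.lock);
 *	pthread_mutex_lock(&stream->lock);
 *	const bool pending = lttng_kconsumer_data_pending(stream) == 1;
 *	pthread_mutex_unlock(&stream->lock);
 *	pthread_mutex_unlock(&the_consumer_data.lock);
 */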