Commit | Line | Data |
---|---|---|
3bd1e081 | 1 | /* |
21cf9b6b | 2 | * Copyright (C) 2011 EfficiOS Inc. |
ab5be9fa MJ |
3 | * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
4 | * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com> | |
3bd1e081 | 5 | * |
ab5be9fa | 6 | * SPDX-License-Identifier: GPL-2.0-only |
3bd1e081 | 7 | * |
3bd1e081 MD |
8 | */ |
9 | ||
6c1c0768 | 10 | #define _LGPL_SOURCE |
3bd1e081 MD |
11 | #include <poll.h> |
12 | #include <pthread.h> | |
13 | #include <stdlib.h> | |
14 | #include <string.h> | |
15 | #include <sys/mman.h> | |
16 | #include <sys/socket.h> | |
17 | #include <sys/types.h> | |
77c7c900 | 18 | #include <inttypes.h> |
3bd1e081 | 19 | #include <unistd.h> |
dbb5dfe6 | 20 | #include <sys/stat.h> |
f5ba75b4 | 21 | #include <stdint.h> |
3bd1e081 | 22 | |
51a9e1c7 | 23 | #include <bin/lttng-consumerd/health-consumerd.h> |
990570ed | 24 | #include <common/common.h> |
10a8a223 | 25 | #include <common/kernel-ctl/kernel-ctl.h> |
10a8a223 | 26 | #include <common/sessiond-comm/sessiond-comm.h> |
00e2e675 | 27 | #include <common/sessiond-comm/relayd.h> |
dbb5dfe6 | 28 | #include <common/compat/fcntl.h> |
f263b7fd | 29 | #include <common/compat/endian.h> |
acdb9057 | 30 | #include <common/pipe.h> |
00e2e675 | 31 | #include <common/relayd/relayd.h> |
fe4477ee | 32 | #include <common/utils.h> |
c8fea79c | 33 | #include <common/consumer/consumer-stream.h> |
309167d2 | 34 | #include <common/index/index.h> |
c8fea79c | 35 | #include <common/consumer/consumer-timer.h> |
d2956687 | 36 | #include <common/optional.h> |
6f9449c2 JG |
37 | #include <common/buffer-view.h> |
38 | #include <common/consumer/consumer.h> | |
f5ba75b4 | 39 | #include <common/consumer/metadata-bucket.h> |
0857097f | 40 | |
10a8a223 | 41 | #include "kernel-consumer.h" |
3bd1e081 | 42 | |
fa29bfbf | 43 | extern struct lttng_consumer_global_data the_consumer_data; |
3bd1e081 | 44 | extern int consumer_poll_timeout; |
3bd1e081 | 45 | |
3bd1e081 MD |
46 | /* |
47 | * Take a snapshot for a specific fd | |
48 | * | |
49 | * Returns 0 on success, < 0 on error | |
50 | */ | |
ffe60014 | 51 | int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream) |
3bd1e081 MD |
52 | { |
53 | int ret = 0; | |
54 | int infd = stream->wait_fd; | |
55 | ||
56 | ret = kernctl_snapshot(infd); | |
d2d2f190 JD |
57 | /* |
58 | * -EAGAIN is not an error, it just means that there is no data to | |
59 | * be read. | |
60 | */ | |
61 | if (ret != 0 && ret != -EAGAIN) { | |
5a510c9f | 62 | PERROR("Getting sub-buffer snapshot."); |
3bd1e081 MD |
63 | } |
64 | ||
65 | return ret; | |
66 | } | |
67 | ||
e9404c27 JG |
68 | /* |
69 | * Sample consumed and produced positions for a specific fd. | |
70 | * | |
71 | * Returns 0 on success, < 0 on error. | |
72 | */ | |
73 | int lttng_kconsumer_sample_snapshot_positions( | |
74 | struct lttng_consumer_stream *stream) | |
75 | { | |
a0377dfe | 76 | LTTNG_ASSERT(stream); |
e9404c27 JG |
77 | |
78 | return kernctl_snapshot_sample_positions(stream->wait_fd); | |
79 | } | |
80 | ||
3bd1e081 MD |
81 | /* |
82 | * Get the produced position | |
83 | * | |
84 | * Returns 0 on success, < 0 on error | |
85 | */ | |
ffe60014 | 86 | int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream, |
3bd1e081 MD |
87 | unsigned long *pos) |
88 | { | |
89 | int ret; | |
90 | int infd = stream->wait_fd; | |
91 | ||
92 | ret = kernctl_snapshot_get_produced(infd, pos); | |
93 | if (ret != 0) { | |
5a510c9f | 94 | PERROR("kernctl_snapshot_get_produced"); |
3bd1e081 MD |
95 | } |
96 | ||
97 | return ret; | |
98 | } | |
99 | ||
07b86b52 JD |
100 | /* |
101 | * Get the consumed position |
102 | * | |
103 | * Returns 0 on success, < 0 on error | |
104 | */ | |
105 | int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream, | |
106 | unsigned long *pos) | |
107 | { | |
108 | int ret; | |
109 | int infd = stream->wait_fd; | |
110 | ||
111 | ret = kernctl_snapshot_get_consumed(infd, pos); | |
112 | if (ret != 0) { | |
5a510c9f | 113 | PERROR("kernctl_snapshot_get_consumed"); |
07b86b52 JD |
114 | } |
115 | ||
116 | return ret; | |
117 | } | |
118 | ||
128708c3 JG |
119 | static |
120 | int get_current_subbuf_addr(struct lttng_consumer_stream *stream, | |
121 | const char **addr) | |
122 | { | |
123 | int ret; | |
124 | unsigned long mmap_offset; | |
97535efa | 125 | const char *mmap_base = (const char *) stream->mmap_base; |
128708c3 JG |
126 | |
127 | ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset); | |
128 | if (ret < 0) { | |
129 | PERROR("Failed to get mmap read offset"); | |
130 | goto error; | |
131 | } | |
132 | ||
133 | *addr = mmap_base + mmap_offset; | |
134 | error: | |
135 | return ret; | |
136 | } | |
137 | ||
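For orientation, here is a minimal sketch (not part of the blamed file, and assuming its existing includes and types) of the reserve/read/release cycle in which `get_current_subbuf_addr()` is used; the helper name `read_one_subbuf_sketch` and the elided error handling are illustrative only. The snapshot loop further below performs the same sequence with full error handling.

```c
/* Illustrative only: condensed version of the get/read/put cycle. */
static int read_one_subbuf_sketch(struct lttng_consumer_stream *stream)
{
	unsigned long consumed_pos, padded_len;
	const char *subbuf_addr;
	int ret;

	/* Reserve the current sub-buffer for reading. */
	ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
	if (ret < 0) {
		return ret;
	}

	/* Get its padded size and locate it in the mmap'd area. */
	ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
	if (ret == 0) {
		ret = get_current_subbuf_addr(stream, &subbuf_addr);
	}
	if (ret == 0) {
		/* Consume [subbuf_addr, subbuf_addr + padded_len) here. */
	}

	/* Always release the sub-buffer back to the ring buffer. */
	if (kernctl_put_subbuf(stream->wait_fd) < 0) {
		ret = -1;
	}
	return ret;
}
```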
07b86b52 JD |
138 | /* |
139 | * Take a snapshot of all the streams of a channel |
3eb928aa | 140 | * RCU read-side lock must be held across this function to ensure existence of |
947bd097 | 141 | * channel. |
07b86b52 JD |
142 | * |
143 | * Returns 0 on success, < 0 on error | |
144 | */ | |
f72bb42f JG |
145 | static int lttng_kconsumer_snapshot_channel( |
146 | struct lttng_consumer_channel *channel, | |
147 | uint64_t key, char *path, uint64_t relayd_id, | |
148 | uint64_t nb_packets_per_stream, | |
5c786ded | 149 | struct lttng_consumer_local_data *ctx) |
07b86b52 JD |
150 | { |
151 | int ret; | |
07b86b52 JD |
152 | struct lttng_consumer_stream *stream; |
153 | ||
6a00837f | 154 | DBG("Kernel consumer snapshot channel %" PRIu64, key); |
07b86b52 | 155 | |
947bd097 JR |
156 | /* Prevent channel modifications while we perform the snapshot. */ |
157 | pthread_mutex_lock(&channel->lock); | |
158 | ||
07b86b52 JD |
159 | rcu_read_lock(); |
160 | ||
07b86b52 JD |
161 | /* Splice is not supported yet for channel snapshot. */ |
162 | if (channel->output != CONSUMER_CHANNEL_MMAP) { | |
9381314c JG |
163 | ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot", |
164 | channel->name); | |
07b86b52 JD |
165 | ret = -1; |
166 | goto end; | |
167 | } | |
168 | ||
10a50311 | 169 | cds_list_for_each_entry(stream, &channel->streams.head, send_node) { |
923333cd | 170 | unsigned long consumed_pos, produced_pos; |
9ce5646a MD |
171 | |
172 | health_code_update(); | |
173 | ||
07b86b52 JD |
174 | /* |
175 | * Lock stream because we are about to change its state. | |
176 | */ | |
177 | pthread_mutex_lock(&stream->lock); | |
178 | ||
a0377dfe | 179 | LTTNG_ASSERT(channel->trace_chunk); |
d2956687 JG |
180 | if (!lttng_trace_chunk_get(channel->trace_chunk)) { |
181 | /* | |
182 | * Can't happen barring an internal error as the channel | |
183 | * holds a reference to the trace chunk. | |
184 | */ | |
185 | ERR("Failed to acquire reference to channel's trace chunk"); | |
186 | ret = -1; | |
187 | goto end_unlock; | |
188 | } | |
a0377dfe | 189 | LTTNG_ASSERT(!stream->trace_chunk); |
d2956687 JG |
190 | stream->trace_chunk = channel->trace_chunk; |
191 | ||
29decac3 DG |
192 | /* |
193 | * Assign the received relayd ID so we can use it for streaming. The streams | |
194 | * are not visible to anyone, so it is safe to change it. |
195 | */ | |
07b86b52 JD |
196 | stream->net_seq_idx = relayd_id; |
197 | channel->relayd_id = relayd_id; | |
198 | if (relayd_id != (uint64_t) -1ULL) { | |
10a50311 | 199 | ret = consumer_send_relayd_stream(stream, path); |
07b86b52 JD |
200 | if (ret < 0) { |
201 | ERR("sending stream to relayd"); | |
202 | goto end_unlock; | |
203 | } | |
07b86b52 | 204 | } else { |
d2956687 JG |
205 | ret = consumer_stream_create_output_files(stream, |
206 | false); | |
07b86b52 | 207 | if (ret < 0) { |
07b86b52 JD |
208 | goto end_unlock; |
209 | } | |
d2956687 JG |
210 | DBG("Kernel consumer snapshot stream (%" PRIu64 ")", |
211 | stream->key); | |
07b86b52 JD |
212 | } |
213 | ||
f22dd891 | 214 | ret = kernctl_buffer_flush_empty(stream->wait_fd); |
07b86b52 | 215 | if (ret < 0) { |
f22dd891 MD |
216 | /* |
217 | * Doing a buffer flush which does not take into | |
218 | * account empty packets. This is not perfect | |
219 | * for stream intersection, but required as a | |
220 | * fall-back when "flush_empty" is not | |
221 | * implemented by lttng-modules. | |
222 | */ | |
223 | ret = kernctl_buffer_flush(stream->wait_fd); | |
224 | if (ret < 0) { | |
225 | ERR("Failed to flush kernel stream"); | |
226 | goto end_unlock; | |
227 | } | |
07b86b52 JD |
228 | goto end_unlock; |
229 | } | |
230 | ||
231 | ret = lttng_kconsumer_take_snapshot(stream); | |
232 | if (ret < 0) { | |
233 | ERR("Taking kernel snapshot"); | |
234 | goto end_unlock; | |
235 | } | |
236 | ||
237 | ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos); | |
238 | if (ret < 0) { | |
239 | ERR("Produced kernel snapshot position"); | |
240 | goto end_unlock; | |
241 | } | |
242 | ||
243 | ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos); | |
244 | if (ret < 0) { | |
245 | ERR("Consumerd kernel snapshot position"); | |
246 | goto end_unlock; | |
247 | } | |
248 | ||
d07ceecd MD |
249 | consumed_pos = consumer_get_consume_start_pos(consumed_pos, |
250 | produced_pos, nb_packets_per_stream, | |
251 | stream->max_sb_size); | |
5c786ded | 252 | |
9377d830 | 253 | while ((long) (consumed_pos - produced_pos) < 0) { |
07b86b52 JD |
254 | ssize_t read_len; |
255 | unsigned long len, padded_len; | |
128708c3 | 256 | const char *subbuf_addr; |
fd424d99 | 257 | struct lttng_buffer_view subbuf_view; |
07b86b52 | 258 | |
9ce5646a | 259 | health_code_update(); |
07b86b52 JD |
260 | DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos); |
261 | ||
262 | ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos); | |
263 | if (ret < 0) { | |
32af2c95 | 264 | if (ret != -EAGAIN) { |
07b86b52 JD |
265 | PERROR("kernctl_get_subbuf snapshot"); |
266 | goto end_unlock; | |
267 | } | |
268 | DBG("Kernel consumer get subbuf failed. Skipping it."); | |
269 | consumed_pos += stream->max_sb_size; | |
ddc93ee4 | 270 | stream->chan->lost_packets++; |
07b86b52 JD |
271 | continue; |
272 | } | |
273 | ||
274 | ret = kernctl_get_subbuf_size(stream->wait_fd, &len); | |
275 | if (ret < 0) { | |
276 | ERR("Snapshot kernctl_get_subbuf_size"); | |
29decac3 | 277 | goto error_put_subbuf; |
07b86b52 JD |
278 | } |
279 | ||
280 | ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len); | |
281 | if (ret < 0) { | |
282 | ERR("Snapshot kernctl_get_padded_subbuf_size"); | |
29decac3 | 283 | goto error_put_subbuf; |
07b86b52 JD |
284 | } |
285 | ||
128708c3 JG |
286 | ret = get_current_subbuf_addr(stream, &subbuf_addr); |
287 | if (ret) { | |
288 | goto error_put_subbuf; | |
289 | } | |
290 | ||
fd424d99 JG |
291 | subbuf_view = lttng_buffer_view_init( |
292 | subbuf_addr, 0, padded_len); | |
f5ba75b4 | 293 | read_len = lttng_consumer_on_read_subbuffer_mmap( |
fd424d99 | 294 | stream, &subbuf_view, |
6f9449c2 | 295 | padded_len - len); |
07b86b52 | 296 | /* |
29decac3 DG |
297 | * We write the padded len in local tracefiles but the data len |
298 | * when using a relay. Display the error but continue processing | |
299 | * to try to release the subbuffer. | |
07b86b52 JD |
300 | */ |
301 | if (relayd_id != (uint64_t) -1ULL) { | |
302 | if (read_len != len) { | |
303 | ERR("Error sending to the relay (ret: %zd != len: %lu)", | |
304 | read_len, len); | |
305 | } | |
306 | } else { | |
307 | if (read_len != padded_len) { | |
308 | ERR("Error writing to tracefile (ret: %zd != len: %lu)", | |
309 | read_len, padded_len); | |
310 | } | |
311 | } | |
312 | ||
313 | ret = kernctl_put_subbuf(stream->wait_fd); | |
314 | if (ret < 0) { | |
315 | ERR("Snapshot kernctl_put_subbuf"); | |
316 | goto end_unlock; | |
317 | } | |
318 | consumed_pos += stream->max_sb_size; | |
319 | } | |
320 | ||
321 | if (relayd_id == (uint64_t) -1ULL) { | |
fdf9986c MD |
322 | if (stream->out_fd >= 0) { |
323 | ret = close(stream->out_fd); | |
324 | if (ret < 0) { | |
325 | PERROR("Kernel consumer snapshot close out_fd"); | |
326 | goto end_unlock; | |
327 | } | |
328 | stream->out_fd = -1; | |
07b86b52 | 329 | } |
07b86b52 JD |
330 | } else { |
331 | close_relayd_stream(stream); | |
332 | stream->net_seq_idx = (uint64_t) -1ULL; | |
333 | } | |
d2956687 JG |
334 | lttng_trace_chunk_put(stream->trace_chunk); |
335 | stream->trace_chunk = NULL; | |
07b86b52 JD |
336 | pthread_mutex_unlock(&stream->lock); |
337 | } | |
338 | ||
339 | /* All good! */ | |
340 | ret = 0; | |
341 | goto end; | |
342 | ||
29decac3 DG |
343 | error_put_subbuf: |
344 | ret = kernctl_put_subbuf(stream->wait_fd); | |
345 | if (ret < 0) { | |
346 | ERR("Snapshot kernctl_put_subbuf error path"); | |
347 | } | |
07b86b52 JD |
348 | end_unlock: |
349 | pthread_mutex_unlock(&stream->lock); | |
350 | end: | |
351 | rcu_read_unlock(); | |
947bd097 | 352 | pthread_mutex_unlock(&channel->lock); |
07b86b52 JD |
353 | return ret; |
354 | } | |
355 | ||
356 | /* | |
357 | * Read the whole metadata available for a snapshot. | |
3eb928aa | 358 | * RCU read-side lock must be held across this function to ensure existence of |
947bd097 | 359 | * metadata_channel. |
07b86b52 JD |
360 | * |
361 | * Returns 0 on success, < 0 on error | |
362 | */ | |
d2956687 JG |
363 | static int lttng_kconsumer_snapshot_metadata( |
364 | struct lttng_consumer_channel *metadata_channel, | |
3eb928aa MD |
365 | uint64_t key, char *path, uint64_t relayd_id, |
366 | struct lttng_consumer_local_data *ctx) | |
07b86b52 | 367 | { |
d771f832 DG |
368 | int ret, use_relayd = 0; |
369 | ssize_t ret_read; | |
07b86b52 | 370 | struct lttng_consumer_stream *metadata_stream; |
d771f832 | 371 | |
a0377dfe | 372 | LTTNG_ASSERT(ctx); |
07b86b52 JD |
373 | |
374 | DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s", | |
375 | key, path); | |
376 | ||
377 | rcu_read_lock(); | |
378 | ||
07b86b52 | 379 | metadata_stream = metadata_channel->metadata_stream; |
a0377dfe | 380 | LTTNG_ASSERT(metadata_stream); |
d2956687 | 381 | |
947bd097 | 382 | metadata_stream->read_subbuffer_ops.lock(metadata_stream); |
a0377dfe FD |
383 | LTTNG_ASSERT(metadata_channel->trace_chunk); |
384 | LTTNG_ASSERT(metadata_stream->trace_chunk); | |
07b86b52 | 385 | |
d771f832 | 386 | /* Flag once that we have a valid relayd for the stream. */ |
e2039c7a | 387 | if (relayd_id != (uint64_t) -1ULL) { |
d771f832 DG |
388 | use_relayd = 1; |
389 | } | |
390 | ||
391 | if (use_relayd) { | |
10a50311 | 392 | ret = consumer_send_relayd_stream(metadata_stream, path); |
e2039c7a | 393 | if (ret < 0) { |
fa27abe8 | 394 | goto error_snapshot; |
e2039c7a | 395 | } |
e2039c7a | 396 | } else { |
d2956687 JG |
397 | ret = consumer_stream_create_output_files(metadata_stream, |
398 | false); | |
e2039c7a | 399 | if (ret < 0) { |
fa27abe8 | 400 | goto error_snapshot; |
e2039c7a | 401 | } |
07b86b52 | 402 | } |
07b86b52 | 403 | |
d771f832 | 404 | do { |
9ce5646a MD |
405 | health_code_update(); |
406 | ||
6f9449c2 | 407 | ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true); |
d771f832 | 408 | if (ret_read < 0) { |
6e5e3c51 MD |
409 | ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)", |
410 | ret_read); | |
411 | ret = ret_read; | |
412 | goto error_snapshot; | |
07b86b52 | 413 | } |
6e5e3c51 | 414 | } while (ret_read > 0); |
07b86b52 | 415 | |
d771f832 DG |
416 | if (use_relayd) { |
417 | close_relayd_stream(metadata_stream); | |
418 | metadata_stream->net_seq_idx = (uint64_t) -1ULL; | |
419 | } else { | |
fdf9986c MD |
420 | if (metadata_stream->out_fd >= 0) { |
421 | ret = close(metadata_stream->out_fd); | |
422 | if (ret < 0) { | |
423 | PERROR("Kernel consumer snapshot metadata close out_fd"); | |
424 | /* | |
425 | * Don't take the error path here since the snapshot was successful |
426 | * at this point; only the close failed. |
427 | */ | |
428 | } | |
429 | metadata_stream->out_fd = -1; | |
d2956687 JG |
430 | lttng_trace_chunk_put(metadata_stream->trace_chunk); |
431 | metadata_stream->trace_chunk = NULL; | |
e2039c7a | 432 | } |
e2039c7a JD |
433 | } |
434 | ||
07b86b52 | 435 | ret = 0; |
fa27abe8 | 436 | error_snapshot: |
947bd097 | 437 | metadata_stream->read_subbuffer_ops.unlock(metadata_stream); |
cf53a8a6 JD |
438 | cds_list_del(&metadata_stream->send_node); |
439 | consumer_stream_destroy(metadata_stream, NULL); | |
440 | metadata_channel->metadata_stream = NULL; | |
07b86b52 JD |
441 | rcu_read_unlock(); |
442 | return ret; | |
443 | } | |
444 | ||
1803a064 MD |
445 | /* |
446 | * Receive command from session daemon and process it. | |
447 | * | |
448 | * Return 1 on success else a negative value or 0. | |
449 | */ | |
3bd1e081 MD |
450 | int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx, |
451 | int sock, struct pollfd *consumer_sockpoll) | |
452 | { | |
0c5b3718 | 453 | int ret_func; |
0c759fc9 | 454 | enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; |
3bd1e081 MD |
455 | struct lttcomm_consumer_msg msg; |
456 | ||
9ce5646a MD |
457 | health_code_update(); |
458 | ||
0c5b3718 SM |
459 | { |
460 | ssize_t ret_recv; | |
461 | ||
462 | ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg)); | |
463 | if (ret_recv != sizeof(msg)) { | |
464 | if (ret_recv > 0) { | |
465 | lttng_consumer_send_error(ctx, | |
466 | LTTCOMM_CONSUMERD_ERROR_RECV_CMD); | |
467 | ret_recv = -1; | |
468 | } | |
469 | return ret_recv; | |
1803a064 | 470 | } |
3bd1e081 | 471 | } |
9ce5646a MD |
472 | |
473 | health_code_update(); | |
474 | ||
84382d49 | 475 | /* Deprecated command */ |
a0377dfe | 476 | LTTNG_ASSERT(msg.cmd_type != LTTNG_CONSUMER_STOP); |
3bd1e081 | 477 | |
9ce5646a MD |
478 | health_code_update(); |
479 | ||
b0b335c8 MD |
480 | /* relayd needs RCU read-side protection */ |
481 | rcu_read_lock(); | |
482 | ||
3bd1e081 | 483 | switch (msg.cmd_type) { |
00e2e675 DG |
484 | case LTTNG_CONSUMER_ADD_RELAYD_SOCKET: |
485 | { | |
f50f23d9 | 486 | /* Session daemon status messages are handled in the following call. */ |
2527bf85 | 487 | consumer_add_relayd_socket(msg.u.relayd_sock.net_index, |
7735ef9e | 488 | msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll, |
d3e2ba59 | 489 | &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id, |
2527bf85 | 490 | msg.u.relayd_sock.relayd_session_id); |
00e2e675 DG |
491 | goto end_nosignal; |
492 | } | |
3bd1e081 MD |
493 | case LTTNG_CONSUMER_ADD_CHANNEL: |
494 | { | |
495 | struct lttng_consumer_channel *new_channel; | |
afbf29db | 496 | int ret_send_status, ret_add_channel = 0; |
d2956687 | 497 | const uint64_t chunk_id = msg.u.channel.chunk_id.value; |
3bd1e081 | 498 | |
9ce5646a MD |
499 | health_code_update(); |
500 | ||
f50f23d9 | 501 | /* First send a status message before receiving the fds. */ |
0c5b3718 SM |
502 | ret_send_status = consumer_send_status_msg(sock, ret_code); |
503 | if (ret_send_status < 0) { | |
f50f23d9 | 504 | /* Somehow, the session daemon is not responding anymore. */ |
1803a064 | 505 | goto error_fatal; |
f50f23d9 | 506 | } |
9ce5646a MD |
507 | |
508 | health_code_update(); | |
509 | ||
d88aee68 | 510 | DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key); |
3bd1e081 | 511 | new_channel = consumer_allocate_channel(msg.u.channel.channel_key, |
d2956687 JG |
512 | msg.u.channel.session_id, |
513 | msg.u.channel.chunk_id.is_set ? | |
514 | &chunk_id : NULL, | |
515 | msg.u.channel.pathname, | |
516 | msg.u.channel.name, | |
1624d5b7 JD |
517 | msg.u.channel.relayd_id, msg.u.channel.output, |
518 | msg.u.channel.tracefile_size, | |
1950109e | 519 | msg.u.channel.tracefile_count, 0, |
ecc48a90 | 520 | msg.u.channel.monitor, |
d7ba1388 | 521 | msg.u.channel.live_timer_interval, |
a2814ea7 | 522 | msg.u.channel.is_live, |
3d071855 | 523 | NULL, NULL); |
3bd1e081 | 524 | if (new_channel == NULL) { |
f73fabfd | 525 | lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); |
3bd1e081 MD |
526 | goto end_nosignal; |
527 | } | |
ffe60014 | 528 | new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams; |
95a1109b JD |
529 | switch (msg.u.channel.output) { |
530 | case LTTNG_EVENT_SPLICE: | |
531 | new_channel->output = CONSUMER_CHANNEL_SPLICE; | |
532 | break; | |
533 | case LTTNG_EVENT_MMAP: | |
534 | new_channel->output = CONSUMER_CHANNEL_MMAP; | |
535 | break; | |
536 | default: | |
537 | ERR("Channel output unknown %d", msg.u.channel.output); | |
538 | goto end_nosignal; | |
539 | } | |
ffe60014 DG |
540 | |
541 | /* Translate and save channel type. */ | |
542 | switch (msg.u.channel.type) { | |
543 | case CONSUMER_CHANNEL_TYPE_DATA: | |
544 | case CONSUMER_CHANNEL_TYPE_METADATA: | |
97535efa | 545 | new_channel->type = (consumer_channel_type) msg.u.channel.type; |
ffe60014 DG |
546 | break; |
547 | default: | |
a0377dfe | 548 | abort(); |
ffe60014 DG |
549 | goto end_nosignal; |
550 | }; | |
551 | ||
9ce5646a MD |
552 | health_code_update(); |
553 | ||
3bd1e081 | 554 | if (ctx->on_recv_channel != NULL) { |
0c5b3718 SM |
555 | int ret_recv_channel = |
556 | ctx->on_recv_channel(new_channel); | |
557 | if (ret_recv_channel == 0) { | |
558 | ret_add_channel = consumer_add_channel( | |
559 | new_channel, ctx); | |
560 | } else if (ret_recv_channel < 0) { | |
3bd1e081 MD |
561 | goto end_nosignal; |
562 | } | |
563 | } else { | |
0c5b3718 SM |
564 | ret_add_channel = |
565 | consumer_add_channel(new_channel, ctx); | |
3bd1e081 | 566 | } |
0c5b3718 SM |
567 | if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA && |
568 | !ret_add_channel) { | |
e9404c27 JG |
569 | int monitor_start_ret; |
570 | ||
571 | DBG("Consumer starting monitor timer"); | |
94d49140 JD |
572 | consumer_timer_live_start(new_channel, |
573 | msg.u.channel.live_timer_interval); | |
e9404c27 JG |
574 | monitor_start_ret = consumer_timer_monitor_start( |
575 | new_channel, | |
576 | msg.u.channel.monitor_timer_interval); | |
577 | if (monitor_start_ret < 0) { | |
578 | ERR("Starting channel monitoring timer failed"); | |
579 | goto end_nosignal; | |
580 | } | |
94d49140 | 581 | } |
e43c41c5 | 582 | |
9ce5646a MD |
583 | health_code_update(); |
584 | ||
e43c41c5 | 585 | /* If we received an error in add_channel, we need to report it. */ |
0c5b3718 SM |
586 | if (ret_add_channel < 0) { |
587 | ret_send_status = consumer_send_status_msg( | |
588 | sock, ret_add_channel); | |
589 | if (ret_send_status < 0) { | |
1803a064 MD |
590 | goto error_fatal; |
591 | } | |
e43c41c5 JD |
592 | goto end_nosignal; |
593 | } | |
594 | ||
3bd1e081 MD |
595 | goto end_nosignal; |
596 | } | |
597 | case LTTNG_CONSUMER_ADD_STREAM: | |
598 | { | |
dae10966 DG |
599 | int fd; |
600 | struct lttng_pipe *stream_pipe; | |
00e2e675 | 601 | struct lttng_consumer_stream *new_stream; |
ffe60014 | 602 | struct lttng_consumer_channel *channel; |
c80048c6 | 603 | int alloc_ret = 0; |
0c5b3718 SM |
604 | int ret_send_status, ret_poll, ret_get_max_subbuf_size; |
605 | ssize_t ret_pipe_write, ret_recv; | |
3bd1e081 | 606 | |
ffe60014 DG |
607 | /* |
608 | * Get stream's channel reference. Needed when adding the stream to the | |
609 | * global hash table. | |
610 | */ | |
611 | channel = consumer_find_channel(msg.u.stream.channel_key); | |
612 | if (!channel) { | |
613 | /* | |
614 | * We could not find the channel. Can happen if cpu hotplug | |
615 | * happens while tearing down. | |
616 | */ | |
d88aee68 | 617 | ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key); |
e462382a | 618 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; |
ffe60014 DG |
619 | } |
620 | ||
9ce5646a MD |
621 | health_code_update(); |
622 | ||
f50f23d9 | 623 | /* First send a status message before receiving the fds. */ |
0c5b3718 SM |
624 | ret_send_status = consumer_send_status_msg(sock, ret_code); |
625 | if (ret_send_status < 0) { | |
d771f832 | 626 | /* Somehow, the session daemon is not responding anymore. */ |
c5c7998f | 627 | goto error_add_stream_fatal; |
1803a064 | 628 | } |
9ce5646a MD |
629 | |
630 | health_code_update(); | |
631 | ||
0c759fc9 | 632 | if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) { |
d771f832 | 633 | /* Channel was not found. */ |
c5c7998f | 634 | goto error_add_stream_nosignal; |
f50f23d9 DG |
635 | } |
636 | ||
d771f832 | 637 | /* Blocking call */ |
9ce5646a | 638 | health_poll_entry(); |
0c5b3718 | 639 | ret_poll = lttng_consumer_poll_socket(consumer_sockpoll); |
9ce5646a | 640 | health_poll_exit(); |
0c5b3718 | 641 | if (ret_poll) { |
c5c7998f | 642 | goto error_add_stream_fatal; |
3bd1e081 | 643 | } |
00e2e675 | 644 | |
9ce5646a MD |
645 | health_code_update(); |
646 | ||
00e2e675 | 647 | /* Get stream file descriptor from socket */ |
0c5b3718 SM |
648 | ret_recv = lttcomm_recv_fds_unix_sock(sock, &fd, 1); |
649 | if (ret_recv != sizeof(fd)) { | |
f73fabfd | 650 | lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD); |
0c5b3718 | 651 | ret_func = ret_recv; |
c5c7998f | 652 | goto end; |
3bd1e081 | 653 | } |
3bd1e081 | 654 | |
9ce5646a MD |
655 | health_code_update(); |
656 | ||
f50f23d9 DG |
657 | /* |
658 | * Send status code to session daemon only if the recv works. If the | |
659 | * above recv() failed, the session daemon is notified through the | |
660 | * error socket and the teardown is eventually done. | |
661 | */ | |
0c5b3718 SM |
662 | ret_send_status = consumer_send_status_msg(sock, ret_code); |
663 | if (ret_send_status < 0) { | |
f50f23d9 | 664 | /* Somehow, the session daemon is not responding anymore. */ |
c5c7998f | 665 | goto error_add_stream_nosignal; |
f50f23d9 DG |
666 | } |
667 | ||
9ce5646a MD |
668 | health_code_update(); |
669 | ||
d2956687 | 670 | pthread_mutex_lock(&channel->lock); |
6f9449c2 | 671 | new_stream = consumer_stream_create( |
49f45573 JG |
672 | channel, |
673 | channel->key, | |
ffe60014 | 674 | fd, |
ffe60014 | 675 | channel->name, |
ffe60014 DG |
676 | channel->relayd_id, |
677 | channel->session_id, | |
d2956687 | 678 | channel->trace_chunk, |
ffe60014 DG |
679 | msg.u.stream.cpu, |
680 | &alloc_ret, | |
4891ece8 | 681 | channel->type, |
d2956687 | 682 | channel->monitor); |
3bd1e081 | 683 | if (new_stream == NULL) { |
c80048c6 MD |
684 | switch (alloc_ret) { |
685 | case -ENOMEM: | |
686 | case -EINVAL: | |
687 | default: | |
688 | lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); | |
689 | break; | |
c80048c6 | 690 | } |
d2956687 | 691 | pthread_mutex_unlock(&channel->lock); |
c5c7998f | 692 | goto error_add_stream_nosignal; |
3bd1e081 | 693 | } |
d771f832 | 694 | |
ffe60014 | 695 | new_stream->wait_fd = fd; |
0c5b3718 SM |
696 | ret_get_max_subbuf_size = kernctl_get_max_subbuf_size( |
697 | new_stream->wait_fd, &new_stream->max_sb_size); | |
698 | if (ret_get_max_subbuf_size < 0) { | |
d05185fa JG |
699 | pthread_mutex_unlock(&channel->lock); |
700 | ERR("Failed to get kernel maximal subbuffer size"); | |
c5c7998f | 701 | goto error_add_stream_nosignal; |
d05185fa JG |
702 | } |
703 | ||
d9a2e16e JD |
704 | consumer_stream_update_channel_attributes(new_stream, |
705 | channel); | |
00e2e675 | 706 | |
a0c83db9 DG |
707 | /* |
708 | * We've just assigned the channel to the stream, so increment the |
07b86b52 JD |
709 | * refcount right now. We don't need to increment the refcount for |
710 | * streams in no-monitor mode because we handle their cleanup manually. |
711 | * It is very important to make sure there are NO prior |
712 | * consumer_del_stream() calls, or else the refcount will be unbalanced. |
a0c83db9 | 713 | */ |
07b86b52 JD |
714 | if (channel->monitor) { |
715 | uatomic_inc(&new_stream->chan->refcount); | |
716 | } | |
9d9353f9 | 717 | |
fb3a43a9 DG |
718 | /* |
719 | * The buffer flush is done on the session daemon side for the kernel | |
720 | * so there is no need to track the stream's "hangup_flush_done" |
721 | * variable. This is important for a kernel stream since we don't rely |
722 | * on the flush state of the stream to read data. This is not the case |
723 | * for user space tracing. |
724 | */ | |
725 | new_stream->hangup_flush_done = 0; | |
726 | ||
9ce5646a MD |
727 | health_code_update(); |
728 | ||
d2956687 | 729 | pthread_mutex_lock(&new_stream->lock); |
633d0084 | 730 | if (ctx->on_recv_stream) { |
0c5b3718 SM |
731 | int ret_recv_stream = ctx->on_recv_stream(new_stream); |
732 | if (ret_recv_stream < 0) { | |
d2956687 JG |
733 | pthread_mutex_unlock(&new_stream->lock); |
734 | pthread_mutex_unlock(&channel->lock); | |
d771f832 | 735 | consumer_stream_free(new_stream); |
c5c7998f | 736 | goto error_add_stream_nosignal; |
fb3a43a9 | 737 | } |
633d0084 | 738 | } |
9ce5646a MD |
739 | health_code_update(); |
740 | ||
07b86b52 JD |
741 | if (new_stream->metadata_flag) { |
742 | channel->metadata_stream = new_stream; | |
743 | } | |
744 | ||
2bba9e53 DG |
745 | /* Do not monitor this stream. */ |
746 | if (!channel->monitor) { | |
5eecee74 | 747 | DBG("Kernel consumer add stream %s in no monitor mode with " |
6dc3064a | 748 | "relayd id %" PRIu64, new_stream->name, |
5eecee74 | 749 | new_stream->net_seq_idx); |
10a50311 | 750 | cds_list_add(&new_stream->send_node, &channel->streams.head); |
d2956687 JG |
751 | pthread_mutex_unlock(&new_stream->lock); |
752 | pthread_mutex_unlock(&channel->lock); | |
c5c7998f | 753 | goto end_add_stream; |
6dc3064a DG |
754 | } |
755 | ||
e1b71bdc DG |
756 | /* Send stream to relayd if the stream has an ID. */ |
757 | if (new_stream->net_seq_idx != (uint64_t) -1ULL) { | |
0c5b3718 SM |
758 | int ret_send_relayd_stream; |
759 | ||
760 | ret_send_relayd_stream = consumer_send_relayd_stream( | |
761 | new_stream, new_stream->chan->pathname); | |
762 | if (ret_send_relayd_stream < 0) { | |
d2956687 JG |
763 | pthread_mutex_unlock(&new_stream->lock); |
764 | pthread_mutex_unlock(&channel->lock); | |
e1b71bdc | 765 | consumer_stream_free(new_stream); |
c5c7998f | 766 | goto error_add_stream_nosignal; |
e1b71bdc | 767 | } |
001b7e62 MD |
768 | |
769 | /* | |
770 | * If adding an extra stream to an already | |
771 | * existing channel (e.g. cpu hotplug), we need | |
772 | * to send the "streams_sent" command to relayd. | |
773 | */ | |
774 | if (channel->streams_sent_to_relayd) { | |
0c5b3718 SM |
775 | int ret_send_relayd_streams_sent; |
776 | ||
777 | ret_send_relayd_streams_sent = | |
778 | consumer_send_relayd_streams_sent( | |
779 | new_stream->net_seq_idx); | |
780 | if (ret_send_relayd_streams_sent < 0) { | |
d2956687 JG |
781 | pthread_mutex_unlock(&new_stream->lock); |
782 | pthread_mutex_unlock(&channel->lock); | |
c5c7998f | 783 | goto error_add_stream_nosignal; |
001b7e62 MD |
784 | } |
785 | } | |
e2039c7a | 786 | } |
d2956687 JG |
787 | pthread_mutex_unlock(&new_stream->lock); |
788 | pthread_mutex_unlock(&channel->lock); | |
e2039c7a | 789 | |
50f8ae69 | 790 | /* Get the right pipe where the stream will be sent. */ |
633d0084 | 791 | if (new_stream->metadata_flag) { |
66d583dc | 792 | consumer_add_metadata_stream(new_stream); |
dae10966 | 793 | stream_pipe = ctx->consumer_metadata_pipe; |
3bd1e081 | 794 | } else { |
66d583dc | 795 | consumer_add_data_stream(new_stream); |
dae10966 | 796 | stream_pipe = ctx->consumer_data_pipe; |
50f8ae69 DG |
797 | } |
798 | ||
66d583dc | 799 | /* Visible to other threads */ |
5ab66908 MD |
800 | new_stream->globally_visible = 1; |
801 | ||
9ce5646a MD |
802 | health_code_update(); |
803 | ||
0c5b3718 SM |
804 | ret_pipe_write = lttng_pipe_write( |
805 | stream_pipe, &new_stream, sizeof(new_stream)); | |
806 | if (ret_pipe_write < 0) { | |
dae10966 | 807 | ERR("Consumer write %s stream to pipe %d", |
50f8ae69 | 808 | new_stream->metadata_flag ? "metadata" : "data", |
dae10966 | 809 | lttng_pipe_get_writefd(stream_pipe)); |
5ab66908 MD |
810 | if (new_stream->metadata_flag) { |
811 | consumer_del_stream_for_metadata(new_stream); | |
812 | } else { | |
813 | consumer_del_stream_for_data(new_stream); | |
814 | } | |
c5c7998f | 815 | goto error_add_stream_nosignal; |
3bd1e081 | 816 | } |
00e2e675 | 817 | |
02d02e31 JD |
818 | DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64, |
819 | new_stream->name, fd, new_stream->chan->pathname, new_stream->relayd_stream_id); | |
c5c7998f | 820 | end_add_stream: |
3bd1e081 | 821 | break; |
c5c7998f JG |
822 | error_add_stream_nosignal: |
823 | goto end_nosignal; | |
824 | error_add_stream_fatal: | |
825 | goto error_fatal; | |
3bd1e081 | 826 | } |
a4baae1b JD |
827 | case LTTNG_CONSUMER_STREAMS_SENT: |
828 | { | |
829 | struct lttng_consumer_channel *channel; | |
0c5b3718 | 830 | int ret_send_status; |
a4baae1b JD |
831 | |
832 | /* | |
833 | * Get stream's channel reference. Needed when adding the stream to the | |
834 | * global hash table. | |
835 | */ | |
836 | channel = consumer_find_channel(msg.u.sent_streams.channel_key); | |
837 | if (!channel) { | |
838 | /* | |
839 | * We could not find the channel. Can happen if cpu hotplug | |
840 | * happens while tearing down. | |
841 | */ | |
842 | ERR("Unable to find channel key %" PRIu64, | |
843 | msg.u.sent_streams.channel_key); | |
e462382a | 844 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; |
a4baae1b JD |
845 | } |
846 | ||
847 | health_code_update(); | |
848 | ||
849 | /* | |
850 | * Send status code to session daemon. | |
851 | */ | |
0c5b3718 SM |
852 | ret_send_status = consumer_send_status_msg(sock, ret_code); |
853 | if (ret_send_status < 0 || | |
854 | ret_code != LTTCOMM_CONSUMERD_SUCCESS) { | |
a4baae1b | 855 | /* Somehow, the session daemon is not responding anymore. */ |
80d5a658 | 856 | goto error_streams_sent_nosignal; |
a4baae1b JD |
857 | } |
858 | ||
859 | health_code_update(); | |
860 | ||
861 | /* | |
862 | * We should not send this message if we don't monitor the | |
863 | * streams in this channel. | |
864 | */ | |
865 | if (!channel->monitor) { | |
80d5a658 | 866 | goto end_error_streams_sent; |
a4baae1b JD |
867 | } |
868 | ||
869 | health_code_update(); | |
870 | /* Send stream to relayd if the stream has an ID. */ | |
871 | if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) { | |
0c5b3718 SM |
872 | int ret_send_relay_streams; |
873 | ||
874 | ret_send_relay_streams = consumer_send_relayd_streams_sent( | |
a4baae1b | 875 | msg.u.sent_streams.net_seq_idx); |
0c5b3718 | 876 | if (ret_send_relay_streams < 0) { |
80d5a658 | 877 | goto error_streams_sent_nosignal; |
a4baae1b | 878 | } |
001b7e62 | 879 | channel->streams_sent_to_relayd = true; |
a4baae1b | 880 | } |
80d5a658 | 881 | end_error_streams_sent: |
a4baae1b | 882 | break; |
80d5a658 JG |
883 | error_streams_sent_nosignal: |
884 | goto end_nosignal; | |
a4baae1b | 885 | } |
3bd1e081 MD |
886 | case LTTNG_CONSUMER_UPDATE_STREAM: |
887 | { | |
3f8e211f DG |
888 | rcu_read_unlock(); |
889 | return -ENOSYS; | |
890 | } | |
891 | case LTTNG_CONSUMER_DESTROY_RELAYD: | |
892 | { | |
a6ba4fe1 | 893 | uint64_t index = msg.u.destroy_relayd.net_seq_idx; |
3f8e211f | 894 | struct consumer_relayd_sock_pair *relayd; |
0c5b3718 | 895 | int ret_send_status; |
3f8e211f | 896 | |
a6ba4fe1 | 897 | DBG("Kernel consumer destroying relayd %" PRIu64, index); |
3f8e211f DG |
898 | |
899 | /* Get relayd reference if exists. */ | |
a6ba4fe1 | 900 | relayd = consumer_find_relayd(index); |
3f8e211f | 901 | if (relayd == NULL) { |
3448e266 | 902 | DBG("Unable to find relayd %" PRIu64, index); |
e462382a | 903 | ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL; |
3bd1e081 | 904 | } |
3f8e211f | 905 | |
a6ba4fe1 DG |
906 | /* |
907 | * Each relayd socket pair has a refcount of streams attached to it |
908 | * which tells whether the relayd is still active, depending on the |
909 | * refcount value. |
910 | * | |
911 | * This will set the destroy flag of the relayd object and destroy it | |
912 | * if the refcount reaches zero when called. | |
913 | * | |
914 | * The destroy can happen either here or when a stream fd hangs up. | |
915 | */ | |
f50f23d9 DG |
916 | if (relayd) { |
917 | consumer_flag_relayd_for_destroy(relayd); | |
918 | } | |
919 | ||
9ce5646a MD |
920 | health_code_update(); |
921 | ||
0c5b3718 SM |
922 | ret_send_status = consumer_send_status_msg(sock, ret_code); |
923 | if (ret_send_status < 0) { | |
f50f23d9 | 924 | /* Somehow, the session daemon is not responding anymore. */ |
1803a064 | 925 | goto error_fatal; |
f50f23d9 | 926 | } |
3f8e211f | 927 | |
3f8e211f | 928 | goto end_nosignal; |
3bd1e081 | 929 | } |
6d805429 | 930 | case LTTNG_CONSUMER_DATA_PENDING: |
53632229 | 931 | { |
0c5b3718 | 932 | int32_t ret_data_pending; |
6d805429 | 933 | uint64_t id = msg.u.data_pending.session_id; |
0c5b3718 | 934 | ssize_t ret_send; |
c8f59ee5 | 935 | |
6d805429 | 936 | DBG("Kernel consumer data pending command for id %" PRIu64, id); |
c8f59ee5 | 937 | |
0c5b3718 | 938 | ret_data_pending = consumer_data_pending(id); |
c8f59ee5 | 939 | |
9ce5646a MD |
940 | health_code_update(); |
941 | ||
c8f59ee5 | 942 | /* Send back returned value to session daemon */ |
0c5b3718 SM |
943 | ret_send = lttcomm_send_unix_sock(sock, &ret_data_pending, |
944 | sizeof(ret_data_pending)); | |
945 | if (ret_send < 0) { | |
6d805429 | 946 | PERROR("send data pending ret code"); |
1803a064 | 947 | goto error_fatal; |
c8f59ee5 | 948 | } |
f50f23d9 DG |
949 | |
950 | /* | |
951 | * No need to send back a status message since the data pending | |
952 | * returned value is the response. | |
953 | */ | |
c8f59ee5 | 954 | break; |
53632229 | 955 | } |
6dc3064a DG |
956 | case LTTNG_CONSUMER_SNAPSHOT_CHANNEL: |
957 | { | |
3eb928aa MD |
958 | struct lttng_consumer_channel *channel; |
959 | uint64_t key = msg.u.snapshot_channel.key; | |
0c5b3718 | 960 | int ret_send_status; |
3eb928aa MD |
961 | |
962 | channel = consumer_find_channel(key); | |
963 | if (!channel) { | |
964 | ERR("Channel %" PRIu64 " not found", key); | |
965 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; | |
07b86b52 | 966 | } else { |
3eb928aa | 967 | if (msg.u.snapshot_channel.metadata == 1) { |
0c5b3718 SM |
968 | int ret_snapshot; |
969 | ||
970 | ret_snapshot = lttng_kconsumer_snapshot_metadata( | |
971 | channel, key, | |
3eb928aa | 972 | msg.u.snapshot_channel.pathname, |
0c5b3718 SM |
973 | msg.u.snapshot_channel.relayd_id, |
974 | ctx); | |
975 | if (ret_snapshot < 0) { | |
3eb928aa MD |
976 | ERR("Snapshot metadata failed"); |
977 | ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED; | |
978 | } | |
979 | } else { | |
0c5b3718 SM |
980 | int ret_snapshot; |
981 | ||
982 | ret_snapshot = lttng_kconsumer_snapshot_channel( | |
983 | channel, key, | |
3eb928aa MD |
984 | msg.u.snapshot_channel.pathname, |
985 | msg.u.snapshot_channel.relayd_id, | |
0c5b3718 SM |
986 | msg.u.snapshot_channel |
987 | .nb_packets_per_stream, | |
3eb928aa | 988 | ctx); |
0c5b3718 | 989 | if (ret_snapshot < 0) { |
3eb928aa MD |
990 | ERR("Snapshot channel failed"); |
991 | ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED; | |
992 | } | |
07b86b52 JD |
993 | } |
994 | } | |
9ce5646a MD |
995 | health_code_update(); |
996 | ||
0c5b3718 SM |
997 | ret_send_status = consumer_send_status_msg(sock, ret_code); |
998 | if (ret_send_status < 0) { | |
6dc3064a DG |
999 | /* Somehow, the session daemon is not responding anymore. */ |
1000 | goto end_nosignal; | |
1001 | } | |
1002 | break; | |
1003 | } | |
07b86b52 JD |
1004 | case LTTNG_CONSUMER_DESTROY_CHANNEL: |
1005 | { | |
1006 | uint64_t key = msg.u.destroy_channel.key; | |
1007 | struct lttng_consumer_channel *channel; | |
0c5b3718 | 1008 | int ret_send_status; |
07b86b52 JD |
1009 | |
1010 | channel = consumer_find_channel(key); | |
1011 | if (!channel) { | |
1012 | ERR("Kernel consumer destroy channel %" PRIu64 " not found", key); | |
e462382a | 1013 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; |
07b86b52 JD |
1014 | } |
1015 | ||
9ce5646a MD |
1016 | health_code_update(); |
1017 | ||
0c5b3718 SM |
1018 | ret_send_status = consumer_send_status_msg(sock, ret_code); |
1019 | if (ret_send_status < 0) { | |
07b86b52 | 1020 | /* Somehow, the session daemon is not responding anymore. */ |
a9d36096 | 1021 | goto end_destroy_channel; |
07b86b52 JD |
1022 | } |
1023 | ||
9ce5646a MD |
1024 | health_code_update(); |
1025 | ||
15dc512a DG |
1026 | /* Stop right now if no channel was found. */ |
1027 | if (!channel) { | |
a9d36096 | 1028 | goto end_destroy_channel; |
15dc512a DG |
1029 | } |
1030 | ||
07b86b52 JD |
1031 | /* |
1032 | * This command should ONLY be issued for channel with streams set in | |
1033 | * no monitor mode. | |
1034 | */ | |
a0377dfe | 1035 | LTTNG_ASSERT(!channel->monitor); |
07b86b52 JD |
1036 | |
1037 | /* | |
1038 | * The refcount should ALWAYS be 0 in the case of a channel in no | |
1039 | * monitor mode. | |
1040 | */ | |
a0377dfe | 1041 | LTTNG_ASSERT(!uatomic_sub_return(&channel->refcount, 1)); |
07b86b52 JD |
1042 | |
1043 | consumer_del_channel(channel); | |
a9d36096 | 1044 | end_destroy_channel: |
07b86b52 JD |
1045 | goto end_nosignal; |
1046 | } | |
fb83fe64 JD |
1047 | case LTTNG_CONSUMER_DISCARDED_EVENTS: |
1048 | { | |
66ab32be JD |
1049 | ssize_t ret; |
1050 | uint64_t count; | |
fb83fe64 JD |
1051 | struct lttng_consumer_channel *channel; |
1052 | uint64_t id = msg.u.discarded_events.session_id; | |
1053 | uint64_t key = msg.u.discarded_events.channel_key; | |
1054 | ||
e5742757 MD |
1055 | DBG("Kernel consumer discarded events command for session id %" |
1056 | PRIu64 ", channel key %" PRIu64, id, key); | |
1057 | ||
fb83fe64 JD |
1058 | channel = consumer_find_channel(key); |
1059 | if (!channel) { | |
1060 | ERR("Kernel consumer discarded events channel %" | |
1061 | PRIu64 " not found", key); | |
66ab32be | 1062 | count = 0; |
e5742757 | 1063 | } else { |
66ab32be | 1064 | count = channel->discarded_events; |
fb83fe64 JD |
1065 | } |
1066 | ||
fb83fe64 JD |
1067 | health_code_update(); |
1068 | ||
1069 | /* Send back returned value to session daemon */ | |
66ab32be | 1070 | ret = lttcomm_send_unix_sock(sock, &count, sizeof(count)); |
fb83fe64 JD |
1071 | if (ret < 0) { |
1072 | PERROR("send discarded events"); | |
1073 | goto error_fatal; | |
1074 | } | |
1075 | ||
1076 | break; | |
1077 | } | |
1078 | case LTTNG_CONSUMER_LOST_PACKETS: | |
1079 | { | |
66ab32be JD |
1080 | ssize_t ret; |
1081 | uint64_t count; | |
fb83fe64 JD |
1082 | struct lttng_consumer_channel *channel; |
1083 | uint64_t id = msg.u.lost_packets.session_id; | |
1084 | uint64_t key = msg.u.lost_packets.channel_key; | |
1085 | ||
e5742757 MD |
1086 | DBG("Kernel consumer lost packets command for session id %" |
1087 | PRIu64 ", channel key %" PRIu64, id, key); | |
1088 | ||
fb83fe64 JD |
1089 | channel = consumer_find_channel(key); |
1090 | if (!channel) { | |
1091 | ERR("Kernel consumer lost packets channel %" | |
1092 | PRIu64 " not found", key); | |
66ab32be | 1093 | count = 0; |
e5742757 | 1094 | } else { |
66ab32be | 1095 | count = channel->lost_packets; |
fb83fe64 JD |
1096 | } |
1097 | ||
fb83fe64 JD |
1098 | health_code_update(); |
1099 | ||
1100 | /* Send back returned value to session daemon */ | |
66ab32be | 1101 | ret = lttcomm_send_unix_sock(sock, &count, sizeof(count)); |
fb83fe64 JD |
1102 | if (ret < 0) { |
1103 | PERROR("send lost packets"); | |
1104 | goto error_fatal; | |
1105 | } | |
1106 | ||
1107 | break; | |
1108 | } | |
b3530820 JG |
1109 | case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE: |
1110 | { | |
1111 | int channel_monitor_pipe; | |
0c5b3718 SM |
1112 | int ret_send_status, ret_set_channel_monitor_pipe; |
1113 | ssize_t ret_recv; | |
b3530820 JG |
1114 | |
1115 | ret_code = LTTCOMM_CONSUMERD_SUCCESS; | |
1116 | /* Successfully received the command's type. */ | |
0c5b3718 SM |
1117 | ret_send_status = consumer_send_status_msg(sock, ret_code); |
1118 | if (ret_send_status < 0) { | |
b3530820 JG |
1119 | goto error_fatal; |
1120 | } | |
1121 | ||
0c5b3718 SM |
1122 | ret_recv = lttcomm_recv_fds_unix_sock( |
1123 | sock, &channel_monitor_pipe, 1); | |
1124 | if (ret_recv != sizeof(channel_monitor_pipe)) { | |
b3530820 JG |
1125 | ERR("Failed to receive channel monitor pipe"); |
1126 | goto error_fatal; | |
1127 | } | |
1128 | ||
1129 | DBG("Received channel monitor pipe (%d)", channel_monitor_pipe); | |
0c5b3718 SM |
1130 | ret_set_channel_monitor_pipe = |
1131 | consumer_timer_thread_set_channel_monitor_pipe( | |
1132 | channel_monitor_pipe); | |
1133 | if (!ret_set_channel_monitor_pipe) { | |
b3530820 | 1134 | int flags; |
0c5b3718 | 1135 | int ret_fcntl; |
b3530820 JG |
1136 | |
1137 | ret_code = LTTCOMM_CONSUMERD_SUCCESS; | |
1138 | /* Set the pipe as non-blocking. */ | |
0c5b3718 SM |
1139 | ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0); |
1140 | if (ret_fcntl == -1) { | |
b3530820 JG |
1141 | PERROR("fcntl get flags of the channel monitoring pipe"); |
1142 | goto error_fatal; | |
1143 | } | |
0c5b3718 | 1144 | flags = ret_fcntl; |
b3530820 | 1145 | |
0c5b3718 | 1146 | ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL, |
b3530820 | 1147 | flags | O_NONBLOCK); |
0c5b3718 | 1148 | if (ret_fcntl == -1) { |
b3530820 JG |
1149 | PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe"); |
1150 | goto error_fatal; | |
1151 | } | |
1152 | DBG("Channel monitor pipe set as non-blocking"); | |
1153 | } else { | |
1154 | ret_code = LTTCOMM_CONSUMERD_ALREADY_SET; | |
1155 | } | |
0c5b3718 SM |
1156 | ret_send_status = consumer_send_status_msg(sock, ret_code); |
1157 | if (ret_send_status < 0) { | |
b3530820 JG |
1158 | goto error_fatal; |
1159 | } | |
1160 | break; | |
1161 | } | |
b99a8d42 JD |
1162 | case LTTNG_CONSUMER_ROTATE_CHANNEL: |
1163 | { | |
92b7a7f8 MD |
1164 | struct lttng_consumer_channel *channel; |
1165 | uint64_t key = msg.u.rotate_channel.key; | |
0c5b3718 | 1166 | int ret_send_status; |
b99a8d42 | 1167 | |
92b7a7f8 | 1168 | DBG("Consumer rotate channel %" PRIu64, key); |
b99a8d42 | 1169 | |
92b7a7f8 MD |
1170 | channel = consumer_find_channel(key); |
1171 | if (!channel) { | |
1172 | ERR("Channel %" PRIu64 " not found", key); | |
1173 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; | |
1174 | } else { | |
1175 | /* | |
1176 | * Sample the rotate position of all the streams in this channel. | |
1177 | */ | |
0c5b3718 SM |
1178 | int ret_rotate_channel; |
1179 | ||
1180 | ret_rotate_channel = lttng_consumer_rotate_channel( | |
1181 | channel, key, | |
92b7a7f8 | 1182 | msg.u.rotate_channel.relayd_id, |
0c5b3718 SM |
1183 | msg.u.rotate_channel.metadata, ctx); |
1184 | if (ret_rotate_channel < 0) { | |
92b7a7f8 MD |
1185 | ERR("Rotate channel failed"); |
1186 | ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL; | |
1187 | } | |
b99a8d42 | 1188 | |
92b7a7f8 MD |
1189 | health_code_update(); |
1190 | } | |
0c5b3718 SM |
1191 | |
1192 | ret_send_status = consumer_send_status_msg(sock, ret_code); | |
1193 | if (ret_send_status < 0) { | |
b99a8d42 | 1194 | /* Somehow, the session daemon is not responding anymore. */ |
713bdd26 | 1195 | goto error_rotate_channel; |
b99a8d42 | 1196 | } |
92b7a7f8 MD |
1197 | if (channel) { |
1198 | /* Rotate the streams that are ready right now. */ | |
0c5b3718 SM |
1199 | int ret_rotate; |
1200 | ||
1201 | ret_rotate = lttng_consumer_rotate_ready_streams( | |
92b7a7f8 | 1202 | channel, key, ctx); |
0c5b3718 | 1203 | if (ret_rotate < 0) { |
92b7a7f8 MD |
1204 | ERR("Rotate ready streams failed"); |
1205 | } | |
b99a8d42 | 1206 | } |
b99a8d42 | 1207 | break; |
713bdd26 JG |
1208 | error_rotate_channel: |
1209 | goto end_nosignal; | |
b99a8d42 | 1210 | } |
5f3aff8b MD |
1211 | case LTTNG_CONSUMER_CLEAR_CHANNEL: |
1212 | { | |
1213 | struct lttng_consumer_channel *channel; | |
1214 | uint64_t key = msg.u.clear_channel.key; | |
0c5b3718 | 1215 | int ret_send_status; |
5f3aff8b MD |
1216 | |
1217 | channel = consumer_find_channel(key); | |
1218 | if (!channel) { | |
1219 | DBG("Channel %" PRIu64 " not found", key); | |
1220 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; | |
1221 | } else { | |
0c5b3718 SM |
1222 | int ret_clear_channel; |
1223 | ||
1224 | ret_clear_channel = | |
1225 | lttng_consumer_clear_channel(channel); | |
1226 | if (ret_clear_channel) { | |
5f3aff8b | 1227 | ERR("Clear channel failed"); |
97535efa | 1228 | ret_code = (lttcomm_return_code) ret_clear_channel; |
5f3aff8b MD |
1229 | } |
1230 | ||
1231 | health_code_update(); | |
1232 | } | |
0c5b3718 SM |
1233 | |
1234 | ret_send_status = consumer_send_status_msg(sock, ret_code); | |
1235 | if (ret_send_status < 0) { | |
5f3aff8b MD |
1236 | /* Somehow, the session daemon is not responding anymore. */ |
1237 | goto end_nosignal; | |
1238 | } | |
1239 | ||
1240 | break; | |
1241 | } | |
d2956687 | 1242 | case LTTNG_CONSUMER_INIT: |
00fb02ac | 1243 | { |
0c5b3718 SM |
1244 | int ret_send_status; |
1245 | ||
d2956687 JG |
1246 | ret_code = lttng_consumer_init_command(ctx, |
1247 | msg.u.init.sessiond_uuid); | |
00fb02ac | 1248 | health_code_update(); |
0c5b3718 SM |
1249 | ret_send_status = consumer_send_status_msg(sock, ret_code); |
1250 | if (ret_send_status < 0) { | |
00fb02ac JD |
1251 | /* Somehow, the session daemon is not responding anymore. */ |
1252 | goto end_nosignal; | |
1253 | } | |
1254 | break; | |
1255 | } | |
d2956687 | 1256 | case LTTNG_CONSUMER_CREATE_TRACE_CHUNK: |
d88744a4 | 1257 | { |
d2956687 | 1258 | const struct lttng_credentials credentials = { |
ff588497 JR |
1259 | .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid), |
1260 | .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid), | |
d2956687 JG |
1261 | }; |
1262 | const bool is_local_trace = | |
1263 | !msg.u.create_trace_chunk.relayd_id.is_set; | |
1264 | const uint64_t relayd_id = | |
1265 | msg.u.create_trace_chunk.relayd_id.value; | |
1266 | const char *chunk_override_name = | |
1267 | *msg.u.create_trace_chunk.override_name ? | |
1268 | msg.u.create_trace_chunk.override_name : | |
1269 | NULL; | |
cbf53d23 | 1270 | struct lttng_directory_handle *chunk_directory_handle = NULL; |
d88744a4 | 1271 | |
d2956687 JG |
1272 | /* |
1273 | * The session daemon will only provide a chunk directory file | |
1274 | * descriptor for local traces. | |
1275 | */ | |
1276 | if (is_local_trace) { | |
1277 | int chunk_dirfd; | |
0c5b3718 SM |
1278 | int ret_send_status; |
1279 | ssize_t ret_recv; | |
19990ed5 | 1280 | |
d2956687 | 1281 | /* Acknowledge the reception of the command. */ |
0c5b3718 SM |
1282 | ret_send_status = consumer_send_status_msg( |
1283 | sock, LTTCOMM_CONSUMERD_SUCCESS); | |
1284 | if (ret_send_status < 0) { | |
d2956687 JG |
1285 | /* Somehow, the session daemon is not responding anymore. */ |
1286 | goto end_nosignal; | |
1287 | } | |
92816cc3 | 1288 | |
0c5b3718 SM |
1289 | ret_recv = lttcomm_recv_fds_unix_sock( |
1290 | sock, &chunk_dirfd, 1); | |
1291 | if (ret_recv != sizeof(chunk_dirfd)) { | |
d2956687 JG |
1292 | ERR("Failed to receive trace chunk directory file descriptor"); |
1293 | goto error_fatal; | |
1294 | } | |
92816cc3 | 1295 | |
d2956687 JG |
1296 | DBG("Received trace chunk directory fd (%d)", |
1297 | chunk_dirfd); | |
cbf53d23 | 1298 | chunk_directory_handle = lttng_directory_handle_create_from_dirfd( |
d2956687 | 1299 | chunk_dirfd); |
cbf53d23 | 1300 | if (!chunk_directory_handle) { |
d2956687 JG |
1301 | ERR("Failed to initialize chunk directory handle from directory file descriptor"); |
1302 | if (close(chunk_dirfd)) { | |
1303 | PERROR("Failed to close chunk directory file descriptor"); | |
1304 | } | |
1305 | goto error_fatal; | |
1306 | } | |
92816cc3 JG |
1307 | } |
1308 | ||
d2956687 JG |
1309 | ret_code = lttng_consumer_create_trace_chunk( |
1310 | !is_local_trace ? &relayd_id : NULL, | |
1311 | msg.u.create_trace_chunk.session_id, | |
1312 | msg.u.create_trace_chunk.chunk_id, | |
e5add6d0 JG |
1313 | (time_t) msg.u.create_trace_chunk |
1314 | .creation_timestamp, | |
d2956687 | 1315 | chunk_override_name, |
e5add6d0 JG |
1316 | msg.u.create_trace_chunk.credentials.is_set ? |
1317 | &credentials : | |
1318 | NULL, | |
cbf53d23 JG |
1319 | chunk_directory_handle); |
1320 | lttng_directory_handle_put(chunk_directory_handle); | |
d2956687 | 1321 | goto end_msg_sessiond; |
d88744a4 | 1322 | } |
d2956687 | 1323 | case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK: |
a1ae2ea5 | 1324 | { |
bbc4768c | 1325 | enum lttng_trace_chunk_command_type close_command = |
97535efa | 1326 | (lttng_trace_chunk_command_type) msg.u.close_trace_chunk.close_command.value; |
d2956687 JG |
1327 | const uint64_t relayd_id = |
1328 | msg.u.close_trace_chunk.relayd_id.value; | |
ecd1a12f MD |
1329 | struct lttcomm_consumer_close_trace_chunk_reply reply; |
1330 | char path[LTTNG_PATH_MAX]; | |
0c5b3718 | 1331 | ssize_t ret_send; |
d2956687 JG |
1332 | |
1333 | ret_code = lttng_consumer_close_trace_chunk( | |
1334 | msg.u.close_trace_chunk.relayd_id.is_set ? | |
bbc4768c JG |
1335 | &relayd_id : |
1336 | NULL, | |
d2956687 JG |
1337 | msg.u.close_trace_chunk.session_id, |
1338 | msg.u.close_trace_chunk.chunk_id, | |
bbc4768c JG |
1339 | (time_t) msg.u.close_trace_chunk.close_timestamp, |
1340 | msg.u.close_trace_chunk.close_command.is_set ? | |
1341 | &close_command : | |
ecd1a12f MD |
1342 | NULL, path); |
1343 | reply.ret_code = ret_code; | |
1344 | reply.path_length = strlen(path) + 1; | |
0c5b3718 SM |
1345 | ret_send = lttcomm_send_unix_sock(sock, &reply, sizeof(reply)); |
1346 | if (ret_send != sizeof(reply)) { | |
ecd1a12f MD |
1347 | goto error_fatal; |
1348 | } | |
0c5b3718 SM |
1349 | ret_send = lttcomm_send_unix_sock( |
1350 | sock, path, reply.path_length); | |
1351 | if (ret_send != reply.path_length) { | |
ecd1a12f MD |
1352 | goto error_fatal; |
1353 | } | |
1354 | goto end_nosignal; | |
3654ed19 | 1355 | } |
d2956687 | 1356 | case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS: |
3654ed19 | 1357 | { |
d2956687 JG |
1358 | const uint64_t relayd_id = |
1359 | msg.u.trace_chunk_exists.relayd_id.value; | |
1360 | ||
1361 | ret_code = lttng_consumer_trace_chunk_exists( | |
1362 | msg.u.trace_chunk_exists.relayd_id.is_set ? | |
1363 | &relayd_id : NULL, | |
1364 | msg.u.trace_chunk_exists.session_id, | |
1365 | msg.u.trace_chunk_exists.chunk_id); | |
1366 | goto end_msg_sessiond; | |
a1ae2ea5 | 1367 | } |
04ed9e10 JG |
1368 | case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS: |
1369 | { | |
1370 | const uint64_t key = msg.u.open_channel_packets.key; | |
1371 | struct lttng_consumer_channel *channel = | |
1372 | consumer_find_channel(key); | |
1373 | ||
1374 | if (channel) { | |
1375 | pthread_mutex_lock(&channel->lock); | |
1376 | ret_code = lttng_consumer_open_channel_packets(channel); | |
1377 | pthread_mutex_unlock(&channel->lock); | |
1378 | } else { | |
1379 | WARN("Channel %" PRIu64 " not found", key); | |
1380 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; | |
1381 | } | |
1382 | ||
1383 | health_code_update(); | |
1384 | goto end_msg_sessiond; | |
1385 | } | |
3bd1e081 | 1386 | default: |
3f8e211f | 1387 | goto end_nosignal; |
3bd1e081 | 1388 | } |
3f8e211f | 1389 | |
3bd1e081 | 1390 | end_nosignal: |
4cbc1a04 DG |
1391 | /* |
1392 | * Return 1 to indicate success since the 0 value can be a socket | |
1393 | * shutdown during the recv() or send() call. | |
1394 | */ | |
0c5b3718 | 1395 | ret_func = 1; |
c5c7998f JG |
1396 | goto end; |
1397 | error_fatal: | |
1398 | /* This will issue a consumer stop. */ | |
0c5b3718 | 1399 | ret_func = -1; |
c5c7998f | 1400 | goto end; |
d2956687 JG |
1401 | end_msg_sessiond: |
1402 | /* | |
1403 | * The returned value here is not useful since either way we'll return 1 to | |
1404 | * the caller because the session daemon socket management is done | |
1405 | * elsewhere. Returning a negative code or 0 will shut down the consumer. |
1406 | */ | |
0c5b3718 SM |
1407 | { |
1408 | int ret_send_status; | |
1409 | ||
1410 | ret_send_status = consumer_send_status_msg(sock, ret_code); | |
1411 | if (ret_send_status < 0) { | |
1412 | goto error_fatal; | |
1413 | } | |
d2956687 | 1414 | } |
0c5b3718 SM |
1415 | |
1416 | ret_func = 1; | |
1417 | ||
c5c7998f | 1418 | end: |
d2956687 | 1419 | health_code_update(); |
1803a064 | 1420 | rcu_read_unlock(); |
0c5b3718 | 1421 | return ret_func; |
3bd1e081 | 1422 | } |
d41f73b7 | 1423 | |
94d49140 JD |
1424 | /* |
1425 | * Sync metadata: request it from the session daemon and take a snapshot so |
1426 | * that the metadata thread can consume it. |
1427 | * | |
1428 | * Metadata stream lock MUST be acquired. | |
94d49140 | 1429 | */ |
577eea73 JG |
1430 | enum sync_metadata_status lttng_kconsumer_sync_metadata( |
1431 | struct lttng_consumer_stream *metadata) | |
94d49140 JD |
1432 | { |
1433 | int ret; | |
577eea73 | 1434 | enum sync_metadata_status status; |
94d49140 | 1435 | |
a0377dfe | 1436 | LTTNG_ASSERT(metadata); |
94d49140 JD |
1437 | |
1438 | ret = kernctl_buffer_flush(metadata->wait_fd); | |
1439 | if (ret < 0) { | |
1440 | ERR("Failed to flush kernel stream"); | |
577eea73 | 1441 | status = SYNC_METADATA_STATUS_ERROR; |
94d49140 JD |
1442 | goto end; |
1443 | } | |
1444 | ||
1445 | ret = kernctl_snapshot(metadata->wait_fd); | |
1446 | if (ret < 0) { | |
577eea73 JG |
1447 | if (errno == EAGAIN) { |
1448 | /* No new metadata, exit. */ | |
1449 | DBG("Sync metadata, no new kernel metadata"); | |
1450 | status = SYNC_METADATA_STATUS_NO_DATA; | |
1451 | } else { | |
94d49140 | 1452 | ERR("Sync metadata, taking kernel snapshot failed."); |
577eea73 | 1453 | status = SYNC_METADATA_STATUS_ERROR; |
94d49140 | 1454 | } |
577eea73 JG |
1455 | } else { |
1456 | status = SYNC_METADATA_STATUS_NEW_DATA; | |
94d49140 JD |
1457 | } |
1458 | ||
1459 | end: | |
577eea73 | 1460 | return status; |
94d49140 | 1461 | } |
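/*
 * Illustrative usage sketch (not part of this file; assumes the caller
 * already holds the metadata stream lock as required above):
 *
 *   enum sync_metadata_status status;
 *
 *   status = lttng_kconsumer_sync_metadata(metadata_stream);
 *   switch (status) {
 *   case SYNC_METADATA_STATUS_NEW_DATA:
 *           // a metadata sub-buffer is ready for the metadata thread
 *           break;
 *   case SYNC_METADATA_STATUS_NO_DATA:
 *           // nothing new to consume
 *           break;
 *   case SYNC_METADATA_STATUS_ERROR:
 *           // abort the metadata synchronization
 *           break;
 *   }
 */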
309167d2 | 1462 | |
fb83fe64 | 1463 | static |
6f9449c2 JG |
1464 | int extract_common_subbuffer_info(struct lttng_consumer_stream *stream, |
1465 | struct stream_subbuffer *subbuf) | |
fb83fe64 JD |
1466 | { |
1467 | int ret; | |
fb83fe64 | 1468 | |
6f9449c2 JG |
1469 | ret = kernctl_get_subbuf_size( |
1470 | stream->wait_fd, &subbuf->info.data.subbuf_size); | |
1471 | if (ret) { | |
fb83fe64 JD |
1472 | goto end; |
1473 | } | |
fb83fe64 | 1474 | |
6f9449c2 JG |
1475 | ret = kernctl_get_padded_subbuf_size( |
1476 | stream->wait_fd, &subbuf->info.data.padded_subbuf_size); | |
1477 | if (ret) { | |
fb83fe64 JD |
1478 | goto end; |
1479 | } | |
fb83fe64 JD |
1480 | |
1481 | end: | |
1482 | return ret; | |
1483 | } | |
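/*
 * Illustrative note: padded_subbuf_size is the sub-buffer size rounded up to
 * the ring buffer's alignment, so it is always >= subbuf_size; readers that
 * move whole sub-buffers (mmap or splice) use the padded size, while the
 * unpadded size reflects the data actually produced. The figures below are
 * purely illustrative:
 *
 *   subbuf_size        = 60
 *   padded_subbuf_size = 64
 */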
1484 | ||
93ec662e | 1485 | static |
6f9449c2 JG |
1486 | int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream, |
1487 | struct stream_subbuffer *subbuf) | |
93ec662e JD |
1488 | { |
1489 | int ret; | |
93ec662e | 1490 | |
6f9449c2 JG |
1491 | ret = extract_common_subbuffer_info(stream, subbuf); |
1492 | if (ret) { | |
93ec662e JD |
1493 | goto end; |
1494 | } | |
1495 | ||
6f9449c2 JG |
1496 | ret = kernctl_get_metadata_version( |
1497 | stream->wait_fd, &subbuf->info.metadata.version); | |
1498 | if (ret) { | |
93ec662e JD |
1499 | goto end; |
1500 | } | |
1501 | ||
93ec662e JD |
1502 | end: |
1503 | return ret; | |
1504 | } | |
1505 | ||
6f9449c2 JG |
1506 | static |
1507 | int extract_data_subbuffer_info(struct lttng_consumer_stream *stream, | |
1508 | struct stream_subbuffer *subbuf) | |
d41f73b7 | 1509 | { |
6f9449c2 | 1510 | int ret; |
d41f73b7 | 1511 | |
6f9449c2 JG |
1512 | ret = extract_common_subbuffer_info(stream, subbuf); |
1513 | if (ret) { | |
1514 | goto end; | |
1515 | } | |
309167d2 | 1516 | |
6f9449c2 JG |
1517 | ret = kernctl_get_packet_size( |
1518 | stream->wait_fd, &subbuf->info.data.packet_size); | |
1519 | if (ret < 0) { | |
1520 | PERROR("Failed to get sub-buffer packet size"); | |
1521 | goto end; | |
1522 | } | |
02d02e31 | 1523 | |
6f9449c2 JG |
1524 | ret = kernctl_get_content_size( |
1525 | stream->wait_fd, &subbuf->info.data.content_size); | |
1526 | if (ret < 0) { | |
1527 | PERROR("Failed to get sub-buffer content size"); | |
1528 | goto end; | |
d41f73b7 MD |
1529 | } |
1530 | ||
6f9449c2 JG |
1531 | ret = kernctl_get_timestamp_begin( |
1532 | stream->wait_fd, &subbuf->info.data.timestamp_begin); | |
1533 | if (ret < 0) { | |
1534 | PERROR("Failed to get sub-buffer begin timestamp"); | |
1535 | goto end; | |
1d4dfdef DG |
1536 | } |
1537 | ||
6f9449c2 JG |
1538 | ret = kernctl_get_timestamp_end( |
1539 | stream->wait_fd, &subbuf->info.data.timestamp_end); | |
1540 | if (ret < 0) { | |
1541 | PERROR("Failed to get sub-buffer end timestamp"); | |
1542 | goto end; | |
1543 | } | |
1544 | ||
1545 | ret = kernctl_get_events_discarded( | |
1546 | stream->wait_fd, &subbuf->info.data.events_discarded); | |
1547 | if (ret) { | |
1548 | PERROR("Failed to get sub-buffer events discarded count"); | |
1549 | goto end; | |
1550 | } | |
1551 | ||
1552 | ret = kernctl_get_sequence_number(stream->wait_fd, | |
1553 | &subbuf->info.data.sequence_number.value); | |
1554 | if (ret) { | |
1555 | /* May not be supported by older LTTng-modules. */ | |
1556 | if (ret != -ENOTTY) { | |
1557 | PERROR("Failed to get sub-buffer sequence number"); | |
1558 | goto end; | |
fb83fe64 | 1559 | } |
1c20f0e2 | 1560 | } else { |
6f9449c2 | 1561 | subbuf->info.data.sequence_number.is_set = true; |
309167d2 JD |
1562 | } |
1563 | ||
6f9449c2 JG |
1564 | ret = kernctl_get_stream_id( |
1565 | stream->wait_fd, &subbuf->info.data.stream_id); | |
1566 | if (ret < 0) { | |
1567 | PERROR("Failed to get stream id"); | |
1568 | goto end; | |
1569 | } | |
1d4dfdef | 1570 | |
6f9449c2 JG |
1571 | ret = kernctl_get_instance_id(stream->wait_fd, |
1572 | &subbuf->info.data.stream_instance_id.value); | |
1573 | if (ret) { | |
1574 | /* May not be supported by older LTTng-modules. */ | |
1575 | if (ret != -ENOTTY) { | |
1576 | PERROR("Failed to get stream instance id"); | |
1577 | goto end; | |
1d4dfdef | 1578 | } |
6f9449c2 JG |
1579 | } else { |
1580 | subbuf->info.data.stream_instance_id.is_set = true; | |
1581 | } | |
1582 | end: | |
1583 | return ret; | |
1584 | } | |
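/*
 * Illustrative note: sequence_number and stream_instance_id are optional
 * because older lttng-modules return -ENOTTY for those ioctls; code that
 * consumes this structure should check the is_set flag before reading the
 * value, e.g. (sketch only):
 *
 *   if (subbuf->info.data.sequence_number.is_set) {
 *           uint64_t seq = subbuf->info.data.sequence_number.value;
 *           ...
 *   }
 */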
47e81c02 | 1585 | |
6f9449c2 | 1586 | static |
b6797c8e JG |
1587 | enum get_next_subbuffer_status get_subbuffer_common( |
1588 | struct lttng_consumer_stream *stream, | |
6f9449c2 JG |
1589 | struct stream_subbuffer *subbuffer) |
1590 | { | |
1591 | int ret; | |
b6797c8e | 1592 | enum get_next_subbuffer_status status; |
6f9449c2 JG |
1593 | |
1594 | ret = kernctl_get_next_subbuf(stream->wait_fd); | |
b6797c8e JG |
1595 | switch (ret) { |
1596 | case 0: | |
1597 | status = GET_NEXT_SUBBUFFER_STATUS_OK; | |
1598 | break; | |
1599 | case -ENODATA: | |
1600 | case -EAGAIN: | |
6e5e3c51 MD |
1601 | /* |
1602 | * The caller only expects -ENODATA when there is no data to | |
1603 | * read, but the kernel tracer returns -EAGAIN when there is | |
1604 | * currently no data for a non-finalized stream, and -ENODATA | |
1605 | * when there is no data for a finalized stream. Those can be | |
1606 | * combined into a -ENODATA return value. | |
1607 | */ | |
b6797c8e JG |
1608 | status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA; |
1609 | goto end; | |
1610 | default: | |
1611 | status = GET_NEXT_SUBBUFFER_STATUS_ERROR; | |
6f9449c2 JG |
1612 | goto end; |
1613 | } | |
1614 | ||
1615 | ret = stream->read_subbuffer_ops.extract_subbuffer_info( | |
b6797c8e JG |
1616 | stream, subbuffer); |
1617 | if (ret) { | |
1618 | status = GET_NEXT_SUBBUFFER_STATUS_ERROR; | |
1619 | } | |
6f9449c2 | 1620 | end: |
b6797c8e | 1621 | return status; |
6f9449c2 | 1622 | } |
128708c3 | 1623 | |
6f9449c2 | 1624 | static |
b6797c8e JG |
1625 | enum get_next_subbuffer_status get_next_subbuffer_splice( |
1626 | struct lttng_consumer_stream *stream, | |
6f9449c2 JG |
1627 | struct stream_subbuffer *subbuffer) |
1628 | { | |
b6797c8e JG |
1629 | const enum get_next_subbuffer_status status = |
1630 | get_subbuffer_common(stream, subbuffer); | |
1d4dfdef | 1631 | |
b6797c8e | 1632 | if (status != GET_NEXT_SUBBUFFER_STATUS_OK) { |
6f9449c2 JG |
1633 | goto end; |
1634 | } | |
1d4dfdef | 1635 | |
6f9449c2 JG |
1636 | subbuffer->buffer.fd = stream->wait_fd; |
1637 | end: | |
b6797c8e | 1638 | return status; |
6f9449c2 | 1639 | } |
fd424d99 | 1640 | |
6f9449c2 | 1641 | static |
b6797c8e JG |
1642 | enum get_next_subbuffer_status get_next_subbuffer_mmap( |
1643 | struct lttng_consumer_stream *stream, | |
6f9449c2 JG |
1644 | struct stream_subbuffer *subbuffer) |
1645 | { | |
1646 | int ret; | |
b6797c8e | 1647 | enum get_next_subbuffer_status status; |
6f9449c2 JG |
1648 | const char *addr; |
1649 | ||
b6797c8e JG |
1650 | status = get_subbuffer_common(stream, subbuffer); |
1651 | if (status != GET_NEXT_SUBBUFFER_STATUS_OK) { | |
6f9449c2 | 1652 | goto end; |
128708c3 | 1653 | } |
6f9449c2 JG |
1654 | |
1655 | ret = get_current_subbuf_addr(stream, &addr); | |
1656 | if (ret) { | |
b6797c8e | 1657 | status = GET_NEXT_SUBBUFFER_STATUS_ERROR; |
6f9449c2 | 1658 | goto end; |
d41f73b7 | 1659 | } |
6f9449c2 JG |
1660 | |
1661 | subbuffer->buffer.buffer = lttng_buffer_view_init( | |
1662 | addr, 0, subbuffer->info.data.padded_subbuf_size); | |
1663 | end: | |
b6797c8e | 1664 | return status; |
6f9449c2 JG |
1665 | } |
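/*
 * Illustrative note: the view returned above covers the padded sub-buffer, so
 * a mmap consumer forwards subbuffer->buffer.buffer.size bytes starting at
 * subbuffer->buffer.buffer.data (sketch only; out_fd is a hypothetical output
 * descriptor and error handling is omitted):
 *
 *   ssize_t written = write(out_fd, subbuffer->buffer.buffer.data,
 *                   subbuffer->buffer.buffer.size);
 */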
1666 | ||
f5ba75b4 | 1667 | static |
b6797c8e | 1668 | enum get_next_subbuffer_status get_next_subbuffer_metadata_check(struct lttng_consumer_stream *stream, |
f5ba75b4 JG |
1669 | struct stream_subbuffer *subbuffer) |
1670 | { | |
1671 | int ret; | |
1672 | const char *addr; | |
1673 | bool coherent; | |
b6797c8e | 1674 | enum get_next_subbuffer_status status; |
f5ba75b4 JG |
1675 | |
1676 | ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd, | |
1677 | &coherent); | |
1678 | if (ret) { | |
1679 | goto end; | |
1680 | } | |
1681 | ||
1682 | ret = stream->read_subbuffer_ops.extract_subbuffer_info( | |
1683 | stream, subbuffer); | |
1684 | if (ret) { | |
1685 | goto end; | |
1686 | } | |
1687 | ||
1688 | LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent); | |
1689 | ||
1690 | ret = get_current_subbuf_addr(stream, &addr); | |
1691 | if (ret) { | |
1692 | goto end; | |
1693 | } | |
1694 | ||
1695 | subbuffer->buffer.buffer = lttng_buffer_view_init( | |
1696 | addr, 0, subbuffer->info.data.padded_subbuf_size); | |
1697 | DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s", | |
1698 | subbuffer->info.metadata.padded_subbuf_size, | |
1699 | coherent ? "true" : "false"); | |
1700 | end: | |
b6797c8e JG |
1707 | switch (ret) { |
1708 | case 0: | |
1709 | status = GET_NEXT_SUBBUFFER_STATUS_OK; | |
1710 | break; | |
1711 | case -ENODATA: | |
1712 | case -EAGAIN: | |
1713 | /* | |
1714 | * The caller only expects -ENODATA when there is no data to | |
1715 | * read, but the kernel tracer returns -EAGAIN when there is | |
1716 | * currently no data for a non-finalized stream, and -ENODATA | |
1717 | * when there is no data for a finalized stream. Those can be | |
1718 | * combined into a -ENODATA return value. | |
1719 | */ | |
1720 | status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA; | |
1721 | break; | |
1722 | default: | |
1723 | status = GET_NEXT_SUBBUFFER_STATUS_ERROR; | |
1724 | break; | |
6e5e3c51 MD |
1725 | } |
1726 | ||
b6797c8e | 1727 | return status; |
f5ba75b4 JG |
1728 | } |
1729 | ||
6f9449c2 JG |
1730 | static |
1731 | int put_next_subbuffer(struct lttng_consumer_stream *stream, | |
1732 | struct stream_subbuffer *subbuffer) | |
1733 | { | |
1734 | const int ret = kernctl_put_next_subbuf(stream->wait_fd); | |
1735 | ||
1736 | if (ret) { | |
1737 | if (ret == -EFAULT) { | |
1738 | PERROR("Error unreserving sub-buffer"); |
1739 | } else if (ret == -EIO) { | |
d41f73b7 | 1740 | /* Should never happen with newer LTTng versions */ |
6f9449c2 | 1741 | PERROR("Reader has been pushed by the writer, last sub-buffer corrupted"); |
d41f73b7 | 1742 | } |
d41f73b7 MD |
1743 | } |
1744 | ||
6f9449c2 JG |
1745 | return ret; |
1746 | } | |
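/*
 * Illustrative note: the generic read path pairs get_next_subbuffer with
 * put_next_subbuffer through stream->read_subbuffer_ops; a simplified sketch
 * of one cycle (mmap case, names as in this file):
 *
 *   status = get_next_subbuffer_mmap(stream, &subbuf);
 *   if (status == GET_NEXT_SUBBUFFER_STATUS_OK) {
 *           // consume subbuf.buffer.buffer here
 *           put_next_subbuffer(stream, &subbuf);
 *   }
 */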
1c20f0e2 | 1747 | |
f5ba75b4 JG |
1748 | static |
1749 | bool is_get_next_check_metadata_available(int tracer_fd) | |
1750 | { | |
741e787b JG |
1751 | const int ret = kernctl_get_next_subbuf_metadata_check(tracer_fd, NULL); |
1752 | const bool available = ret != -ENOTTY; | |
1753 | ||
1754 | if (ret == 0) { | |
1755 | /* get succeeded, make sure to put the subbuffer. */ | |
1756 | kernctl_put_subbuf(tracer_fd); | |
1757 | } | |
1758 | ||
1759 | return available; | |
f5ba75b4 JG |
1760 | } |
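/*
 * Illustrative note: the probe above calls the ioctl with a NULL coherency
 * pointer; -ENOTTY means the running lttng-modules does not implement it,
 * while any other result (including success) means it is available. A
 * successful probe implicitly reserves a sub-buffer, hence the
 * kernctl_put_subbuf() call.
 */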
1761 | ||
091441eb MD |
1762 | static |
1763 | int signal_metadata(struct lttng_consumer_stream *stream, | |
1764 | struct lttng_consumer_local_data *ctx) | |
1765 | { | |
1766 | ASSERT_LOCKED(stream->metadata_rdv_lock); | |
1767 | return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0; | |
1768 | } | |
1769 | ||
f5ba75b4 JG |
1770 | static |
1771 | int lttng_kconsumer_set_stream_ops( | |
6f9449c2 JG |
1772 | struct lttng_consumer_stream *stream) |
1773 | { | |
f5ba75b4 JG |
1774 | int ret = 0; |
1775 | ||
1776 | if (stream->metadata_flag && stream->chan->is_live) { | |
1777 | DBG("Attempting to enable metadata bucketization for live consumers"); | |
1778 | if (is_get_next_check_metadata_available(stream->wait_fd)) { | |
1779 | DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached"); | |
1780 | stream->read_subbuffer_ops.get_next_subbuffer = | |
1781 | get_next_subbuffer_metadata_check; | |
1782 | ret = consumer_stream_enable_metadata_bucketization( | |
1783 | stream); | |
1784 | if (ret) { | |
1785 | goto end; | |
1786 | } | |
1787 | } else { | |
1788 | /* | |
1789 | * The kernel tracer version is too old to indicate | |
1790 | * when the metadata stream has reached a "coherent" | |
1791 | * (parseable) point. | |
1792 | * | |
1793 | * This means that a live viewer may see an incoherent | |
1794 | * sequence of metadata and fail to parse it. | |
1795 | */ | |
1796 | WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream"); | |
1797 | metadata_bucket_destroy(stream->metadata_bucket); | |
1798 | stream->metadata_bucket = NULL; | |
1799 | } | |
091441eb MD |
1800 | |
1801 | stream->read_subbuffer_ops.on_sleep = signal_metadata; | |
f5ba75b4 JG |
1802 | } |
1803 | ||
1804 | if (!stream->read_subbuffer_ops.get_next_subbuffer) { | |
1805 | if (stream->chan->output == CONSUMER_CHANNEL_MMAP) { | |
1806 | stream->read_subbuffer_ops.get_next_subbuffer = | |
1807 | get_next_subbuffer_mmap; | |
1808 | } else { | |
1809 | stream->read_subbuffer_ops.get_next_subbuffer = | |
1810 | get_next_subbuffer_splice; | |
1811 | } | |
94d49140 JD |
1812 | } |
1813 | ||
6f9449c2 JG |
1814 | if (stream->metadata_flag) { |
1815 | stream->read_subbuffer_ops.extract_subbuffer_info = | |
1816 | extract_metadata_subbuffer_info; | |
1817 | } else { | |
1818 | stream->read_subbuffer_ops.extract_subbuffer_info = | |
1819 | extract_data_subbuffer_info; | |
1820 | if (stream->chan->is_live) { | |
1821 | stream->read_subbuffer_ops.send_live_beacon = | |
1822 | consumer_flush_kernel_index; | |
1823 | } | |
309167d2 JD |
1824 | } |
1825 | ||
6f9449c2 | 1826 | stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer; |
f5ba75b4 JG |
1827 | end: |
1828 | return ret; | |
d41f73b7 MD |
1829 | } |
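/*
 * Summary of the resulting dispatch (derived from the logic above):
 *
 *   live metadata, recent tracer  -> get_next_subbuffer_metadata_check
 *   mmap channel                  -> get_next_subbuffer_mmap
 *   splice channel                -> get_next_subbuffer_splice
 *   metadata stream               -> extract_metadata_subbuffer_info
 *   data stream                   -> extract_data_subbuffer_info
 */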
1830 | ||
1831 | int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream) | |
1832 | { | |
1833 | int ret; | |
ffe60014 | 1834 | |
a0377dfe | 1835 | LTTNG_ASSERT(stream); |
ffe60014 | 1836 | |
2bba9e53 | 1837 | /* |
d2956687 JG |
1838 | * Don't create anything if this is set for streaming or if there is |
1839 | * no current trace chunk on the parent channel. | |
2bba9e53 | 1840 | */ |
d2956687 JG |
1841 | if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor && |
1842 | stream->chan->trace_chunk) { | |
1843 | ret = consumer_stream_create_output_files(stream, true); | |
1844 | if (ret) { | |
fe4477ee JD |
1845 | goto error; |
1846 | } | |
ffe60014 | 1847 | } |
d41f73b7 | 1848 | |
d41f73b7 MD |
1849 | if (stream->output == LTTNG_EVENT_MMAP) { |
1850 | /* get the len of the mmap region */ | |
1851 | unsigned long mmap_len; | |
1852 | ||
1853 | ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len); | |
1854 | if (ret != 0) { | |
ffe60014 | 1855 | PERROR("kernctl_get_mmap_len"); |
d41f73b7 MD |
1856 | goto error_close_fd; |
1857 | } | |
1858 | stream->mmap_len = (size_t) mmap_len; | |
1859 | ||
ffe60014 DG |
1860 | stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ, |
1861 | MAP_PRIVATE, stream->wait_fd, 0); | |
d41f73b7 | 1862 | if (stream->mmap_base == MAP_FAILED) { |
ffe60014 | 1863 | PERROR("Error mmapping"); |
d41f73b7 MD |
1864 | ret = -1; |
1865 | goto error_close_fd; | |
1866 | } | |
1867 | } | |
1868 | ||
f5ba75b4 JG |
1869 | ret = lttng_kconsumer_set_stream_ops(stream); |
1870 | if (ret) { | |
1871 | goto error_close_fd; | |
1872 | } | |
6f9449c2 | 1873 | |
d41f73b7 MD |
1874 | /* we return 0 to let the library handle the FD internally */ |
1875 | return 0; | |
1876 | ||
1877 | error_close_fd: | |
2f225ce2 | 1878 | if (stream->out_fd >= 0) { |
d41f73b7 MD |
1879 | int err; |
1880 | ||
1881 | err = close(stream->out_fd); | |
a0377dfe | 1882 | LTTNG_ASSERT(!err); |
2f225ce2 | 1883 | stream->out_fd = -1; |
d41f73b7 MD |
1884 | } |
1885 | error: | |
1886 | return ret; | |
1887 | } | |
1888 | ||
ca22feea DG |
1889 | /* |
1890 | * Check if data is still being extracted from the buffers for a specific | |
4e9a4686 DG |
1891 | * stream. The consumer data lock and the stream lock MUST be acquired before |
1892 | * calling this function. |
ca22feea | 1893 | * |
6d805429 | 1894 | * Return 1 if the traced data is still being read, else 0, meaning that the |
ca22feea DG |
1895 | * data is available for the trace viewer to read. |
1896 | */ | |
6d805429 | 1897 | int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream) |
ca22feea DG |
1898 | { |
1899 | int ret; | |
1900 | ||
a0377dfe | 1901 | LTTNG_ASSERT(stream); |
ca22feea | 1902 | |
873b9e9a MD |
1903 | if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) { |
1904 | ret = 0; | |
1905 | goto end; | |
1906 | } | |
1907 | ||
ca22feea DG |
1908 | ret = kernctl_get_next_subbuf(stream->wait_fd); |
1909 | if (ret == 0) { | |
1910 | /* There is still data so let's put back this subbuffer. */ | |
1911 | ret = kernctl_put_subbuf(stream->wait_fd); | |
a0377dfe | 1912 | LTTNG_ASSERT(ret == 0); |
6d805429 | 1913 | ret = 1; /* Data is pending */ |
4e9a4686 | 1914 | goto end; |
ca22feea DG |
1915 | } |
1916 | ||
6d805429 DG |
1917 | /* Data is NOT pending and ready to be read. */ |
1918 | ret = 0; | |
ca22feea | 1919 | |
6efae65e DG |
1920 | end: |
1921 | return ret; | |
ca22feea | 1922 | } |
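/*
 * Illustrative note: the pending check above simply tries to reserve the next
 * sub-buffer; success means unread data remains, so the sub-buffer is put
 * back immediately and 1 is returned. Any other outcome, or an inactive
 * endpoint, is reported as "no data pending" (0).
 */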