Commit | Line | Data |
---|---|---|
3bd1e081 | 1 | /* |
ab5be9fa MJ |
2 | * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca> |
3 | * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
4 | * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com> | |
3bd1e081 | 5 | * |
ab5be9fa | 6 | * SPDX-License-Identifier: GPL-2.0-only |
3bd1e081 | 7 | * |
3bd1e081 MD |
8 | */ |
9 | ||
6c1c0768 | 10 | #define _LGPL_SOURCE |
3bd1e081 | 11 | #include <assert.h> |
3bd1e081 MD |
12 | #include <poll.h> |
13 | #include <pthread.h> | |
14 | #include <stdlib.h> | |
15 | #include <string.h> | |
16 | #include <sys/mman.h> | |
17 | #include <sys/socket.h> | |
18 | #include <sys/types.h> | |
77c7c900 | 19 | #include <inttypes.h> |
3bd1e081 | 20 | #include <unistd.h> |
dbb5dfe6 | 21 | #include <sys/stat.h> |
f5ba75b4 | 22 | #include <stdint.h> |
3bd1e081 | 23 | |
51a9e1c7 | 24 | #include <bin/lttng-consumerd/health-consumerd.h> |
990570ed | 25 | #include <common/common.h> |
10a8a223 | 26 | #include <common/kernel-ctl/kernel-ctl.h> |
10a8a223 | 27 | #include <common/sessiond-comm/sessiond-comm.h> |
00e2e675 | 28 | #include <common/sessiond-comm/relayd.h> |
dbb5dfe6 | 29 | #include <common/compat/fcntl.h> |
f263b7fd | 30 | #include <common/compat/endian.h> |
acdb9057 | 31 | #include <common/pipe.h> |
00e2e675 | 32 | #include <common/relayd/relayd.h> |
fe4477ee | 33 | #include <common/utils.h> |
c8fea79c | 34 | #include <common/consumer/consumer-stream.h> |
309167d2 | 35 | #include <common/index/index.h> |
c8fea79c | 36 | #include <common/consumer/consumer-timer.h> |
d2956687 | 37 | #include <common/optional.h> |
6f9449c2 JG |
38 | #include <common/buffer-view.h> |
39 | #include <common/consumer/consumer.h> | |
f5ba75b4 | 40 | #include <common/consumer/metadata-bucket.h> |
0857097f | 41 | |
10a8a223 | 42 | #include "kernel-consumer.h" |
3bd1e081 MD |
43 | |
44 | extern struct lttng_consumer_global_data consumer_data; | |
45 | extern int consumer_poll_timeout; | |
3bd1e081 | 46 | |
3bd1e081 MD |
47 | /* |
48 | * Take a snapshot for a specific fd | |
49 | * | |
50 | * Returns 0 on success, < 0 on error | |
51 | */ | |
ffe60014 | 52 | int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream) |
3bd1e081 MD |
53 | { |
54 | int ret = 0; | |
55 | int infd = stream->wait_fd; | |
56 | ||
57 | ret = kernctl_snapshot(infd); | |
d2d2f190 JD |
58 | /* |
59 | * -EAGAIN is not an error, it just means that there is no data to | |
60 | * be read. | |
61 | */ | |
62 | if (ret != 0 && ret != -EAGAIN) { | |
5a510c9f | 63 | PERROR("Getting sub-buffer snapshot."); |
3bd1e081 MD |
64 | } |
65 | ||
66 | return ret; | |
67 | } | |
68 | ||
e9404c27 JG |
69 | /* |
70 | * Sample consumed and produced positions for a specific fd. | |
71 | * | |
72 | * Returns 0 on success, < 0 on error. | |
73 | */ | |
74 | int lttng_kconsumer_sample_snapshot_positions( | |
75 | struct lttng_consumer_stream *stream) | |
76 | { | |
77 | assert(stream); | |
78 | ||
79 | return kernctl_snapshot_sample_positions(stream->wait_fd); | |
80 | } | |
81 | ||
3bd1e081 MD |
82 | /* |
83 | * Get the produced position | |
84 | * | |
85 | * Returns 0 on success, < 0 on error | |
86 | */ | |
ffe60014 | 87 | int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream, |
3bd1e081 MD |
88 | unsigned long *pos) |
89 | { | |
90 | int ret; | |
91 | int infd = stream->wait_fd; | |
92 | ||
93 | ret = kernctl_snapshot_get_produced(infd, pos); | |
94 | if (ret != 0) { | |
5a510c9f | 95 | PERROR("kernctl_snapshot_get_produced"); |
3bd1e081 MD |
96 | } |
97 | ||
98 | return ret; | |
99 | } | |
100 | ||
07b86b52 JD |
101 | /* |
102 | * Get the consumed position |
103 | * | |
104 | * Returns 0 on success, < 0 on error | |
105 | */ | |
106 | int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream, | |
107 | unsigned long *pos) | |
108 | { | |
109 | int ret; | |
110 | int infd = stream->wait_fd; | |
111 | ||
112 | ret = kernctl_snapshot_get_consumed(infd, pos); | |
113 | if (ret != 0) { | |
5a510c9f | 114 | PERROR("kernctl_snapshot_get_consumed"); |
07b86b52 JD |
115 | } |
116 | ||
117 | return ret; | |
118 | } | |
119 | ||
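/*
 * Compute the address of the current sub-buffer within the stream's
 * mmap'd region: the kernel reports the current mmap read offset,
 * which is added to the mapping base address.
 *
 * Returns 0 and sets *addr on success, < 0 on error.
 */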
128708c3 JG |
120 | static |
121 | int get_current_subbuf_addr(struct lttng_consumer_stream *stream, | |
122 | const char **addr) | |
123 | { | |
124 | int ret; | |
125 | unsigned long mmap_offset; | |
126 | const char *mmap_base = stream->mmap_base; | |
127 | ||
128 | ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset); | |
129 | if (ret < 0) { | |
130 | PERROR("Failed to get mmap read offset"); | |
131 | goto error; | |
132 | } | |
133 | ||
134 | *addr = mmap_base + mmap_offset; | |
135 | error: | |
136 | return ret; | |
137 | } | |
138 | ||
07b86b52 JD |
139 | /* |
140 | * Take a snapshot of all the streams of a channel |
3eb928aa | 141 | * RCU read-side lock must be held across this function to ensure existence of |
d2956687 | 142 | * channel. The channel lock must be held by the caller. |
07b86b52 JD |
143 | * |
144 | * Returns 0 on success, < 0 on error | |
145 | */ | |
f72bb42f JG |
146 | static int lttng_kconsumer_snapshot_channel( |
147 | struct lttng_consumer_channel *channel, | |
148 | uint64_t key, char *path, uint64_t relayd_id, | |
149 | uint64_t nb_packets_per_stream, | |
5c786ded | 150 | struct lttng_consumer_local_data *ctx) |
07b86b52 JD |
151 | { |
152 | int ret; | |
07b86b52 JD |
153 | struct lttng_consumer_stream *stream; |
154 | ||
6a00837f | 155 | DBG("Kernel consumer snapshot channel %" PRIu64, key); |
07b86b52 JD |
156 | |
157 | rcu_read_lock(); | |
158 | ||
07b86b52 JD |
159 | /* Splice is not supported yet for channel snapshot. */ |
160 | if (channel->output != CONSUMER_CHANNEL_MMAP) { | |
9381314c JG |
161 | ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot", |
162 | channel->name); | |
07b86b52 JD |
163 | ret = -1; |
164 | goto end; | |
165 | } | |
166 | ||
10a50311 | 167 | cds_list_for_each_entry(stream, &channel->streams.head, send_node) { |
923333cd | 168 | unsigned long consumed_pos, produced_pos; |
9ce5646a MD |
169 | |
170 | health_code_update(); | |
171 | ||
07b86b52 JD |
172 | /* |
173 | * Lock stream because we are about to change its state. | |
174 | */ | |
175 | pthread_mutex_lock(&stream->lock); | |
176 | ||
d2956687 JG |
177 | assert(channel->trace_chunk); |
178 | if (!lttng_trace_chunk_get(channel->trace_chunk)) { | |
179 | /* | |
180 | * Can't happen barring an internal error as the channel | |
181 | * holds a reference to the trace chunk. | |
182 | */ | |
183 | ERR("Failed to acquire reference to channel's trace chunk"); | |
184 | ret = -1; | |
185 | goto end_unlock; | |
186 | } | |
187 | assert(!stream->trace_chunk); | |
188 | stream->trace_chunk = channel->trace_chunk; | |
189 | ||
29decac3 DG |
190 | /* |
191 | * Assign the received relayd ID so we can use it for streaming. The streams |
192 | * are not visible to anyone yet, so it is safe to change it. |
193 | */ | |
07b86b52 JD |
194 | stream->net_seq_idx = relayd_id; |
195 | channel->relayd_id = relayd_id; | |
196 | if (relayd_id != (uint64_t) -1ULL) { | |
10a50311 | 197 | ret = consumer_send_relayd_stream(stream, path); |
07b86b52 JD |
198 | if (ret < 0) { |
199 | ERR("sending stream to relayd"); | |
200 | goto end_unlock; | |
201 | } | |
07b86b52 | 202 | } else { |
d2956687 JG |
203 | ret = consumer_stream_create_output_files(stream, |
204 | false); | |
07b86b52 | 205 | if (ret < 0) { |
07b86b52 JD |
206 | goto end_unlock; |
207 | } | |
d2956687 JG |
208 | DBG("Kernel consumer snapshot stream (%" PRIu64 ")", |
209 | stream->key); | |
07b86b52 JD |
210 | } |
211 | ||
f22dd891 | 212 | ret = kernctl_buffer_flush_empty(stream->wait_fd); |
07b86b52 | 213 | if (ret < 0) { |
f22dd891 MD |
214 | /* |
215 | * Doing a buffer flush which does not take into | |
216 | * account empty packets. This is not perfect | |
217 | * for stream intersection, but required as a | |
218 | * fall-back when "flush_empty" is not | |
219 | * implemented by lttng-modules. | |
220 | */ | |
221 | ret = kernctl_buffer_flush(stream->wait_fd); | |
222 | if (ret < 0) { | |
223 | ERR("Failed to flush kernel stream"); | |
224 | goto end_unlock; | |
225 | } | |
07b86b52 JD |
226 | goto end_unlock; |
227 | } | |
228 | ||
229 | ret = lttng_kconsumer_take_snapshot(stream); | |
230 | if (ret < 0) { | |
231 | ERR("Taking kernel snapshot"); | |
232 | goto end_unlock; | |
233 | } | |
234 | ||
235 | ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos); | |
236 | if (ret < 0) { | |
237 | ERR("Produced kernel snapshot position"); | |
238 | goto end_unlock; | |
239 | } | |
240 | ||
241 | ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos); | |
242 | if (ret < 0) { | |
243 | ERR("Consumerd kernel snapshot position"); | |
244 | goto end_unlock; | |
245 | } | |
246 | ||
d07ceecd MD |
247 | consumed_pos = consumer_get_consume_start_pos(consumed_pos, |
248 | produced_pos, nb_packets_per_stream, | |
249 | stream->max_sb_size); | |
5c786ded | 250 | |
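/*
 * Consume every sub-buffer between the (possibly adjusted) consumed
 * position and the produced position: reserve the sub-buffer, write its
 * contents to the local tracefile or to the relayd, then release it.
 */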
9377d830 | 251 | while ((long) (consumed_pos - produced_pos) < 0) { |
07b86b52 JD |
252 | ssize_t read_len; |
253 | unsigned long len, padded_len; | |
128708c3 | 254 | const char *subbuf_addr; |
fd424d99 | 255 | struct lttng_buffer_view subbuf_view; |
07b86b52 | 256 | |
9ce5646a | 257 | health_code_update(); |
07b86b52 JD |
258 | DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos); |
259 | ||
260 | ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos); | |
261 | if (ret < 0) { | |
32af2c95 | 262 | if (ret != -EAGAIN) { |
07b86b52 JD |
263 | PERROR("kernctl_get_subbuf snapshot"); |
264 | goto end_unlock; | |
265 | } | |
266 | DBG("Kernel consumer get subbuf failed. Skipping it."); | |
267 | consumed_pos += stream->max_sb_size; | |
ddc93ee4 | 268 | stream->chan->lost_packets++; |
07b86b52 JD |
269 | continue; |
270 | } | |
271 | ||
272 | ret = kernctl_get_subbuf_size(stream->wait_fd, &len); | |
273 | if (ret < 0) { | |
274 | ERR("Snapshot kernctl_get_subbuf_size"); | |
29decac3 | 275 | goto error_put_subbuf; |
07b86b52 JD |
276 | } |
277 | ||
278 | ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len); | |
279 | if (ret < 0) { | |
280 | ERR("Snapshot kernctl_get_padded_subbuf_size"); | |
29decac3 | 281 | goto error_put_subbuf; |
07b86b52 JD |
282 | } |
283 | ||
128708c3 JG |
284 | ret = get_current_subbuf_addr(stream, &subbuf_addr); |
285 | if (ret) { | |
286 | goto error_put_subbuf; | |
287 | } | |
288 | ||
fd424d99 JG |
289 | subbuf_view = lttng_buffer_view_init( |
290 | subbuf_addr, 0, padded_len); | |
f5ba75b4 | 291 | read_len = lttng_consumer_on_read_subbuffer_mmap( |
fd424d99 | 292 | stream, &subbuf_view, |
6f9449c2 | 293 | padded_len - len); |
07b86b52 | 294 | /* |
29decac3 DG |
295 | * We write the padded len in local tracefiles but the data len |
296 | * when using a relay. Display the error but continue processing | |
297 | * to try to release the subbuffer. | |
07b86b52 JD |
298 | */ |
299 | if (relayd_id != (uint64_t) -1ULL) { | |
300 | if (read_len != len) { | |
301 | ERR("Error sending to the relay (ret: %zd != len: %lu)", | |
302 | read_len, len); | |
303 | } | |
304 | } else { | |
305 | if (read_len != padded_len) { | |
306 | ERR("Error writing to tracefile (ret: %zd != len: %lu)", | |
307 | read_len, padded_len); | |
308 | } | |
309 | } | |
310 | ||
311 | ret = kernctl_put_subbuf(stream->wait_fd); | |
312 | if (ret < 0) { | |
313 | ERR("Snapshot kernctl_put_subbuf"); | |
314 | goto end_unlock; | |
315 | } | |
316 | consumed_pos += stream->max_sb_size; | |
317 | } | |
318 | ||
319 | if (relayd_id == (uint64_t) -1ULL) { | |
fdf9986c MD |
320 | if (stream->out_fd >= 0) { |
321 | ret = close(stream->out_fd); | |
322 | if (ret < 0) { | |
323 | PERROR("Kernel consumer snapshot close out_fd"); | |
324 | goto end_unlock; | |
325 | } | |
326 | stream->out_fd = -1; | |
07b86b52 | 327 | } |
07b86b52 JD |
328 | } else { |
329 | close_relayd_stream(stream); | |
330 | stream->net_seq_idx = (uint64_t) -1ULL; | |
331 | } | |
d2956687 JG |
332 | lttng_trace_chunk_put(stream->trace_chunk); |
333 | stream->trace_chunk = NULL; | |
07b86b52 JD |
334 | pthread_mutex_unlock(&stream->lock); |
335 | } | |
336 | ||
337 | /* All good! */ | |
338 | ret = 0; | |
339 | goto end; | |
340 | ||
29decac3 DG |
341 | error_put_subbuf: |
342 | ret = kernctl_put_subbuf(stream->wait_fd); | |
343 | if (ret < 0) { | |
344 | ERR("Snapshot kernctl_put_subbuf error path"); | |
345 | } | |
07b86b52 JD |
346 | end_unlock: |
347 | pthread_mutex_unlock(&stream->lock); | |
348 | end: | |
349 | rcu_read_unlock(); | |
350 | return ret; | |
351 | } | |
352 | ||
353 | /* | |
354 | * Read the whole metadata available for a snapshot. | |
3eb928aa | 355 | * RCU read-side lock must be held across this function to ensure existence of |
d2956687 | 356 | * metadata_channel. The channel lock must be held by the caller. |
07b86b52 JD |
357 | * |
358 | * Returns 0 on success, < 0 on error | |
359 | */ | |
d2956687 JG |
360 | static int lttng_kconsumer_snapshot_metadata( |
361 | struct lttng_consumer_channel *metadata_channel, | |
3eb928aa MD |
362 | uint64_t key, char *path, uint64_t relayd_id, |
363 | struct lttng_consumer_local_data *ctx) | |
07b86b52 | 364 | { |
d771f832 DG |
365 | int ret, use_relayd = 0; |
366 | ssize_t ret_read; | |
07b86b52 | 367 | struct lttng_consumer_stream *metadata_stream; |
d771f832 DG |
368 | |
369 | assert(ctx); | |
07b86b52 JD |
370 | |
371 | DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s", | |
372 | key, path); | |
373 | ||
374 | rcu_read_lock(); | |
375 | ||
07b86b52 JD |
376 | metadata_stream = metadata_channel->metadata_stream; |
377 | assert(metadata_stream); | |
d2956687 | 378 | |
fa27abe8 | 379 | pthread_mutex_lock(&metadata_stream->lock); |
d2956687 JG |
380 | assert(metadata_channel->trace_chunk); |
381 | assert(metadata_stream->trace_chunk); | |
07b86b52 | 382 | |
d771f832 | 383 | /* Flag once that we have a valid relayd for the stream. */ |
e2039c7a | 384 | if (relayd_id != (uint64_t) -1ULL) { |
d771f832 DG |
385 | use_relayd = 1; |
386 | } | |
387 | ||
388 | if (use_relayd) { | |
10a50311 | 389 | ret = consumer_send_relayd_stream(metadata_stream, path); |
e2039c7a | 390 | if (ret < 0) { |
fa27abe8 | 391 | goto error_snapshot; |
e2039c7a | 392 | } |
e2039c7a | 393 | } else { |
d2956687 JG |
394 | ret = consumer_stream_create_output_files(metadata_stream, |
395 | false); | |
e2039c7a | 396 | if (ret < 0) { |
fa27abe8 | 397 | goto error_snapshot; |
e2039c7a | 398 | } |
07b86b52 | 399 | } |
07b86b52 | 400 | |
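/*
 * Drain the metadata stream: keep reading sub-buffers until the kernel
 * reports that no more data is available (-EAGAIN).
 */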
d771f832 | 401 | do { |
9ce5646a MD |
402 | health_code_update(); |
403 | ||
6f9449c2 | 404 | ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true); |
d771f832 | 405 | if (ret_read < 0) { |
56591bac | 406 | if (ret_read != -EAGAIN) { |
6a00837f | 407 | ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)", |
d771f832 | 408 | ret_read); |
fa27abe8 JG |
409 | ret = ret_read; |
410 | goto error_snapshot; | |
07b86b52 | 411 | } |
d771f832 | 412 | /* ret_read is negative at this point so we will exit the loop. */ |
07b86b52 JD |
413 | continue; |
414 | } | |
d771f832 | 415 | } while (ret_read >= 0); |
07b86b52 | 416 | |
d771f832 DG |
417 | if (use_relayd) { |
418 | close_relayd_stream(metadata_stream); | |
419 | metadata_stream->net_seq_idx = (uint64_t) -1ULL; | |
420 | } else { | |
fdf9986c MD |
421 | if (metadata_stream->out_fd >= 0) { |
422 | ret = close(metadata_stream->out_fd); | |
423 | if (ret < 0) { | |
424 | PERROR("Kernel consumer snapshot metadata close out_fd"); | |
425 | /* | |
426 | * Don't go on error here since the snapshot was successful at this | |
427 | * point but somehow the close failed. | |
428 | */ | |
429 | } | |
430 | metadata_stream->out_fd = -1; | |
d2956687 JG |
431 | lttng_trace_chunk_put(metadata_stream->trace_chunk); |
432 | metadata_stream->trace_chunk = NULL; | |
e2039c7a | 433 | } |
e2039c7a JD |
434 | } |
435 | ||
07b86b52 | 436 | ret = 0; |
fa27abe8 JG |
437 | error_snapshot: |
438 | pthread_mutex_unlock(&metadata_stream->lock); | |
cf53a8a6 JD |
439 | cds_list_del(&metadata_stream->send_node); |
440 | consumer_stream_destroy(metadata_stream, NULL); | |
441 | metadata_channel->metadata_stream = NULL; | |
07b86b52 JD |
442 | rcu_read_unlock(); |
443 | return ret; | |
444 | } | |
445 | ||
1803a064 MD |
446 | /* |
447 | * Receive command from session daemon and process it. | |
448 | * | |
449 | * Return 1 on success; otherwise, return 0 or a negative value. |
450 | */ | |
3bd1e081 MD |
451 | int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx, |
452 | int sock, struct pollfd *consumer_sockpoll) | |
453 | { | |
454 | ssize_t ret; | |
0c759fc9 | 455 | enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; |
3bd1e081 MD |
456 | struct lttcomm_consumer_msg msg; |
457 | ||
9ce5646a MD |
458 | health_code_update(); |
459 | ||
3bd1e081 MD |
460 | ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg)); |
461 | if (ret != sizeof(msg)) { | |
1803a064 | 462 | if (ret > 0) { |
c6857fcf | 463 | lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD); |
1803a064 MD |
464 | ret = -1; |
465 | } | |
3bd1e081 MD |
466 | return ret; |
467 | } | |
9ce5646a MD |
468 | |
469 | health_code_update(); | |
470 | ||
84382d49 MD |
471 | /* Deprecated command */ |
472 | assert(msg.cmd_type != LTTNG_CONSUMER_STOP); | |
3bd1e081 | 473 | |
9ce5646a MD |
474 | health_code_update(); |
475 | ||
b0b335c8 MD |
476 | /* relayd needs RCU read-side protection */ |
477 | rcu_read_lock(); | |
478 | ||
3bd1e081 | 479 | switch (msg.cmd_type) { |
00e2e675 DG |
480 | case LTTNG_CONSUMER_ADD_RELAYD_SOCKET: |
481 | { | |
f50f23d9 | 482 | /* Session daemon status message are handled in the following call. */ |
2527bf85 | 483 | consumer_add_relayd_socket(msg.u.relayd_sock.net_index, |
7735ef9e | 484 | msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll, |
d3e2ba59 | 485 | &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id, |
2527bf85 | 486 | msg.u.relayd_sock.relayd_session_id); |
00e2e675 DG |
487 | goto end_nosignal; |
488 | } | |
3bd1e081 MD |
489 | case LTTNG_CONSUMER_ADD_CHANNEL: |
490 | { | |
491 | struct lttng_consumer_channel *new_channel; | |
e43c41c5 | 492 | int ret_recv; |
d2956687 | 493 | const uint64_t chunk_id = msg.u.channel.chunk_id.value; |
3bd1e081 | 494 | |
9ce5646a MD |
495 | health_code_update(); |
496 | ||
f50f23d9 DG |
497 | /* First send a status message before receiving the fds. */ |
498 | ret = consumer_send_status_msg(sock, ret_code); | |
499 | if (ret < 0) { | |
500 | /* Somehow, the session daemon is not responding anymore. */ | |
1803a064 | 501 | goto error_fatal; |
f50f23d9 | 502 | } |
9ce5646a MD |
503 | |
504 | health_code_update(); | |
505 | ||
d88aee68 | 506 | DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key); |
3bd1e081 | 507 | new_channel = consumer_allocate_channel(msg.u.channel.channel_key, |
d2956687 JG |
508 | msg.u.channel.session_id, |
509 | msg.u.channel.chunk_id.is_set ? | |
510 | &chunk_id : NULL, | |
511 | msg.u.channel.pathname, | |
512 | msg.u.channel.name, | |
1624d5b7 JD |
513 | msg.u.channel.relayd_id, msg.u.channel.output, |
514 | msg.u.channel.tracefile_size, | |
1950109e | 515 | msg.u.channel.tracefile_count, 0, |
ecc48a90 | 516 | msg.u.channel.monitor, |
d7ba1388 | 517 | msg.u.channel.live_timer_interval, |
a2814ea7 | 518 | msg.u.channel.is_live, |
3d071855 | 519 | NULL, NULL); |
3bd1e081 | 520 | if (new_channel == NULL) { |
f73fabfd | 521 | lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); |
3bd1e081 MD |
522 | goto end_nosignal; |
523 | } | |
ffe60014 | 524 | new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams; |
95a1109b JD |
525 | switch (msg.u.channel.output) { |
526 | case LTTNG_EVENT_SPLICE: | |
527 | new_channel->output = CONSUMER_CHANNEL_SPLICE; | |
528 | break; | |
529 | case LTTNG_EVENT_MMAP: | |
530 | new_channel->output = CONSUMER_CHANNEL_MMAP; | |
531 | break; | |
532 | default: | |
533 | ERR("Channel output unknown %d", msg.u.channel.output); | |
534 | goto end_nosignal; | |
535 | } | |
ffe60014 DG |
536 | |
537 | /* Translate and save channel type. */ | |
538 | switch (msg.u.channel.type) { | |
539 | case CONSUMER_CHANNEL_TYPE_DATA: | |
540 | case CONSUMER_CHANNEL_TYPE_METADATA: | |
541 | new_channel->type = msg.u.channel.type; | |
542 | break; | |
543 | default: | |
544 | assert(0); | |
545 | goto end_nosignal; | |
546 | }; | |
547 | ||
9ce5646a MD |
548 | health_code_update(); |
549 | ||
3bd1e081 | 550 | if (ctx->on_recv_channel != NULL) { |
e43c41c5 JD |
551 | ret_recv = ctx->on_recv_channel(new_channel); |
552 | if (ret_recv == 0) { | |
553 | ret = consumer_add_channel(new_channel, ctx); | |
554 | } else if (ret_recv < 0) { | |
3bd1e081 MD |
555 | goto end_nosignal; |
556 | } | |
557 | } else { | |
e43c41c5 | 558 | ret = consumer_add_channel(new_channel, ctx); |
3bd1e081 | 559 | } |
e9404c27 JG |
560 | if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA && !ret) { |
561 | int monitor_start_ret; | |
562 | ||
563 | DBG("Consumer starting monitor timer"); | |
94d49140 JD |
564 | consumer_timer_live_start(new_channel, |
565 | msg.u.channel.live_timer_interval); | |
e9404c27 JG |
566 | monitor_start_ret = consumer_timer_monitor_start( |
567 | new_channel, | |
568 | msg.u.channel.monitor_timer_interval); | |
569 | if (monitor_start_ret < 0) { | |
570 | ERR("Starting channel monitoring timer failed"); | |
571 | goto end_nosignal; | |
572 | } | |
573 | ||
94d49140 | 574 | } |
e43c41c5 | 575 | |
9ce5646a MD |
576 | health_code_update(); |
577 | ||
e43c41c5 | 578 | /* If we received an error in add_channel, we need to report it. */ |
821fffb2 | 579 | if (ret < 0) { |
1803a064 MD |
580 | ret = consumer_send_status_msg(sock, ret); |
581 | if (ret < 0) { | |
582 | goto error_fatal; | |
583 | } | |
e43c41c5 JD |
584 | goto end_nosignal; |
585 | } | |
586 | ||
3bd1e081 MD |
587 | goto end_nosignal; |
588 | } | |
589 | case LTTNG_CONSUMER_ADD_STREAM: | |
590 | { | |
dae10966 DG |
591 | int fd; |
592 | struct lttng_pipe *stream_pipe; | |
00e2e675 | 593 | struct lttng_consumer_stream *new_stream; |
ffe60014 | 594 | struct lttng_consumer_channel *channel; |
c80048c6 | 595 | int alloc_ret = 0; |
3bd1e081 | 596 | |
ffe60014 DG |
597 | /* |
598 | * Get stream's channel reference. Needed when adding the stream to the | |
599 | * global hash table. | |
600 | */ | |
601 | channel = consumer_find_channel(msg.u.stream.channel_key); | |
602 | if (!channel) { | |
603 | /* | |
604 | * We could not find the channel. Can happen if cpu hotplug | |
605 | * happens while tearing down. | |
606 | */ | |
d88aee68 | 607 | ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key); |
e462382a | 608 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; |
ffe60014 DG |
609 | } |
610 | ||
9ce5646a MD |
611 | health_code_update(); |
612 | ||
f50f23d9 DG |
613 | /* First send a status message before receiving the fds. */ |
614 | ret = consumer_send_status_msg(sock, ret_code); | |
1803a064 | 615 | if (ret < 0) { |
d771f832 | 616 | /* Somehow, the session daemon is not responding anymore. */ |
c5c7998f | 617 | goto error_add_stream_fatal; |
1803a064 | 618 | } |
9ce5646a MD |
619 | |
620 | health_code_update(); | |
621 | ||
0c759fc9 | 622 | if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) { |
d771f832 | 623 | /* Channel was not found. */ |
c5c7998f | 624 | goto error_add_stream_nosignal; |
f50f23d9 DG |
625 | } |
626 | ||
d771f832 | 627 | /* Blocking call */ |
9ce5646a MD |
628 | health_poll_entry(); |
629 | ret = lttng_consumer_poll_socket(consumer_sockpoll); | |
630 | health_poll_exit(); | |
84382d49 | 631 | if (ret) { |
c5c7998f | 632 | goto error_add_stream_fatal; |
3bd1e081 | 633 | } |
00e2e675 | 634 | |
9ce5646a MD |
635 | health_code_update(); |
636 | ||
00e2e675 | 637 | /* Get stream file descriptor from socket */ |
f2fc6720 MD |
638 | ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1); |
639 | if (ret != sizeof(fd)) { | |
f73fabfd | 640 | lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD); |
c5c7998f | 641 | goto end; |
3bd1e081 | 642 | } |
3bd1e081 | 643 | |
9ce5646a MD |
644 | health_code_update(); |
645 | ||
f50f23d9 DG |
646 | /* |
647 | * Send status code to session daemon only if the recv works. If the | |
648 | * above recv() failed, the session daemon is notified through the | |
649 | * error socket and the teardown is eventually done. | |
650 | */ | |
651 | ret = consumer_send_status_msg(sock, ret_code); | |
652 | if (ret < 0) { | |
653 | /* Somehow, the session daemon is not responding anymore. */ | |
c5c7998f | 654 | goto error_add_stream_nosignal; |
f50f23d9 DG |
655 | } |
656 | ||
9ce5646a MD |
657 | health_code_update(); |
658 | ||
d2956687 | 659 | pthread_mutex_lock(&channel->lock); |
6f9449c2 | 660 | new_stream = consumer_stream_create( |
49f45573 JG |
661 | channel, |
662 | channel->key, | |
ffe60014 | 663 | fd, |
ffe60014 | 664 | channel->name, |
ffe60014 DG |
665 | channel->relayd_id, |
666 | channel->session_id, | |
d2956687 | 667 | channel->trace_chunk, |
ffe60014 DG |
668 | msg.u.stream.cpu, |
669 | &alloc_ret, | |
4891ece8 | 670 | channel->type, |
d2956687 | 671 | channel->monitor); |
3bd1e081 | 672 | if (new_stream == NULL) { |
c80048c6 MD |
673 | switch (alloc_ret) { |
674 | case -ENOMEM: | |
675 | case -EINVAL: | |
676 | default: | |
677 | lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); | |
678 | break; | |
c80048c6 | 679 | } |
d2956687 | 680 | pthread_mutex_unlock(&channel->lock); |
c5c7998f | 681 | goto error_add_stream_nosignal; |
3bd1e081 | 682 | } |
d771f832 | 683 | |
ffe60014 | 684 | new_stream->wait_fd = fd; |
d05185fa JG |
685 | ret = kernctl_get_max_subbuf_size(new_stream->wait_fd, |
686 | &new_stream->max_sb_size); | |
687 | if (ret < 0) { | |
688 | pthread_mutex_unlock(&channel->lock); | |
689 | ERR("Failed to get kernel maximal subbuffer size"); | |
c5c7998f | 690 | goto error_add_stream_nosignal; |
d05185fa JG |
691 | } |
692 | ||
d9a2e16e JD |
693 | consumer_stream_update_channel_attributes(new_stream, |
694 | channel); | |
00e2e675 | 695 | |
a0c83db9 DG |
696 | /* |
697 | * We've just assigned the channel to the stream so increment the | |
07b86b52 JD |
698 | * refcount right now. We don't need to increment the refcount for |
699 | * streams in no-monitor mode because we handle their cleanup |
700 | * manually. It is very important to make sure there are NO prior |
701 | * consumer_del_stream() calls, or else the refcount will be unbalanced. |
a0c83db9 | 702 | */ |
07b86b52 JD |
703 | if (channel->monitor) { |
704 | uatomic_inc(&new_stream->chan->refcount); | |
705 | } | |
9d9353f9 | 706 | |
fb3a43a9 DG |
707 | /* |
708 | * The buffer flush is done on the session daemon side for the kernel | |
709 | * so no need for the stream "hangup_flush_done" variable to be | |
710 | * tracked. This is important for a kernel stream since we don't rely | |
711 | * on the flush state of the stream to read data. It's not the case for | |
712 | * user space tracing. | |
713 | */ | |
714 | new_stream->hangup_flush_done = 0; | |
715 | ||
9ce5646a MD |
716 | health_code_update(); |
717 | ||
d2956687 | 718 | pthread_mutex_lock(&new_stream->lock); |
633d0084 DG |
719 | if (ctx->on_recv_stream) { |
720 | ret = ctx->on_recv_stream(new_stream); | |
721 | if (ret < 0) { | |
d2956687 JG |
722 | pthread_mutex_unlock(&new_stream->lock); |
723 | pthread_mutex_unlock(&channel->lock); | |
d771f832 | 724 | consumer_stream_free(new_stream); |
c5c7998f | 725 | goto error_add_stream_nosignal; |
fb3a43a9 | 726 | } |
633d0084 | 727 | } |
9ce5646a MD |
728 | health_code_update(); |
729 | ||
07b86b52 JD |
730 | if (new_stream->metadata_flag) { |
731 | channel->metadata_stream = new_stream; | |
732 | } | |
733 | ||
2bba9e53 DG |
734 | /* Do not monitor this stream. */ |
735 | if (!channel->monitor) { | |
5eecee74 | 736 | DBG("Kernel consumer add stream %s in no monitor mode with " |
6dc3064a | 737 | "relayd id %" PRIu64, new_stream->name, |
5eecee74 | 738 | new_stream->net_seq_idx); |
10a50311 | 739 | cds_list_add(&new_stream->send_node, &channel->streams.head); |
d2956687 JG |
740 | pthread_mutex_unlock(&new_stream->lock); |
741 | pthread_mutex_unlock(&channel->lock); | |
c5c7998f | 742 | goto end_add_stream; |
6dc3064a DG |
743 | } |
744 | ||
e1b71bdc DG |
745 | /* Send stream to relayd if the stream has an ID. */ |
746 | if (new_stream->net_seq_idx != (uint64_t) -1ULL) { | |
194ee077 DG |
747 | ret = consumer_send_relayd_stream(new_stream, |
748 | new_stream->chan->pathname); | |
e1b71bdc | 749 | if (ret < 0) { |
d2956687 JG |
750 | pthread_mutex_unlock(&new_stream->lock); |
751 | pthread_mutex_unlock(&channel->lock); | |
e1b71bdc | 752 | consumer_stream_free(new_stream); |
c5c7998f | 753 | goto error_add_stream_nosignal; |
e1b71bdc | 754 | } |
001b7e62 MD |
755 | |
756 | /* | |
757 | * If adding an extra stream to an already | |
758 | * existing channel (e.g. cpu hotplug), we need | |
759 | * to send the "streams_sent" command to relayd. | |
760 | */ | |
761 | if (channel->streams_sent_to_relayd) { | |
762 | ret = consumer_send_relayd_streams_sent( | |
763 | new_stream->net_seq_idx); | |
764 | if (ret < 0) { | |
d2956687 JG |
765 | pthread_mutex_unlock(&new_stream->lock); |
766 | pthread_mutex_unlock(&channel->lock); | |
c5c7998f | 767 | goto error_add_stream_nosignal; |
001b7e62 MD |
768 | } |
769 | } | |
e2039c7a | 770 | } |
d2956687 JG |
771 | pthread_mutex_unlock(&new_stream->lock); |
772 | pthread_mutex_unlock(&channel->lock); | |
e2039c7a | 773 | |
50f8ae69 | 774 | /* Get the right pipe where the stream will be sent. */ |
633d0084 | 775 | if (new_stream->metadata_flag) { |
66d583dc | 776 | consumer_add_metadata_stream(new_stream); |
dae10966 | 777 | stream_pipe = ctx->consumer_metadata_pipe; |
3bd1e081 | 778 | } else { |
66d583dc | 779 | consumer_add_data_stream(new_stream); |
dae10966 | 780 | stream_pipe = ctx->consumer_data_pipe; |
50f8ae69 DG |
781 | } |
782 | ||
66d583dc | 783 | /* Visible to other threads */ |
5ab66908 MD |
784 | new_stream->globally_visible = 1; |
785 | ||
9ce5646a MD |
786 | health_code_update(); |
787 | ||
dae10966 | 788 | ret = lttng_pipe_write(stream_pipe, &new_stream, sizeof(new_stream)); |
50f8ae69 | 789 | if (ret < 0) { |
dae10966 | 790 | ERR("Consumer write %s stream to pipe %d", |
50f8ae69 | 791 | new_stream->metadata_flag ? "metadata" : "data", |
dae10966 | 792 | lttng_pipe_get_writefd(stream_pipe)); |
5ab66908 MD |
793 | if (new_stream->metadata_flag) { |
794 | consumer_del_stream_for_metadata(new_stream); | |
795 | } else { | |
796 | consumer_del_stream_for_data(new_stream); | |
797 | } | |
c5c7998f | 798 | goto error_add_stream_nosignal; |
3bd1e081 | 799 | } |
00e2e675 | 800 | |
02d02e31 JD |
801 | DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64, |
802 | new_stream->name, fd, new_stream->chan->pathname, new_stream->relayd_stream_id); | |
c5c7998f | 803 | end_add_stream: |
3bd1e081 | 804 | break; |
c5c7998f JG |
805 | error_add_stream_nosignal: |
806 | goto end_nosignal; | |
807 | error_add_stream_fatal: | |
808 | goto error_fatal; | |
3bd1e081 | 809 | } |
a4baae1b JD |
810 | case LTTNG_CONSUMER_STREAMS_SENT: |
811 | { | |
812 | struct lttng_consumer_channel *channel; | |
813 | ||
814 | /* | |
815 | * Get stream's channel reference. Needed when adding the stream to the | |
816 | * global hash table. | |
817 | */ | |
818 | channel = consumer_find_channel(msg.u.sent_streams.channel_key); | |
819 | if (!channel) { | |
820 | /* | |
821 | * We could not find the channel. Can happen if cpu hotplug | |
822 | * happens while tearing down. | |
823 | */ | |
824 | ERR("Unable to find channel key %" PRIu64, | |
825 | msg.u.sent_streams.channel_key); | |
e462382a | 826 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; |
a4baae1b JD |
827 | } |
828 | ||
829 | health_code_update(); | |
830 | ||
831 | /* | |
832 | * Send status code to session daemon. | |
833 | */ | |
834 | ret = consumer_send_status_msg(sock, ret_code); | |
f261ad0a | 835 | if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) { |
a4baae1b | 836 | /* Somehow, the session daemon is not responding anymore. */ |
80d5a658 | 837 | goto error_streams_sent_nosignal; |
a4baae1b JD |
838 | } |
839 | ||
840 | health_code_update(); | |
841 | ||
842 | /* | |
843 | * We should not send this message if we don't monitor the | |
844 | * streams in this channel. | |
845 | */ | |
846 | if (!channel->monitor) { | |
80d5a658 | 847 | goto end_error_streams_sent; |
a4baae1b JD |
848 | } |
849 | ||
850 | health_code_update(); | |
851 | /* Send stream to relayd if the stream has an ID. */ | |
852 | if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) { | |
853 | ret = consumer_send_relayd_streams_sent( | |
854 | msg.u.sent_streams.net_seq_idx); | |
855 | if (ret < 0) { | |
80d5a658 | 856 | goto error_streams_sent_nosignal; |
a4baae1b | 857 | } |
001b7e62 | 858 | channel->streams_sent_to_relayd = true; |
a4baae1b | 859 | } |
80d5a658 | 860 | end_error_streams_sent: |
a4baae1b | 861 | break; |
80d5a658 JG |
862 | error_streams_sent_nosignal: |
863 | goto end_nosignal; | |
a4baae1b | 864 | } |
3bd1e081 MD |
865 | case LTTNG_CONSUMER_UPDATE_STREAM: |
866 | { | |
3f8e211f DG |
867 | rcu_read_unlock(); |
868 | return -ENOSYS; | |
869 | } | |
870 | case LTTNG_CONSUMER_DESTROY_RELAYD: | |
871 | { | |
a6ba4fe1 | 872 | uint64_t index = msg.u.destroy_relayd.net_seq_idx; |
3f8e211f DG |
873 | struct consumer_relayd_sock_pair *relayd; |
874 | ||
a6ba4fe1 | 875 | DBG("Kernel consumer destroying relayd %" PRIu64, index); |
3f8e211f DG |
876 | |
877 | /* Get relayd reference if exists. */ | |
a6ba4fe1 | 878 | relayd = consumer_find_relayd(index); |
3f8e211f | 879 | if (relayd == NULL) { |
3448e266 | 880 | DBG("Unable to find relayd %" PRIu64, index); |
e462382a | 881 | ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL; |
3bd1e081 | 882 | } |
3f8e211f | 883 | |
a6ba4fe1 DG |
884 | /* |
885 | * Each relayd socket pair has a refcount of streams attached to it, |
886 | * which tells whether the relayd is still active based on the |
887 | * refcount value. |
888 | * | |
889 | * This will set the destroy flag of the relayd object and destroy it | |
890 | * if the refcount reaches zero when called. | |
891 | * | |
892 | * The destroy can happen either here or when a stream fd hangs up. | |
893 | */ | |
f50f23d9 DG |
894 | if (relayd) { |
895 | consumer_flag_relayd_for_destroy(relayd); | |
896 | } | |
897 | ||
9ce5646a MD |
898 | health_code_update(); |
899 | ||
f50f23d9 DG |
900 | ret = consumer_send_status_msg(sock, ret_code); |
901 | if (ret < 0) { | |
902 | /* Somehow, the session daemon is not responding anymore. */ | |
1803a064 | 903 | goto error_fatal; |
f50f23d9 | 904 | } |
3f8e211f | 905 | |
3f8e211f | 906 | goto end_nosignal; |
3bd1e081 | 907 | } |
6d805429 | 908 | case LTTNG_CONSUMER_DATA_PENDING: |
53632229 | 909 | { |
c8f59ee5 | 910 | int32_t ret; |
6d805429 | 911 | uint64_t id = msg.u.data_pending.session_id; |
c8f59ee5 | 912 | |
6d805429 | 913 | DBG("Kernel consumer data pending command for id %" PRIu64, id); |
c8f59ee5 | 914 | |
6d805429 | 915 | ret = consumer_data_pending(id); |
c8f59ee5 | 916 | |
9ce5646a MD |
917 | health_code_update(); |
918 | ||
c8f59ee5 DG |
919 | /* Send back returned value to session daemon */ |
920 | ret = lttcomm_send_unix_sock(sock, &ret, sizeof(ret)); | |
921 | if (ret < 0) { | |
6d805429 | 922 | PERROR("send data pending ret code"); |
1803a064 | 923 | goto error_fatal; |
c8f59ee5 | 924 | } |
f50f23d9 DG |
925 | |
926 | /* | |
927 | * No need to send back a status message since the data pending | |
928 | * returned value is the response. | |
929 | */ | |
c8f59ee5 | 930 | break; |
53632229 | 931 | } |
6dc3064a DG |
932 | case LTTNG_CONSUMER_SNAPSHOT_CHANNEL: |
933 | { | |
3eb928aa MD |
934 | struct lttng_consumer_channel *channel; |
935 | uint64_t key = msg.u.snapshot_channel.key; | |
936 | ||
937 | channel = consumer_find_channel(key); | |
938 | if (!channel) { | |
939 | ERR("Channel %" PRIu64 " not found", key); | |
940 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; | |
07b86b52 | 941 | } else { |
d2956687 | 942 | pthread_mutex_lock(&channel->lock); |
3eb928aa MD |
943 | if (msg.u.snapshot_channel.metadata == 1) { |
944 | ret = lttng_kconsumer_snapshot_metadata(channel, key, | |
945 | msg.u.snapshot_channel.pathname, | |
946 | msg.u.snapshot_channel.relayd_id, ctx); | |
947 | if (ret < 0) { | |
948 | ERR("Snapshot metadata failed"); | |
949 | ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED; | |
950 | } | |
951 | } else { | |
952 | ret = lttng_kconsumer_snapshot_channel(channel, key, | |
953 | msg.u.snapshot_channel.pathname, | |
954 | msg.u.snapshot_channel.relayd_id, | |
955 | msg.u.snapshot_channel.nb_packets_per_stream, | |
956 | ctx); | |
957 | if (ret < 0) { | |
958 | ERR("Snapshot channel failed"); | |
959 | ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED; | |
960 | } | |
07b86b52 | 961 | } |
d2956687 | 962 | pthread_mutex_unlock(&channel->lock); |
07b86b52 | 963 | } |
9ce5646a MD |
964 | health_code_update(); |
965 | ||
6dc3064a DG |
966 | ret = consumer_send_status_msg(sock, ret_code); |
967 | if (ret < 0) { | |
968 | /* Somehow, the session daemon is not responding anymore. */ | |
969 | goto end_nosignal; | |
970 | } | |
971 | break; | |
972 | } | |
07b86b52 JD |
973 | case LTTNG_CONSUMER_DESTROY_CHANNEL: |
974 | { | |
975 | uint64_t key = msg.u.destroy_channel.key; | |
976 | struct lttng_consumer_channel *channel; | |
977 | ||
978 | channel = consumer_find_channel(key); | |
979 | if (!channel) { | |
980 | ERR("Kernel consumer destroy channel %" PRIu64 " not found", key); | |
e462382a | 981 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; |
07b86b52 JD |
982 | } |
983 | ||
9ce5646a MD |
984 | health_code_update(); |
985 | ||
07b86b52 JD |
986 | ret = consumer_send_status_msg(sock, ret_code); |
987 | if (ret < 0) { | |
988 | /* Somehow, the session daemon is not responding anymore. */ | |
a9d36096 | 989 | goto end_destroy_channel; |
07b86b52 JD |
990 | } |
991 | ||
9ce5646a MD |
992 | health_code_update(); |
993 | ||
15dc512a DG |
994 | /* Stop right now if no channel was found. */ |
995 | if (!channel) { | |
a9d36096 | 996 | goto end_destroy_channel; |
15dc512a DG |
997 | } |
998 | ||
07b86b52 JD |
999 | /* |
1000 | * This command should ONLY be issued for a channel with streams set in |
1001 | * no-monitor mode. |
1002 | */ | |
1003 | assert(!channel->monitor); | |
1004 | ||
1005 | /* | |
1006 | * The refcount should ALWAYS be 0 in the case of a channel in no | |
1007 | * monitor mode. | |
1008 | */ | |
1009 | assert(!uatomic_sub_return(&channel->refcount, 1)); | |
1010 | ||
1011 | consumer_del_channel(channel); | |
a9d36096 | 1012 | end_destroy_channel: |
07b86b52 JD |
1013 | goto end_nosignal; |
1014 | } | |
fb83fe64 JD |
1015 | case LTTNG_CONSUMER_DISCARDED_EVENTS: |
1016 | { | |
66ab32be JD |
1017 | ssize_t ret; |
1018 | uint64_t count; | |
fb83fe64 JD |
1019 | struct lttng_consumer_channel *channel; |
1020 | uint64_t id = msg.u.discarded_events.session_id; | |
1021 | uint64_t key = msg.u.discarded_events.channel_key; | |
1022 | ||
e5742757 MD |
1023 | DBG("Kernel consumer discarded events command for session id %" |
1024 | PRIu64 ", channel key %" PRIu64, id, key); | |
1025 | ||
fb83fe64 JD |
1026 | channel = consumer_find_channel(key); |
1027 | if (!channel) { | |
1028 | ERR("Kernel consumer discarded events channel %" | |
1029 | PRIu64 " not found", key); | |
66ab32be | 1030 | count = 0; |
e5742757 | 1031 | } else { |
66ab32be | 1032 | count = channel->discarded_events; |
fb83fe64 JD |
1033 | } |
1034 | ||
fb83fe64 JD |
1035 | health_code_update(); |
1036 | ||
1037 | /* Send back returned value to session daemon */ | |
66ab32be | 1038 | ret = lttcomm_send_unix_sock(sock, &count, sizeof(count)); |
fb83fe64 JD |
1039 | if (ret < 0) { |
1040 | PERROR("send discarded events"); | |
1041 | goto error_fatal; | |
1042 | } | |
1043 | ||
1044 | break; | |
1045 | } | |
1046 | case LTTNG_CONSUMER_LOST_PACKETS: | |
1047 | { | |
66ab32be JD |
1048 | ssize_t ret; |
1049 | uint64_t count; | |
fb83fe64 JD |
1050 | struct lttng_consumer_channel *channel; |
1051 | uint64_t id = msg.u.lost_packets.session_id; | |
1052 | uint64_t key = msg.u.lost_packets.channel_key; | |
1053 | ||
e5742757 MD |
1054 | DBG("Kernel consumer lost packets command for session id %" |
1055 | PRIu64 ", channel key %" PRIu64, id, key); | |
1056 | ||
fb83fe64 JD |
1057 | channel = consumer_find_channel(key); |
1058 | if (!channel) { | |
1059 | ERR("Kernel consumer lost packets channel %" | |
1060 | PRIu64 " not found", key); | |
66ab32be | 1061 | count = 0; |
e5742757 | 1062 | } else { |
66ab32be | 1063 | count = channel->lost_packets; |
fb83fe64 JD |
1064 | } |
1065 | ||
fb83fe64 JD |
1066 | health_code_update(); |
1067 | ||
1068 | /* Send back returned value to session daemon */ | |
66ab32be | 1069 | ret = lttcomm_send_unix_sock(sock, &count, sizeof(count)); |
fb83fe64 JD |
1070 | if (ret < 0) { |
1071 | PERROR("send lost packets"); | |
1072 | goto error_fatal; | |
1073 | } | |
1074 | ||
1075 | break; | |
1076 | } | |
b3530820 JG |
1077 | case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE: |
1078 | { | |
1079 | int channel_monitor_pipe; | |
1080 | ||
1081 | ret_code = LTTCOMM_CONSUMERD_SUCCESS; | |
1082 | /* Successfully received the command's type. */ | |
1083 | ret = consumer_send_status_msg(sock, ret_code); | |
1084 | if (ret < 0) { | |
1085 | goto error_fatal; | |
1086 | } | |
1087 | ||
1088 | ret = lttcomm_recv_fds_unix_sock(sock, &channel_monitor_pipe, | |
1089 | 1); | |
1090 | if (ret != sizeof(channel_monitor_pipe)) { | |
1091 | ERR("Failed to receive channel monitor pipe"); | |
1092 | goto error_fatal; | |
1093 | } | |
1094 | ||
1095 | DBG("Received channel monitor pipe (%d)", channel_monitor_pipe); | |
1096 | ret = consumer_timer_thread_set_channel_monitor_pipe( | |
1097 | channel_monitor_pipe); | |
1098 | if (!ret) { | |
1099 | int flags; | |
1100 | ||
1101 | ret_code = LTTCOMM_CONSUMERD_SUCCESS; | |
1102 | /* Set the pipe as non-blocking. */ | |
1103 | ret = fcntl(channel_monitor_pipe, F_GETFL, 0); | |
1104 | if (ret == -1) { | |
1105 | PERROR("fcntl get flags of the channel monitoring pipe"); | |
1106 | goto error_fatal; | |
1107 | } | |
1108 | flags = ret; | |
1109 | ||
1110 | ret = fcntl(channel_monitor_pipe, F_SETFL, | |
1111 | flags | O_NONBLOCK); | |
1112 | if (ret == -1) { | |
1113 | PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe"); | |
1114 | goto error_fatal; | |
1115 | } | |
1116 | DBG("Channel monitor pipe set as non-blocking"); | |
1117 | } else { | |
1118 | ret_code = LTTCOMM_CONSUMERD_ALREADY_SET; | |
1119 | } | |
1120 | ret = consumer_send_status_msg(sock, ret_code); | |
1121 | if (ret < 0) { | |
1122 | goto error_fatal; | |
1123 | } | |
1124 | break; | |
1125 | } | |
b99a8d42 JD |
1126 | case LTTNG_CONSUMER_ROTATE_CHANNEL: |
1127 | { | |
92b7a7f8 MD |
1128 | struct lttng_consumer_channel *channel; |
1129 | uint64_t key = msg.u.rotate_channel.key; | |
b99a8d42 | 1130 | |
92b7a7f8 | 1131 | DBG("Consumer rotate channel %" PRIu64, key); |
b99a8d42 | 1132 | |
92b7a7f8 MD |
1133 | channel = consumer_find_channel(key); |
1134 | if (!channel) { | |
1135 | ERR("Channel %" PRIu64 " not found", key); | |
1136 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; | |
1137 | } else { | |
1138 | /* | |
1139 | * Sample the rotate position of all the streams in this channel. | |
1140 | */ | |
1141 | ret = lttng_consumer_rotate_channel(channel, key, | |
92b7a7f8 MD |
1142 | msg.u.rotate_channel.relayd_id, |
1143 | msg.u.rotate_channel.metadata, | |
92b7a7f8 MD |
1144 | ctx); |
1145 | if (ret < 0) { | |
1146 | ERR("Rotate channel failed"); | |
1147 | ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL; | |
1148 | } | |
b99a8d42 | 1149 | |
92b7a7f8 MD |
1150 | health_code_update(); |
1151 | } | |
b99a8d42 JD |
1152 | ret = consumer_send_status_msg(sock, ret_code); |
1153 | if (ret < 0) { | |
1154 | /* Somehow, the session daemon is not responding anymore. */ | |
713bdd26 | 1155 | goto error_rotate_channel; |
b99a8d42 | 1156 | } |
92b7a7f8 MD |
1157 | if (channel) { |
1158 | /* Rotate the streams that are ready right now. */ | |
1159 | ret = lttng_consumer_rotate_ready_streams( | |
1160 | channel, key, ctx); | |
1161 | if (ret < 0) { | |
1162 | ERR("Rotate ready streams failed"); | |
1163 | } | |
b99a8d42 | 1164 | } |
b99a8d42 | 1165 | break; |
713bdd26 JG |
1166 | error_rotate_channel: |
1167 | goto end_nosignal; | |
b99a8d42 | 1168 | } |
5f3aff8b MD |
1169 | case LTTNG_CONSUMER_CLEAR_CHANNEL: |
1170 | { | |
1171 | struct lttng_consumer_channel *channel; | |
1172 | uint64_t key = msg.u.clear_channel.key; | |
1173 | ||
1174 | channel = consumer_find_channel(key); | |
1175 | if (!channel) { | |
1176 | DBG("Channel %" PRIu64 " not found", key); | |
1177 | ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; | |
1178 | } else { | |
1179 | ret = lttng_consumer_clear_channel(channel); | |
1180 | if (ret) { | |
1181 | ERR("Clear channel failed"); | |
1182 | ret_code = ret; | |
1183 | } | |
1184 | ||
1185 | health_code_update(); | |
1186 | } | |
1187 | ret = consumer_send_status_msg(sock, ret_code); | |
1188 | if (ret < 0) { | |
1189 | /* Somehow, the session daemon is not responding anymore. */ | |
1190 | goto end_nosignal; | |
1191 | } | |
1192 | ||
1193 | break; | |
1194 | } | |
d2956687 | 1195 | case LTTNG_CONSUMER_INIT: |
00fb02ac | 1196 | { |
d2956687 JG |
1197 | ret_code = lttng_consumer_init_command(ctx, |
1198 | msg.u.init.sessiond_uuid); | |
00fb02ac | 1199 | health_code_update(); |
00fb02ac JD |
1200 | ret = consumer_send_status_msg(sock, ret_code); |
1201 | if (ret < 0) { | |
1202 | /* Somehow, the session daemon is not responding anymore. */ | |
1203 | goto end_nosignal; | |
1204 | } | |
1205 | break; | |
1206 | } | |
d2956687 | 1207 | case LTTNG_CONSUMER_CREATE_TRACE_CHUNK: |
d88744a4 | 1208 | { |
d2956687 | 1209 | const struct lttng_credentials credentials = { |
e5add6d0 JG |
1210 | .uid = msg.u.create_trace_chunk.credentials.value.uid, |
1211 | .gid = msg.u.create_trace_chunk.credentials.value.gid, | |
d2956687 JG |
1212 | }; |
1213 | const bool is_local_trace = | |
1214 | !msg.u.create_trace_chunk.relayd_id.is_set; | |
1215 | const uint64_t relayd_id = | |
1216 | msg.u.create_trace_chunk.relayd_id.value; | |
1217 | const char *chunk_override_name = | |
1218 | *msg.u.create_trace_chunk.override_name ? | |
1219 | msg.u.create_trace_chunk.override_name : | |
1220 | NULL; | |
cbf53d23 | 1221 | struct lttng_directory_handle *chunk_directory_handle = NULL; |
d88744a4 | 1222 | |
d2956687 JG |
1223 | /* |
1224 | * The session daemon will only provide a chunk directory file | |
1225 | * descriptor for local traces. | |
1226 | */ | |
1227 | if (is_local_trace) { | |
1228 | int chunk_dirfd; | |
19990ed5 | 1229 | |
d2956687 JG |
1230 | /* Acknowledge the reception of the command. */ |
1231 | ret = consumer_send_status_msg(sock, | |
1232 | LTTCOMM_CONSUMERD_SUCCESS); | |
1233 | if (ret < 0) { | |
1234 | /* Somehow, the session daemon is not responding anymore. */ | |
1235 | goto end_nosignal; | |
1236 | } | |
92816cc3 | 1237 | |
d2956687 JG |
1238 | ret = lttcomm_recv_fds_unix_sock(sock, &chunk_dirfd, 1); |
1239 | if (ret != sizeof(chunk_dirfd)) { | |
1240 | ERR("Failed to receive trace chunk directory file descriptor"); | |
1241 | goto error_fatal; | |
1242 | } | |
92816cc3 | 1243 | |
d2956687 JG |
1244 | DBG("Received trace chunk directory fd (%d)", |
1245 | chunk_dirfd); | |
cbf53d23 | 1246 | chunk_directory_handle = lttng_directory_handle_create_from_dirfd( |
d2956687 | 1247 | chunk_dirfd); |
cbf53d23 | 1248 | if (!chunk_directory_handle) { |
d2956687 JG |
1249 | ERR("Failed to initialize chunk directory handle from directory file descriptor"); |
1250 | if (close(chunk_dirfd)) { | |
1251 | PERROR("Failed to close chunk directory file descriptor"); | |
1252 | } | |
1253 | goto error_fatal; | |
1254 | } | |
92816cc3 JG |
1255 | } |
1256 | ||
d2956687 JG |
1257 | ret_code = lttng_consumer_create_trace_chunk( |
1258 | !is_local_trace ? &relayd_id : NULL, | |
1259 | msg.u.create_trace_chunk.session_id, | |
1260 | msg.u.create_trace_chunk.chunk_id, | |
e5add6d0 JG |
1261 | (time_t) msg.u.create_trace_chunk |
1262 | .creation_timestamp, | |
d2956687 | 1263 | chunk_override_name, |
e5add6d0 JG |
1264 | msg.u.create_trace_chunk.credentials.is_set ? |
1265 | &credentials : | |
1266 | NULL, | |
cbf53d23 JG |
1267 | chunk_directory_handle); |
1268 | lttng_directory_handle_put(chunk_directory_handle); | |
d2956687 | 1269 | goto end_msg_sessiond; |
d88744a4 | 1270 | } |
d2956687 | 1271 | case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK: |
a1ae2ea5 | 1272 | { |
bbc4768c JG |
1273 | enum lttng_trace_chunk_command_type close_command = |
1274 | msg.u.close_trace_chunk.close_command.value; | |
d2956687 JG |
1275 | const uint64_t relayd_id = |
1276 | msg.u.close_trace_chunk.relayd_id.value; | |
ecd1a12f MD |
1277 | struct lttcomm_consumer_close_trace_chunk_reply reply; |
1278 | char path[LTTNG_PATH_MAX]; | |
d2956687 JG |
1279 | |
1280 | ret_code = lttng_consumer_close_trace_chunk( | |
1281 | msg.u.close_trace_chunk.relayd_id.is_set ? | |
bbc4768c JG |
1282 | &relayd_id : |
1283 | NULL, | |
d2956687 JG |
1284 | msg.u.close_trace_chunk.session_id, |
1285 | msg.u.close_trace_chunk.chunk_id, | |
bbc4768c JG |
1286 | (time_t) msg.u.close_trace_chunk.close_timestamp, |
1287 | msg.u.close_trace_chunk.close_command.is_set ? | |
1288 | &close_command : | |
ecd1a12f MD |
1289 | NULL, path); |
1290 | reply.ret_code = ret_code; | |
1291 | reply.path_length = strlen(path) + 1; | |
1292 | ret = lttcomm_send_unix_sock(sock, &reply, sizeof(reply)); | |
1293 | if (ret != sizeof(reply)) { | |
1294 | goto error_fatal; | |
1295 | } | |
1296 | ret = lttcomm_send_unix_sock(sock, path, reply.path_length); | |
1297 | if (ret != reply.path_length) { | |
1298 | goto error_fatal; | |
1299 | } | |
1300 | goto end_nosignal; | |
3654ed19 | 1301 | } |
d2956687 | 1302 | case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS: |
3654ed19 | 1303 | { |
d2956687 JG |
1304 | const uint64_t relayd_id = |
1305 | msg.u.trace_chunk_exists.relayd_id.value; | |
1306 | ||
1307 | ret_code = lttng_consumer_trace_chunk_exists( | |
1308 | msg.u.trace_chunk_exists.relayd_id.is_set ? | |
1309 | &relayd_id : NULL, | |
1310 | msg.u.trace_chunk_exists.session_id, | |
1311 | msg.u.trace_chunk_exists.chunk_id); | |
1312 | goto end_msg_sessiond; | |
a1ae2ea5 | 1313 | } |
3bd1e081 | 1314 | default: |
3f8e211f | 1315 | goto end_nosignal; |
3bd1e081 | 1316 | } |
3f8e211f | 1317 | |
3bd1e081 | 1318 | end_nosignal: |
4cbc1a04 DG |
1319 | /* |
1320 | * Return 1 to indicate success since the 0 value can be a socket | |
1321 | * shutdown during the recv() or send() call. | |
1322 | */ | |
c5c7998f JG |
1323 | ret = 1; |
1324 | goto end; | |
1325 | error_fatal: | |
1326 | /* This will issue a consumer stop. */ | |
1327 | ret = -1; | |
1328 | goto end; | |
d2956687 JG |
1329 | end_msg_sessiond: |
1330 | /* | |
1331 | * The returned value here is not useful since either way we'll return 1 to | |
1332 | * the caller because the session daemon socket management is done | |
1333 | * elsewhere. Returning a negative code or 0 will shut down the consumer. |
1334 | */ | |
1335 | ret = consumer_send_status_msg(sock, ret_code); | |
1336 | if (ret < 0) { | |
1337 | goto error_fatal; | |
1338 | } | |
c5c7998f JG |
1339 | ret = 1; |
1340 | end: | |
d2956687 | 1341 | health_code_update(); |
1803a064 | 1342 | rcu_read_unlock(); |
c5c7998f | 1343 | return ret; |
3bd1e081 | 1344 | } |
d41f73b7 | 1345 | |
94d49140 JD |
1346 | /* |
1347 | * Sync metadata, meaning request it from the session daemon and take a |
1348 | * buffer snapshot so that the metadata thread can consume it. |
1349 | * | |
1350 | * Metadata stream lock MUST be acquired. | |
1351 | * | |
1352 | * Return 0 if new metadata is available, ENODATA if the metadata stream |
1353 | * is empty, or a negative value on error. |
1354 | */ | |
1355 | int lttng_kconsumer_sync_metadata(struct lttng_consumer_stream *metadata) | |
1356 | { | |
1357 | int ret; | |
1358 | ||
1359 | assert(metadata); | |
1360 | ||
1361 | ret = kernctl_buffer_flush(metadata->wait_fd); | |
1362 | if (ret < 0) { | |
1363 | ERR("Failed to flush kernel stream"); | |
1364 | goto end; | |
1365 | } | |
1366 | ||
1367 | ret = kernctl_snapshot(metadata->wait_fd); | |
1368 | if (ret < 0) { | |
32af2c95 | 1369 | if (ret != -EAGAIN) { |
94d49140 JD |
1370 | ERR("Sync metadata, taking kernel snapshot failed."); |
1371 | goto end; | |
1372 | } | |
1373 | DBG("Sync metadata, no new kernel metadata"); | |
1374 | /* No new metadata, exit. */ | |
1375 | ret = ENODATA; | |
1376 | goto end; | |
1377 | } | |
1378 | ||
1379 | end: | |
1380 | return ret; | |
1381 | } | |
309167d2 | 1382 | |
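/*
 * Fill in the sub-buffer information common to data and metadata
 * streams: the sub-buffer size and its padded size.
 */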
fb83fe64 | 1383 | static |
6f9449c2 JG |
1384 | int extract_common_subbuffer_info(struct lttng_consumer_stream *stream, |
1385 | struct stream_subbuffer *subbuf) | |
fb83fe64 JD |
1386 | { |
1387 | int ret; | |
fb83fe64 | 1388 | |
6f9449c2 JG |
1389 | ret = kernctl_get_subbuf_size( |
1390 | stream->wait_fd, &subbuf->info.data.subbuf_size); | |
1391 | if (ret) { | |
fb83fe64 JD |
1392 | goto end; |
1393 | } | |
fb83fe64 | 1394 | |
6f9449c2 JG |
1395 | ret = kernctl_get_padded_subbuf_size( |
1396 | stream->wait_fd, &subbuf->info.data.padded_subbuf_size); | |
1397 | if (ret) { | |
fb83fe64 JD |
1398 | goto end; |
1399 | } | |
fb83fe64 JD |
1400 | |
1401 | end: | |
1402 | return ret; | |
1403 | } | |
1404 | ||
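/*
 * Metadata variant: extract the common sub-buffer information plus the
 * metadata version reported by the kernel tracer.
 */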
93ec662e | 1405 | static |
6f9449c2 JG |
1406 | int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream, |
1407 | struct stream_subbuffer *subbuf) | |
93ec662e JD |
1408 | { |
1409 | int ret; | |
93ec662e | 1410 | |
6f9449c2 JG |
1411 | ret = extract_common_subbuffer_info(stream, subbuf); |
1412 | if (ret) { | |
93ec662e JD |
1413 | goto end; |
1414 | } | |
1415 | ||
6f9449c2 JG |
1416 | ret = kernctl_get_metadata_version( |
1417 | stream->wait_fd, &subbuf->info.metadata.version); | |
1418 | if (ret) { | |
93ec662e JD |
1419 | goto end; |
1420 | } | |
1421 | ||
93ec662e JD |
1422 | end: |
1423 | return ret; | |
1424 | } | |
1425 | ||
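/*
 * Data variant: extract the common sub-buffer information plus the
 * packet size, content size, begin/end timestamps, discarded event
 * count, stream id and, when supported by the tracer, the packet
 * sequence number and stream instance id.
 */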
6f9449c2 JG |
1426 | static |
1427 | int extract_data_subbuffer_info(struct lttng_consumer_stream *stream, | |
1428 | struct stream_subbuffer *subbuf) | |
d41f73b7 | 1429 | { |
6f9449c2 | 1430 | int ret; |
d41f73b7 | 1431 | |
6f9449c2 JG |
1432 | ret = extract_common_subbuffer_info(stream, subbuf); |
1433 | if (ret) { | |
1434 | goto end; | |
1435 | } | |
309167d2 | 1436 | |
6f9449c2 JG |
1437 | ret = kernctl_get_packet_size( |
1438 | stream->wait_fd, &subbuf->info.data.packet_size); | |
1439 | if (ret < 0) { | |
1440 | PERROR("Failed to get sub-buffer packet size"); | |
1441 | goto end; | |
1442 | } | |
02d02e31 | 1443 | |
6f9449c2 JG |
1444 | ret = kernctl_get_content_size( |
1445 | stream->wait_fd, &subbuf->info.data.content_size); | |
1446 | if (ret < 0) { | |
1447 | PERROR("Failed to get sub-buffer content size"); | |
1448 | goto end; | |
d41f73b7 MD |
1449 | } |
1450 | ||
6f9449c2 JG |
1451 | ret = kernctl_get_timestamp_begin( |
1452 | stream->wait_fd, &subbuf->info.data.timestamp_begin); | |
1453 | if (ret < 0) { | |
1454 | PERROR("Failed to get sub-buffer begin timestamp"); | |
1455 | goto end; | |
1d4dfdef DG |
1456 | } |
1457 | ||
6f9449c2 JG |
1458 | ret = kernctl_get_timestamp_end( |
1459 | stream->wait_fd, &subbuf->info.data.timestamp_end); | |
1460 | if (ret < 0) { | |
1461 | PERROR("Failed to get sub-buffer end timestamp"); | |
1462 | goto end; | |
1463 | } | |
1464 | ||
1465 | ret = kernctl_get_events_discarded( | |
1466 | stream->wait_fd, &subbuf->info.data.events_discarded); | |
1467 | if (ret) { | |
1468 | PERROR("Failed to get sub-buffer events discarded count"); | |
1469 | goto end; | |
1470 | } | |
1471 | ||
1472 | ret = kernctl_get_sequence_number(stream->wait_fd, | |
1473 | &subbuf->info.data.sequence_number.value); | |
1474 | if (ret) { | |
1475 | /* May not be supported by older LTTng-modules. */ | |
1476 | if (ret != -ENOTTY) { | |
1477 | PERROR("Failed to get sub-buffer sequence number"); | |
1478 | goto end; | |
fb83fe64 | 1479 | } |
1c20f0e2 | 1480 | } else { |
6f9449c2 | 1481 | subbuf->info.data.sequence_number.is_set = true; |
309167d2 JD |
1482 | } |
1483 | ||
6f9449c2 JG |
1484 | ret = kernctl_get_stream_id( |
1485 | stream->wait_fd, &subbuf->info.data.stream_id); | |
1486 | if (ret < 0) { | |
1487 | PERROR("Failed to get stream id"); | |
1488 | goto end; | |
1489 | } | |
1d4dfdef | 1490 | |
6f9449c2 JG |
1491 | ret = kernctl_get_instance_id(stream->wait_fd, |
1492 | &subbuf->info.data.stream_instance_id.value); | |
1493 | if (ret) { | |
1494 | /* May not be supported by older LTTng-modules. */ | |
1495 | if (ret != -ENOTTY) { | |
1496 | PERROR("Failed to get stream instance id"); | |
1497 | goto end; | |
1d4dfdef | 1498 | } |
6f9449c2 JG |
1499 | } else { |
1500 | subbuf->info.data.stream_instance_id.is_set = true; | |
1501 | } | |
1502 | end: | |
1503 | return ret; | |
1504 | } | |
47e81c02 | 1505 | |
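/*
 * Take exclusive access to the next sub-buffer and extract its information
 * through the stream's extract_subbuffer_info operation.
 */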
6f9449c2 JG |
1506 | static |
1507 | int get_subbuffer_common(struct lttng_consumer_stream *stream, | |
1508 | struct stream_subbuffer *subbuffer) | |
1509 | { | |
1510 | int ret; | |
1511 | ||
1512 | ret = kernctl_get_next_subbuf(stream->wait_fd); | |
1513 | if (ret) { | |
1514 | goto end; | |
1515 | } | |
1516 | ||
1517 | ret = stream->read_subbuffer_ops.extract_subbuffer_info( | |
1518 | stream, subbuffer); | |
1519 | end: | |
1520 | return ret; | |
1521 | } | |
128708c3 | 1522 | |
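/*
 * splice flavour: expose the stream's wait_fd so that the sub-buffer can
 * later be spliced out of the ring buffer to its destination.
 */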
6f9449c2 JG |
1523 | static |
1524 | int get_next_subbuffer_splice(struct lttng_consumer_stream *stream, | |
1525 | struct stream_subbuffer *subbuffer) | |
1526 | { | |
1527 | int ret; | |
1d4dfdef | 1528 | |
6f9449c2 JG |
1529 | ret = get_subbuffer_common(stream, subbuffer); |
1530 | if (ret) { | |
1531 | goto end; | |
1532 | } | |
1d4dfdef | 1533 | |
6f9449c2 JG |
1534 | subbuffer->buffer.fd = stream->wait_fd; |
1535 | end: | |
1536 | return ret; | |
1537 | } | |
fd424d99 | 1538 | |
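/*
 * mmap flavour: expose the current sub-buffer as a buffer view into the
 * memory-mapped ring buffer.
 */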
6f9449c2 JG |
1539 | static |
1540 | int get_next_subbuffer_mmap(struct lttng_consumer_stream *stream, | |
1541 | struct stream_subbuffer *subbuffer) | |
1542 | { | |
1543 | int ret; | |
1544 | const char *addr; | |
1545 | ||
1546 | ret = get_subbuffer_common(stream, subbuffer); | |
1547 | if (ret) { | |
1548 | goto end; | |
128708c3 | 1549 | } |
6f9449c2 JG |
1550 | |
1551 | ret = get_current_subbuf_addr(stream, &addr); | |
1552 | if (ret) { | |
1553 | goto end; | |
d41f73b7 | 1554 | } |
6f9449c2 JG |
1555 | |
1556 | subbuffer->buffer.buffer = lttng_buffer_view_init( | |
1557 | addr, 0, subbuffer->info.data.padded_subbuf_size); | |
1558 | end: | |
1559 | return ret; | |
1560 | } | |
1561 | ||
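/*
 * Metadata flavour used by live sessions: additionally query the tracer
 * for the coherency (parseability) state of the metadata and record it in
 * the sub-buffer's info.
 */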
f5ba75b4 JG |
1562 | static |
1563 | int get_next_subbuffer_metadata_check(struct lttng_consumer_stream *stream, | |
1564 | struct stream_subbuffer *subbuffer) | |
1565 | { | |
1566 | int ret; | |
1567 | const char *addr; | |
1568 | bool coherent; | |
1569 | ||
1570 | ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd, | |
1571 | &coherent); | |
1572 | if (ret) { | |
1573 | goto end; | |
1574 | } | |
1575 | ||
1576 | ret = stream->read_subbuffer_ops.extract_subbuffer_info( | |
1577 | stream, subbuffer); | |
1578 | if (ret) { | |
1579 | goto end; | |
1580 | } | |
1581 | ||
1582 | LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent); | |
1583 | ||
1584 | ret = get_current_subbuf_addr(stream, &addr); | |
1585 | if (ret) { | |
1586 | goto end; | |
1587 | } | |
1588 | ||
1589 | subbuffer->buffer.buffer = lttng_buffer_view_init( | |
1590 | addr, 0, subbuffer->info.data.padded_subbuf_size); | |
1591 | DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s", | |
1592 | subbuffer->info.metadata.padded_subbuf_size, | |
1593 | coherent ? "true" : "false"); | |
1594 | end: | |
1595 | return ret; | |
1596 | } | |
1597 | ||
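/*
 * Release the current sub-buffer back to the tracer once it has been
 * consumed.
 */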
6f9449c2 JG |
1598 | static |
1599 | int put_next_subbuffer(struct lttng_consumer_stream *stream, | |
1600 | struct stream_subbuffer *subbuffer) | |
1601 | { | |
1602 | const int ret = kernctl_put_next_subbuf(stream->wait_fd); | |
1603 | ||
1604 | if (ret) { | |
1605 | if (ret == -EFAULT) { | |
1606 | PERROR("Error unreserving sub-buffer"); | |
1607 | } else if (ret == -EIO) { | |
d41f73b7 | 1608 | /* Should never happen with newer LTTng versions */ |
6f9449c2 | 1609 | PERROR("Reader has been pushed by the writer, last sub-buffer corrupted"); |
d41f73b7 | 1610 | } |
d41f73b7 MD |
1611 | } |
1612 | ||
6f9449c2 JG |
1613 | return ret; |
1614 | } | |
1c20f0e2 | 1615 | |
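/*
 * Probe whether the kernel tracer supports the "get next sub-buffer with
 * metadata check" operation; older lttng-modules report -ENOTTY.
 */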
f5ba75b4 JG |
1616 | static |
1617 | bool is_get_next_check_metadata_available(int tracer_fd) | |
1618 | { | |
1619 | return kernctl_get_next_subbuf_metadata_check(tracer_fd, NULL) != | |
1620 | -ENOTTY; | |
1621 | } | |
1622 | ||
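/*
 * Install the read_subbuffer operations of a stream according to its type
 * (metadata or data), its output mode (mmap or splice) and the capabilities
 * of the kernel tracer.
 *
 * Once installed, a consumption cycle roughly amounts to the following
 * (sketch only; the actual read loop lives in the common consumer code):
 *
 *   ret = stream->read_subbuffer_ops.get_next_subbuffer(stream, &subbuf);
 *   if (!ret) {
 *           write out subbuf.buffer.buffer (mmap) or splice from subbuf.buffer.fd;
 *           ret = stream->read_subbuffer_ops.put_next_subbuffer(stream, &subbuf);
 *   }
 */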
1623 | static | |
1624 | int lttng_kconsumer_set_stream_ops( | |
6f9449c2 JG |
1625 | struct lttng_consumer_stream *stream) |
1626 | { | |
f5ba75b4 JG |
1627 | int ret = 0; |
1628 | ||
1629 | if (stream->metadata_flag && stream->chan->is_live) { | |
1630 | DBG("Attempting to enable metadata bucketization for live consumers"); | |
1631 | if (is_get_next_check_metadata_available(stream->wait_fd)) { | |
1632 | DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached"); | |
1633 | stream->read_subbuffer_ops.get_next_subbuffer = | |
1634 | get_next_subbuffer_metadata_check; | |
1635 | ret = consumer_stream_enable_metadata_bucketization( | |
1636 | stream); | |
1637 | if (ret) { | |
1638 | goto end; | |
1639 | } | |
1640 | } else { | |
1641 | /* | |
1642 | * The kernel tracer version is too old to indicate | |
1643 | * when the metadata stream has reached a "coherent" | |
1644 | * (parseable) point. | |
1645 | * | |
1646 | * This means that a live viewer may see an incoherent | |
1647 | * sequence of metadata and fail to parse it. | |
1648 | */ | |
1649 | WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream"); | |
1650 | metadata_bucket_destroy(stream->metadata_bucket); | |
1651 | stream->metadata_bucket = NULL; | |
1652 | } | |
1653 | } | |
1654 | ||
1655 | if (!stream->read_subbuffer_ops.get_next_subbuffer) { | |
1656 | if (stream->chan->output == CONSUMER_CHANNEL_MMAP) { | |
1657 | stream->read_subbuffer_ops.get_next_subbuffer = | |
1658 | get_next_subbuffer_mmap; | |
1659 | } else { | |
1660 | stream->read_subbuffer_ops.get_next_subbuffer = | |
1661 | get_next_subbuffer_splice; | |
1662 | } | |
94d49140 JD |
1663 | } |
1664 | ||
6f9449c2 JG |
1665 | if (stream->metadata_flag) { |
1666 | stream->read_subbuffer_ops.extract_subbuffer_info = | |
1667 | extract_metadata_subbuffer_info; | |
1668 | } else { | |
1669 | stream->read_subbuffer_ops.extract_subbuffer_info = | |
1670 | extract_data_subbuffer_info; | |
1671 | if (stream->chan->is_live) { | |
1672 | stream->read_subbuffer_ops.send_live_beacon = | |
1673 | consumer_flush_kernel_index; | |
1674 | } | |
309167d2 JD |
1675 | } |
1676 | ||
6f9449c2 | 1677 | stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer; |
f5ba75b4 JG |
1678 | end: |
1679 | return ret; | |
d41f73b7 MD |
1680 | } |
1681 | ||
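/*
 * Prepare a newly received kernel stream for consumption: create its output
 * files when it is monitored, written locally and a current trace chunk
 * exists, map the ring buffer when the output mode is mmap, and install the
 * read_subbuffer operations.
 */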
1682 | int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream) | |
1683 | { | |
1684 | int ret; | |
ffe60014 DG |
1685 | |
1686 | assert(stream); | |
1687 | ||
2bba9e53 | 1688 | /* |
d2956687 JG |
1689 | * Don't create anything if this is set for streaming or if there is |
1690 | * no current trace chunk on the parent channel. | |
2bba9e53 | 1691 | */ |
d2956687 JG |
1692 | if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor && |
1693 | stream->chan->trace_chunk) { | |
1694 | ret = consumer_stream_create_output_files(stream, true); | |
1695 | if (ret) { | |
fe4477ee JD |
1696 | goto error; |
1697 | } | |
ffe60014 | 1698 | } |
d41f73b7 | 1699 | |
d41f73b7 MD |
1700 | if (stream->output == LTTNG_EVENT_MMAP) { |
1701 | /* Get the length of the mmap region. */ |
1702 | unsigned long mmap_len; | |
1703 | ||
1704 | ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len); | |
1705 | if (ret != 0) { | |
ffe60014 | 1706 | PERROR("kernctl_get_mmap_len"); |
d41f73b7 MD |
1707 | goto error_close_fd; |
1708 | } | |
1709 | stream->mmap_len = (size_t) mmap_len; | |
1710 | ||
ffe60014 DG |
1711 | stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ, |
1712 | MAP_PRIVATE, stream->wait_fd, 0); | |
d41f73b7 | 1713 | if (stream->mmap_base == MAP_FAILED) { |
ffe60014 | 1714 | PERROR("Error mmapping"); |
d41f73b7 MD |
1715 | ret = -1; |
1716 | goto error_close_fd; | |
1717 | } | |
1718 | } | |
1719 | ||
f5ba75b4 JG |
1720 | ret = lttng_kconsumer_set_stream_ops(stream); |
1721 | if (ret) { | |
1722 | goto error_close_fd; | |
1723 | } | |
6f9449c2 | 1724 | |
d41f73b7 MD |
1725 | /* We return 0 to let the library handle the FD internally. */ |
1726 | return 0; | |
1727 | ||
1728 | error_close_fd: | |
2f225ce2 | 1729 | if (stream->out_fd >= 0) { |
d41f73b7 MD |
1730 | int err; |
1731 | ||
1732 | err = close(stream->out_fd); | |
1733 | assert(!err); | |
2f225ce2 | 1734 | stream->out_fd = -1; |
d41f73b7 MD |
1735 | } |
1736 | error: | |
1737 | return ret; | |
1738 | } | |
1739 | ||
ca22feea DG |
1740 | /* |
1741 | * Check if data is still being extracted from the buffers for a specific | |
4e9a4686 DG |
1742 | * stream. The consumer data lock and the stream lock MUST be acquired |
1743 | * before calling this function. | |
ca22feea | 1744 | * |
6d805429 | 1745 | * Return 1 if the traced data is still being read, else 0, meaning that the |
ca22feea DG |
1746 | * data is available for trace viewer reading. |
1747 | */ | |
6d805429 | 1748 | int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream) |
ca22feea DG |
1749 | { |
1750 | int ret; | |
1751 | ||
1752 | assert(stream); | |
1753 | ||
873b9e9a MD |
1754 | if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) { |
1755 | ret = 0; | |
1756 | goto end; | |
1757 | } | |
1758 | ||
ca22feea DG |
1759 | ret = kernctl_get_next_subbuf(stream->wait_fd); |
1760 | if (ret == 0) { | |
1761 | /* There is still data, so put this sub-buffer back. */ | |
1762 | ret = kernctl_put_subbuf(stream->wait_fd); | |
1763 | assert(ret == 0); | |
6d805429 | 1764 | ret = 1; /* Data is pending */ |
4e9a4686 | 1765 | goto end; |
ca22feea DG |
1766 | } |
1767 | ||
6d805429 DG |
1768 | /* Data is NOT pending; it is ready to be read. */ |
1769 | ret = 0; | |
ca22feea | 1770 | |
6efae65e DG |
1771 | end: |
1772 | return ret; | |
ca22feea | 1773 | } |