Fix: consumerd: use packet sequence number for rotation position
src/common/consumer/consumer.c (lttng-tools.git)
/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <assert.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <inttypes.h>
#include <signal.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/time.h>
#include <common/compat/poll.h>
#include <common/compat/endian.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer/consumer-timer.h>
#include <common/consumer/consumer.h>
#include <common/consumer/consumer-stream.h>
#include <common/consumer/consumer-testpoint.h>
#include <common/align.h>
#include <common/consumer/consumer-metadata-cache.h>
#include <common/trace-chunk.h>
#include <common/trace-chunk-registry.h>
#include <common/string-utils/format.h>
#include <common/dynamic-array.h>

struct lttng_consumer_global_data consumer_data = {
	.stream_count = 0,
	.need_update = 1,
	.type = LTTNG_CONSUMER_UNKNOWN,
};

enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};

struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};

/* Flag used to temporarily pause data consumption from testpoints. */
int data_consumption_paused;

/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by consumer_thread_receive_fds when it notices that all fds have
 * hung up. Also updated by the signal handler (consumer_should_exit()). Read
 * by the polling threads.
 */
int consumer_quit;

/*
 * Global hash tables containing respectively the metadata and data streams.
 * The stream elements in these hash tables should only be updated by the
 * metadata poll thread for metadata and by the data poll thread for data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;

/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed, so the thread is simply sent back into a poll
 * wait call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	assert(pipe);

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
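
/*
 * Illustrative note (not part of the original source): the NULL stream
 * pointer written above is a pure wakeup token. A polling thread reading the
 * pipe is expected to treat it roughly as follows:
 *
 *	struct lttng_consumer_stream *stream;
 *
 *	(void) lttng_pipe_read(pipe, &stream, sizeof(stream));
 *	if (stream == NULL) {
 *		// No new stream; some global state changed, rebuild the poll set.
 *	}
 */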

static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}

static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;
	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}

void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}

static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		ret = -1;
		goto error;
	}
	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;
error:
	return (int) ret;
}
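
/*
 * Illustrative sketch (not part of the original source): a thread draining
 * consumer_channel_pipe would pair the helpers above roughly like this, where
 * `key` is only meaningful for CONSUMER_CHANNEL_DEL messages:
 *
 *	struct lttng_consumer_channel *chan;
 *	uint64_t key;
 *	enum consumer_channel_action action;
 *
 *	if (read_channel_pipe(ctx, &chan, &key, &action) < 0) {
 *		// Pipe error, bail out.
 *	}
 *	switch (action) {
 *	case CONSUMER_CHANNEL_ADD:
 *		// chan is valid; start monitoring it.
 *		break;
 *	case CONSUMER_CHANNEL_DEL:
 *		// chan is NULL; look the channel up by key instead.
 *		break;
 *	case CONSUMER_CHANNEL_QUIT:
 *		break;
 *	}
 */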

/*
 * Cleanup the stream list of a channel. Those streams are not yet globally
 * visible.
 */
static void clean_channel_stream_list(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so we
		 * have a guarantee that this call will succeed. Setting the monitor
		 * mode to 0 so we don't lock nor try to delete the stream from the
		 * global hash table.
		 */
		stream->monitor = 0;
		consumer_stream_destroy(stream, NULL);
	}
}

/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	assert(ht);

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	rcu_read_lock();

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	rcu_read_unlock();

	return stream;
}

static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	rcu_read_lock();
	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
	rcu_read_unlock();
}

/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}

/*
 * The consumer may not have had enough time, between the close of the channel
 * on the session daemon side and the cleanup here, to fully tear a channel
 * down. Thus, when a channel add is received with an already existing key, we
 * know for sure that the old channel will eventually get cleaned up as all of
 * its streams are closed.
 *
 * This function just nullifies the already existing channel key.
 */
static void steal_channel_key(uint64_t key)
{
	struct lttng_consumer_channel *channel;

	rcu_read_lock();
	channel = consumer_find_channel(key);
	if (channel) {
		channel->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need to iterate on
		 * this channel when iterating over the hash table. Just change the
		 * node key.
		 */
		channel->node.key = (uint64_t) -1ULL;
	}
	rcu_read_unlock();
}

static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_free_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}
	free(channel);
}

/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the RCU callback since we don't want
	 * the socket fds to be reassigned, which could leave the relayd object in
	 * a bad state.
	 *
	 * We do not have to lock the control socket mutex here since, at this
	 * stage, no one is referencing this relayd object anymore.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	pthread_mutex_destroy(&relayd->ctrl_sock_mutex);
	free(relayd);
}

/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}

/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	struct lttng_ht_iter iter;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Destroy streams that might have been left in the stream list. */
	clean_channel_stream_list(channel);

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}
	if (channel->monitor_timer_enabled == 1) {
		consumer_timer_monitor_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	lttng_trace_chunk_put(channel->trace_chunk);
	channel->trace_chunk = NULL;

	if (channel->is_published) {
		int ret;

		rcu_read_lock();
		iter.iter.node = &channel->node.node;
		ret = lttng_ht_del(consumer_data.channel_ht, &iter);
		assert(!ret);

		iter.iter.node = &channel->channels_by_session_id_ht_node.node;
		ret = lttng_ht_del(consumer_data.channels_by_session_id_ht,
				&iter);
		assert(!ret);
		rcu_read_unlock();
	}

	channel->is_deleted = true;
	call_rcu(&channel->node.head, free_channel_rcu);
end:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}

/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}

/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
	rcu_read_unlock();
}

/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object (meaning removing it from the relayd hash table),
 * closing the sockets and freeing the memory in an RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state has changed.
 */
void lttng_consumer_cleanup_relayd(struct consumer_relayd_sock_pair *relayd)
{
	uint64_t netidx;

	assert(relayd);

	DBG("Cleaning up relayd object ID %"PRIu64, relayd->net_seq_idx);

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in an RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * has changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the endpoint status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	notify_thread_lttng_pipe(relayd->ctx->consumer_data_pipe);
	notify_thread_lttng_pipe(relayd->ctx->consumer_metadata_pipe);
}

/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	assert(relayd);

	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}

/*
 * Completely destroy a stream from every visible data structure and the given
 * hash table, if any.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}

/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}

void consumer_stream_update_channel_attributes(
		struct lttng_consumer_stream *stream,
		struct lttng_consumer_channel *channel)
{
	stream->channel_read_only_attributes.tracefile_size =
			channel->tracefile_size;
}

struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		const char *channel_name,
		uint64_t relayd_id,
		uint64_t session_id,
		struct lttng_trace_chunk *trace_chunk,
		int cpu,
		int *alloc_ret,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		ret = -ENOMEM;
		goto end;
	}

	if (trace_chunk && !lttng_trace_chunk_get(trace_chunk)) {
		ERR("Failed to acquire trace chunk reference during the creation of a stream");
		ret = -1;
		goto error;
	}

	rcu_read_lock();
	stream->key = stream_key;
	stream->trace_chunk = trace_chunk;
	stream->out_fd = -1;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_file = NULL;
	stream->last_sequence_number = -1ULL;
	stream->rotate_position = -1ULL;
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->metadata_timer_lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	rcu_read_unlock();
	return stream;

error:
	rcu_read_unlock();
	lttng_trace_chunk_put(stream->trace_chunk);
	free(stream);
end:
	if (alloc_ret) {
		*alloc_ret = ret;
	}
	return NULL;
}

/*
 * Add a stream to the global list protected by a mutex.
 */
void consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;

	assert(stream);
	assert(ht);

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and redundant keys are allowed in
	 * this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		cmm_smp_wmb();
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
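
/*
 * Added note (not in the original source): the locking order taken above is
 * consumer_data.lock -> channel->lock -> channel->timer_lock -> stream->lock,
 * with the RCU read-side lock taken last and released first. Code that needs
 * several of these locks should follow the same order to avoid deadlocks.
 */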

/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	assert(relayd);

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}

/*
 * Allocate and return a consumer relayd socket.
 */
static struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->refcount = 0;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
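
/*
 * Illustrative sketch (not part of the original source): a freshly allocated
 * socket pair only becomes useful once its sockets are set up and the object
 * is published, roughly:
 *
 *	struct consumer_relayd_sock_pair *relayd;
 *
 *	relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
 *	if (!relayd) {
 *		// Allocation failed or net_seq_idx was -1ULL.
 *	}
 *	// ... initialize relayd->control_sock and relayd->data_sock ...
 *	(void) add_relayd(relayd);	// RCU read-side lock must be held.
 */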

/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}

/*
 * Find a relayd and send the stream
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(stream->net_seq_idx != -1ULL);
	assert(path);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size,
				stream->chan->tracefile_count,
				stream->trace_chunk);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd add stream failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end;
		}

		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}

/*
 * Find a relayd and send the streams sent message
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(net_seq_idx != -1ULL);

	/* Get relayd reference if it exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd != NULL) {
		/* Inform the relayd that all streams were sent. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_streams_sent(&relayd->control_sock);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd streams sent failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end;
		}
	} else {
		ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.",
				net_seq_idx);
		ret = -1;
		goto end;
	}

	ret = 0;
	DBG("All streams sent relayd id %" PRIu64, net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}

/*
 * Find a relayd and close the stream
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}

/*
 * Handle a stream for relayd transmission when the stream is meant for
 * network streaming, i.e. when its net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Safety net */
	assert(stream);
	assert(relayd);

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);

		/*
		 * Note that net_seq_num below is assigned the *current* value of
		 * next_net_seq_num and only after that is next_net_seq_num
		 * incremented. This is why, when issuing a command on the relayd
		 * using this next value, 1 should always be subtracted in order to
		 * compare the last sequence number seen on the relayd side with the
		 * last one sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
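
/*
 * Added note (not in the original source): concretely, if next_net_seq_num is
 * 10 when write_relayd_stream_header() runs, the header carries sequence
 * number 10 and the counter then becomes 11. A later query against the relayd
 * therefore has to compare the relayd's last seen sequence number with
 * next_net_seq_num - 1.
 */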

/*
 * Trigger a dump of the metadata content. Following/during the successful
 * completion of this call, the metadata poll thread will start receiving
 * metadata packets to consume.
 *
 * The caller must hold the channel and stream locks.
 */
static
int consumer_metadata_stream_dump(struct lttng_consumer_stream *stream)
{
	int ret;

	ASSERT_LOCKED(stream->chan->lock);
	ASSERT_LOCKED(stream->lock);
	assert(stream->metadata_flag);
	assert(stream->chan->trace_chunk);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * Reset the position of what has been read from the
		 * metadata cache to 0 so we can dump it again.
		 */
		ret = kernctl_metadata_cache_dump(stream->wait_fd);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Reset the position pushed from the metadata cache so it
		 * will write from the beginning on the next push.
		 */
		stream->ust_metadata_pushed = 0;
		ret = consumer_metadata_wakeup_pipe(stream->chan);
		break;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}
	if (ret < 0) {
		ERR("Failed to dump the metadata cache");
	}
	return ret;
}

static
int lttng_consumer_channel_set_trace_chunk(
		struct lttng_consumer_channel *channel,
		struct lttng_trace_chunk *new_trace_chunk)
{
	pthread_mutex_lock(&channel->lock);
	if (channel->is_deleted) {
		/*
		 * The channel has been logically deleted and should no longer
		 * be used. It has released its reference to its current trace
		 * chunk and should not acquire a new one.
		 *
		 * Return success as there is nothing for the caller to do.
		 */
		goto end;
	}

	/*
	 * The acquisition of the reference cannot fail (barring
	 * a severe internal error) since a reference to the published
	 * chunk is already held by the caller.
	 */
	if (new_trace_chunk) {
		const bool acquired_reference = lttng_trace_chunk_get(
				new_trace_chunk);

		assert(acquired_reference);
	}

	lttng_trace_chunk_put(channel->trace_chunk);
	channel->trace_chunk = new_trace_chunk;
end:
	pthread_mutex_unlock(&channel->lock);
	return 0;
}

/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const uint64_t *chunk_id,
		const char *pathname,
		const char *name,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval,
		const char *root_shm_path,
		const char *shm_path)
{
	struct lttng_consumer_channel *channel = NULL;
	struct lttng_trace_chunk *trace_chunk = NULL;

	if (chunk_id) {
		trace_chunk = lttng_trace_chunk_registry_find_chunk(
				consumer_data.chunk_registry, session_id,
				*chunk_id);
		if (!trace_chunk) {
			ERR("Failed to find trace chunk reference during creation of channel");
			goto end;
		}
	}

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->relayd_id = relayd_id;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	switch (output) {
	case LTTNG_EVENT_SPLICE:
		channel->output = CONSUMER_CHANNEL_SPLICE;
		break;
	case LTTNG_EVENT_MMAP:
		channel->output = CONSUMER_CHANNEL_MMAP;
		break;
	default:
		assert(0);
		free(channel);
		channel = NULL;
		goto end;
	}

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	if (root_shm_path) {
		strncpy(channel->root_shm_path, root_shm_path, sizeof(channel->root_shm_path));
		channel->root_shm_path[sizeof(channel->root_shm_path) - 1] = '\0';
	}
	if (shm_path) {
		strncpy(channel->shm_path, shm_path, sizeof(channel->shm_path));
		channel->shm_path[sizeof(channel->shm_path) - 1] = '\0';
	}

	lttng_ht_node_init_u64(&channel->node, channel->key);
	lttng_ht_node_init_u64(&channel->channels_by_session_id_ht_node,
			channel->session_id);

	channel->wait_fd = -1;
	CDS_INIT_LIST_HEAD(&channel->streams.head);

	if (trace_chunk) {
		int ret = lttng_consumer_channel_set_trace_chunk(channel,
				trace_chunk);
		if (ret) {
			goto error;
		}
	}

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	lttng_trace_chunk_put(trace_chunk);
	return channel;
error:
	consumer_del_channel(channel);
	channel = NULL;
	goto end;
}

/*
 * Add a channel to the global list protected by a mutex.
 *
 * Always return 0 indicating success.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);

	/*
	 * This gives us a guarantee that the channel we are about to add to the
	 * channel hash table will be unique. See the steal_channel_key() comment
	 * on why the channel key needs to be stolen at this stage.
	 */
	steal_channel_key(channel->key);

	rcu_read_lock();
	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);
	lttng_ht_add_u64(consumer_data.channels_by_session_id_ht,
			&channel->channels_by_session_id_ht_node);
	rcu_read_unlock();
	channel->is_published = true;

	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (channel->wait_fd != -1 && channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}

	return 0;
}

/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht, int *nb_inactive_fd)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(ctx);
	assert(ht);
	assert(pollfd);
	assert(local_stream);

	DBG("Updating poll fd array");
	*nb_inactive_fd = 0;
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 *
		 * We track the number of inactive FDs because they still need to be
		 * closed by the polling thread after a wakeup on the data_pipe or
		 * metadata_pipe.
		 */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			(*nb_inactive_fd)++;
			continue;
		}
		/*
		 * This clobbers the debug output way too much. Uncomment it if you
		 * need it for debugging purposes.
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FDs.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;

	(*pollfd)[i + 1].fd = lttng_pipe_get_readfd(ctx->consumer_wakeup_pipe);
	(*pollfd)[i + 1].events = POLLIN | POLLPRI;
	return i;
}
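
/*
 * Added note (not in the original source): callers are expected to size the
 * pollfd array for every stream plus the two control pipes appended above,
 * for example:
 *
 *	pollfd = zmalloc((nb_stream + 2) * sizeof(struct pollfd));
 *
 * since slots [i] and [i + 1] hold consumer_data_pipe and
 * consumer_wakeup_pipe while the returned count only covers the streams.
 */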

/*
 * Poll on the should_quit pipe and the command socket. Return -1 on error,
 * 1 if the thread should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		return -1;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		return 1;
	}
	return 0;
}

/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}

/*
 * Send return code to the session daemon.
 * If the socket is not defined, return 0; it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}

/*
 * Close all tracefiles and stream fds. MUST be called when all instances are
 * destroyed, i.e. when all threads were joined and have ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;
	unsigned int trace_chunks_left;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);
	lttng_ht_destroy(consumer_data.channels_by_session_id_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);

	/*
	 * Trace chunks in the registry may still exist if the session
	 * daemon has encountered an internal error and could not
	 * tear down its sessions and/or trace chunks properly.
	 *
	 * Release the session daemon's implicit reference to any remaining
	 * trace chunk and print an error if any trace chunk was found. Note
	 * that there are _no_ legitimate cases for trace chunks to be left,
	 * it is a leak. However, it can happen following a crash of the
	 * session daemon and not emptying the registry would cause an assertion
	 * to hit.
	 */
	trace_chunks_left = lttng_trace_chunk_registry_put_each_chunk(
			consumer_data.chunk_registry);
	if (trace_chunks_left) {
		ERR("%u trace chunks are leaked by lttng-consumerd. "
				"This can be caused by an internal error of the session daemon.",
				trace_chunks_left);
	}
	/* Run all callbacks freeing each chunk. */
	rcu_barrier();
	lttng_trace_chunk_registry_destroy(consumer_data.chunk_registry);
}

/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	CMM_STORE_SHARED(consumer_quit, 1);
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}

/*
 * Flush pending writes to trace output disk file.
 */
static
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int ret;
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	ret = posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
	if (ret && ret != -ENOSYS) {
		errno = ret;
		PERROR("posix_fadvise on fd %i", outfd);
	}
}

/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument; this function is called when data is
 * available on a buffer. It is responsible for doing the
 * kernctl_get_next_subbuf, reading the data with mmap or splice depending on
 * the buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ctx->consumer_wakeup_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_wakeup_pipe) {
		goto error_wakeup_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ctx->channel_monitor_pipe = -1;

	return ctx;

error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
error_wakeup_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
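
/*
 * Illustrative sketch (not part of the original source): a consumer daemon
 * front-end would typically build its context once at start-up with
 * tracer-specific callbacks (the callback names below are placeholders):
 *
 *	struct lttng_consumer_local_data *ctx;
 *
 *	ctx = lttng_consumer_create(LTTNG_CONSUMER_KERNEL,
 *			my_read_subbuffer, my_recv_channel,
 *			my_recv_stream, my_update_stream);
 *	if (!ctx) {
 *		// Allocation or pipe creation failed.
 *	}
 *	lttng_consumer_set_error_sock(ctx, error_sock_fd);
 */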

/*
 * Iterate over all streams of the hashtable and free them properly.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}

/*
 * Iterate over all streams of the metadata hashtable and free them
 * properly.
 */
static void destroy_metadata_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}

/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	if (!ctx) {
		return;
	}

	destroy_data_stream_ht(data_ht);
	destroy_metadata_stream_ht(metadata_ht);

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
	utils_close_pipe(ctx->consumer_should_quit);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}

/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the PERROR
		 * in order not to clobber the error output, since this can happen in
		 * a normal code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because, if ret != sizeof(hdr), we
		 * don't handle writing the missing part, so report that as an error
		 * and don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
3bd1e081 1656/*
09e26845
DG
1657 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
1658 * core function for writing trace buffers to either the local filesystem or
1659 * the network.
1660 *
d2956687 1661 * It must be called with the stream and the channel lock held.
79d4ffb7 1662 *
09e26845 1663 * Careful review MUST be put if any changes occur!
3bd1e081
MD
1664 *
1665 * Returns the number of bytes written
1666 */
4078b776 1667ssize_t lttng_consumer_on_read_subbuffer_mmap(
3bd1e081 1668 struct lttng_consumer_local_data *ctx,
1d4dfdef 1669 struct lttng_consumer_stream *stream, unsigned long len,
309167d2 1670 unsigned long padding,
50adc264 1671 struct ctf_packet_index *index)
3bd1e081 1672{
f02e1e8a 1673 unsigned long mmap_offset;
ffe60014 1674 void *mmap_base;
994ab360 1675 ssize_t ret = 0;
f02e1e8a
DG
1676 off_t orig_offset = stream->out_fd_offset;
1677 /* Default is on the disk */
1678 int outfd = stream->out_fd;
f02e1e8a 1679 struct consumer_relayd_sock_pair *relayd = NULL;
8994307f 1680 unsigned int relayd_hang_up = 0;
f02e1e8a
DG
1681
1682 /* RCU lock for the relayd pointer */
1683 rcu_read_lock();
7fd975c5 1684 assert(stream->net_seq_idx != (uint64_t) -1ULL ||
948411cd 1685 stream->trace_chunk);
d2956687 1686
f02e1e8a 1687 /* Flag that the current stream if set for network streaming. */
da009f2c 1688 if (stream->net_seq_idx != (uint64_t) -1ULL) {
f02e1e8a
DG
1689 relayd = consumer_find_relayd(stream->net_seq_idx);
1690 if (relayd == NULL) {
56591bac 1691 ret = -EPIPE;
f02e1e8a
DG
1692 goto end;
1693 }
1694 }
1695
1696 /* get the offset inside the fd to mmap */
3bd1e081
MD
1697 switch (consumer_data.type) {
1698 case LTTNG_CONSUMER_KERNEL:
ffe60014 1699 mmap_base = stream->mmap_base;
f02e1e8a 1700 ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
994ab360 1701 if (ret < 0) {
56591bac 1702 PERROR("tracer ctl get_mmap_read_offset");
56591bac
MD
1703 goto end;
1704 }
f02e1e8a 1705 break;
7753dea8
MD
1706 case LTTNG_CONSUMER32_UST:
1707 case LTTNG_CONSUMER64_UST:
ffe60014
DG
1708 mmap_base = lttng_ustctl_get_mmap_base(stream);
1709 if (!mmap_base) {
1710 ERR("read mmap get mmap base for stream %s", stream->name);
994ab360 1711 ret = -EPERM;
ffe60014
DG
1712 goto end;
1713 }
1714 ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
56591bac
MD
1715 if (ret != 0) {
1716 PERROR("tracer ctl get_mmap_read_offset");
994ab360 1717 ret = -EINVAL;
56591bac
MD
1718 goto end;
1719 }
f02e1e8a 1720 break;
3bd1e081
MD
1721 default:
1722 ERR("Unknown consumer_data type");
1723 assert(0);
1724 }
b9182dd9 1725
f02e1e8a
DG
1726 /* Handle stream on the relayd if the output is on the network */
1727 if (relayd) {
1728 unsigned long netlen = len;
1729
1730 /*
1731 * Lock the control socket for the complete duration of the function
1732 * since from this point on we will use the socket.
1733 */
1734 if (stream->metadata_flag) {
1735 /* Metadata requires the control socket. */
1736 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
93ec662e
JD
1737 if (stream->reset_metadata_flag) {
1738 ret = relayd_reset_metadata(&relayd->control_sock,
1739 stream->relayd_stream_id,
1740 stream->metadata_version);
1741 if (ret < 0) {
1742 relayd_hang_up = 1;
1743 goto write_error;
1744 }
1745 stream->reset_metadata_flag = 0;
1746 }
1d4dfdef 1747 netlen += sizeof(struct lttcomm_relayd_metadata_payload);
f02e1e8a
DG
1748 }
1749
1d4dfdef 1750 ret = write_relayd_stream_header(stream, netlen, padding, relayd);
994ab360
DG
1751 if (ret < 0) {
1752 relayd_hang_up = 1;
1753 goto write_error;
1754 }
1755 /* Use the returned socket. */
1756 outfd = ret;
f02e1e8a 1757
994ab360
DG
1758 /* Write metadata stream id before payload */
1759 if (stream->metadata_flag) {
239f61af 1760 ret = write_relayd_metadata_id(outfd, stream, padding);
994ab360 1761 if (ret < 0) {
8994307f
DG
1762 relayd_hang_up = 1;
1763 goto write_error;
1764 }
f02e1e8a 1765 }
1d4dfdef
DG
1766 } else {
1767 /* Not streaming: the length must include the full padding. */
1768 len += padding;
1624d5b7 1769
93ec662e
JD
1770 if (stream->metadata_flag && stream->reset_metadata_flag) {
1771 ret = utils_truncate_stream_file(stream->out_fd, 0);
1772 if (ret < 0) {
1773 ERR("Reset metadata file");
1774 goto end;
1775 }
1776 stream->reset_metadata_flag = 0;
1777 }
1778
1624d5b7
JD
1779 /*
1780 * Check if we need to change the tracefile before writing the packet.
1781 */
1782 if (stream->chan->tracefile_size > 0 &&
1783 (stream->tracefile_size_current + len) >
1784 stream->chan->tracefile_size) {
d2956687
JG
1785 ret = consumer_stream_rotate_output_files(stream);
1786 if (ret) {
1624d5b7
JD
1787 goto end;
1788 }
309167d2 1789 outfd = stream->out_fd;
a1ae300f 1790 orig_offset = 0;
1624d5b7
JD
1791 }
1792 stream->tracefile_size_current += len;
309167d2
JD
1793 if (index) {
1794 index->offset = htobe64(stream->out_fd_offset);
1795 }
f02e1e8a
DG
1796 }
1797
d02b8372
DG
1798 /*
1799 * This call guarantees that len or less is returned. It is impossible to
1800 * receive a return value that is bigger than len.
1801 */
1802 ret = lttng_write(outfd, mmap_base + mmap_offset, len);
1803 DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
1804 if (ret < 0 || ((size_t) ret != len)) {
1805 /*
1806 * Report error to caller if nothing was written else at least send the
1807 * amount written.
1808 */
1809 if (ret < 0) {
994ab360 1810 ret = -errno;
f02e1e8a 1811 }
994ab360 1812 relayd_hang_up = 1;
f02e1e8a 1813
d02b8372 1814 /* Socket operation failed. We consider the relayd dead */
fcf0f774 1815 if (errno == EPIPE) {
d02b8372
DG
1816 /*
1817 * This is possible if the fd is closed on the other side
1818 * (outfd) or on any write error. It can be a bit verbose during
1819 * normal execution, for instance if the relayd is stopped
1820 * abruptly, so keep this as a DBG statement.
1821 */
1822 DBG("Consumer mmap write detected relayd hang up");
994ab360
DG
1823 } else {
1824 /* Unhandled error, print it and stop function right now. */
1825 PERROR("Error in write mmap (ret %zd != len %lu)", ret, len);
f02e1e8a 1826 }
994ab360 1827 goto write_error;
d02b8372
DG
1828 }
1829 stream->output_written += ret;
d02b8372
DG
1830
1831 /* This call is useless on a socket so better save a syscall. */
1832 if (!relayd) {
1833 /* This won't block, but will start writeout asynchronously */
1834 lttng_sync_file_range(outfd, stream->out_fd_offset, len,
1835 SYNC_FILE_RANGE_WRITE);
1836 stream->out_fd_offset += len;
f5dbe415 1837 lttng_consumer_sync_trace_file(stream, orig_offset);
f02e1e8a 1838 }
f02e1e8a 1839
8994307f
DG
1840write_error:
1841 /*
1842 * This is a special case where the relayd has closed its socket. Let's
1843 * clean up the relayd object and all associated streams.
1844 */
1845 if (relayd && relayd_hang_up) {
9276e5c8
JR
1846 ERR("Relayd hangup. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
1847 lttng_consumer_cleanup_relayd(relayd);
8994307f
DG
1848 }
1849
f02e1e8a
DG
1850end:
1851 /* Unlock only if ctrl socket used */
1852 if (relayd && stream->metadata_flag) {
1853 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
1854 }
1855
1856 rcu_read_unlock();
994ab360 1857 return ret;
3bd1e081
MD
1858}
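/*
 * Hypothetical caller sketch (illustration only): as documented above, the
 * channel and stream locks must be held around this call. The ordering used
 * elsewhere in this file (see lttng_consumer_read_subbuffer) is channel lock
 * first, then stream lock:
 *
 *     pthread_mutex_lock(&stream->chan->lock);
 *     pthread_mutex_lock(&stream->lock);
 *     written = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, len,
 *             padding, &index);
 *     pthread_mutex_unlock(&stream->lock);
 *     pthread_mutex_unlock(&stream->chan->lock);
 */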
1859
1860/*
1861 * Splice the data from the ring buffer to the tracefile.
1862 *
79d4ffb7
DG
1863 * It must be called with the stream lock held.
1864 *
3bd1e081
MD
1865 * Returns the number of bytes spliced.
1866 */
4078b776 1867ssize_t lttng_consumer_on_read_subbuffer_splice(
3bd1e081 1868 struct lttng_consumer_local_data *ctx,
1d4dfdef 1869 struct lttng_consumer_stream *stream, unsigned long len,
309167d2 1870 unsigned long padding,
50adc264 1871 struct ctf_packet_index *index)
3bd1e081 1872{
f02e1e8a
DG
1873 ssize_t ret = 0, written = 0, ret_splice = 0;
1874 loff_t offset = 0;
1875 off_t orig_offset = stream->out_fd_offset;
1876 int fd = stream->wait_fd;
1877 /* Default is on the disk */
1878 int outfd = stream->out_fd;
f02e1e8a 1879 struct consumer_relayd_sock_pair *relayd = NULL;
fb3a43a9 1880 int *splice_pipe;
8994307f 1881 unsigned int relayd_hang_up = 0;
f02e1e8a 1882
3bd1e081
MD
1883 switch (consumer_data.type) {
1884 case LTTNG_CONSUMER_KERNEL:
f02e1e8a 1885 break;
7753dea8
MD
1886 case LTTNG_CONSUMER32_UST:
1887 case LTTNG_CONSUMER64_UST:
f02e1e8a 1888 /* Not supported for user space tracing */
3bd1e081
MD
1889 return -ENOSYS;
1890 default:
1891 ERR("Unknown consumer_data type");
1892 assert(0);
3bd1e081
MD
1893 }
1894
f02e1e8a
DG
1895 /* RCU lock for the relayd pointer */
1896 rcu_read_lock();
1897
1898 /* Flag that the current stream if set for network streaming. */
da009f2c 1899 if (stream->net_seq_idx != (uint64_t) -1ULL) {
f02e1e8a
DG
1900 relayd = consumer_find_relayd(stream->net_seq_idx);
1901 if (relayd == NULL) {
ad0b0d23 1902 written = -ret;
f02e1e8a
DG
1903 goto end;
1904 }
1905 }
a2361a61 1906 splice_pipe = stream->splice_pipe;
fb3a43a9 1907
f02e1e8a 1908 /* Write metadata stream id before payload */
1d4dfdef 1909 if (relayd) {
ad0b0d23 1910 unsigned long total_len = len;
f02e1e8a 1911
1d4dfdef
DG
1912 if (stream->metadata_flag) {
1913 /*
1914 * Lock the control socket for the complete duration of the function
1915 * since from this point on we will use the socket.
1916 */
1917 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
1918
93ec662e
JD
1919 if (stream->reset_metadata_flag) {
1920 ret = relayd_reset_metadata(&relayd->control_sock,
1921 stream->relayd_stream_id,
1922 stream->metadata_version);
1923 if (ret < 0) {
1924 relayd_hang_up = 1;
1925 goto write_error;
1926 }
1927 stream->reset_metadata_flag = 0;
1928 }
239f61af 1929 ret = write_relayd_metadata_id(splice_pipe[1], stream,
1d4dfdef
DG
1930 padding);
1931 if (ret < 0) {
1932 written = ret;
ad0b0d23
DG
1933 relayd_hang_up = 1;
1934 goto write_error;
1d4dfdef
DG
1935 }
1936
1937 total_len += sizeof(struct lttcomm_relayd_metadata_payload);
1938 }
1939
1940 ret = write_relayd_stream_header(stream, total_len, padding, relayd);
ad0b0d23
DG
1941 if (ret < 0) {
1942 written = ret;
1943 relayd_hang_up = 1;
1944 goto write_error;
f02e1e8a 1945 }
ad0b0d23
DG
1946 /* Use the returned socket. */
1947 outfd = ret;
1d4dfdef
DG
1948 } else {
1949 /* Not streaming: the length must include the full padding. */
1950 len += padding;
1624d5b7 1951
93ec662e
JD
1952 if (stream->metadata_flag && stream->reset_metadata_flag) {
1953 ret = utils_truncate_stream_file(stream->out_fd, 0);
1954 if (ret < 0) {
1955 ERR("Reset metadata file");
1956 goto end;
1957 }
1958 stream->reset_metadata_flag = 0;
1959 }
1624d5b7
JD
1960 /*
1961 * Check if we need to change the tracefile before writing the packet.
1962 */
1963 if (stream->chan->tracefile_size > 0 &&
1964 (stream->tracefile_size_current + len) >
1965 stream->chan->tracefile_size) {
d2956687 1966 ret = consumer_stream_rotate_output_files(stream);
1624d5b7 1967 if (ret < 0) {
ad0b0d23 1968 written = ret;
1624d5b7
JD
1969 goto end;
1970 }
309167d2 1971 outfd = stream->out_fd;
a1ae300f 1972 orig_offset = 0;
1624d5b7
JD
1973 }
1974 stream->tracefile_size_current += len;
309167d2 1975 index->offset = htobe64(stream->out_fd_offset);
f02e1e8a
DG
1976 }
1977
1978 while (len > 0) {
1d4dfdef
DG
1979 DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
1980 (unsigned long)offset, len, fd, splice_pipe[1]);
fb3a43a9 1981 ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
f02e1e8a
DG
1982 SPLICE_F_MOVE | SPLICE_F_MORE);
1983 DBG("splice chan to pipe, ret %zd", ret_splice);
1984 if (ret_splice < 0) {
d02b8372 1985 ret = errno;
ad0b0d23 1986 written = -ret;
d02b8372 1987 PERROR("Error in relay splice");
f02e1e8a
DG
1988 goto splice_error;
1989 }
1990
1991 /* Handle stream on the relayd if the output is on the network */
ad0b0d23
DG
1992 if (relayd && stream->metadata_flag) {
1993 size_t metadata_payload_size =
1994 sizeof(struct lttcomm_relayd_metadata_payload);
1995
1996 /* Update counter to fit the spliced data */
1997 ret_splice += metadata_payload_size;
1998 len += metadata_payload_size;
1999 /*
2000 * We do this so the return value can match the len passed as
2001 * argument to this function.
2002 */
2003 written -= metadata_payload_size;
f02e1e8a
DG
2004 }
2005
2006 /* Splice data out */
fb3a43a9 2007 ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
f02e1e8a 2008 ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
a2361a61
JD
2009 DBG("Consumer splice pipe to file (out_fd: %d), ret %zd",
2010 outfd, ret_splice);
f02e1e8a 2011 if (ret_splice < 0) {
d02b8372 2012 ret = errno;
ad0b0d23
DG
2013 written = -ret;
2014 relayd_hang_up = 1;
2015 goto write_error;
f02e1e8a 2016 } else if (ret_splice > len) {
d02b8372
DG
2017 /*
2018 * We don't expect this code path to be executed but you never know,
2019 * so this is an extra protection against a buggy splice().
2020 */
f02e1e8a 2021 ret = errno;
ad0b0d23 2022 written += ret_splice;
d02b8372
DG
2023 PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice,
2024 len);
f02e1e8a 2025 goto splice_error;
d02b8372
DG
2026 } else {
2027 /* All good, update current len and continue. */
2028 len -= ret_splice;
f02e1e8a 2029 }
f02e1e8a
DG
2030
2031 /* This call is useless on a socket so better save a syscall. */
2032 if (!relayd) {
2033 /* This won't block, but will start writeout asynchronously */
2034 lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
2035 SYNC_FILE_RANGE_WRITE);
2036 stream->out_fd_offset += ret_splice;
2037 }
e5d1a9b3 2038 stream->output_written += ret_splice;
f02e1e8a
DG
2039 written += ret_splice;
2040 }
f5dbe415
JG
2041 if (!relayd) {
2042 lttng_consumer_sync_trace_file(stream, orig_offset);
2043 }
f02e1e8a
DG
2044 goto end;
2045
8994307f
DG
2046write_error:
2047 /*
2048 * This is a special case where the relayd has closed its socket. Let's
2049 * clean up the relayd object and all associated streams.
2050 */
2051 if (relayd && relayd_hang_up) {
9276e5c8
JR
2052 ERR("Relayd hangup. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
2053 lttng_consumer_cleanup_relayd(relayd);
8994307f
DG
2054 /* Skip splice error so the consumer does not fail */
2055 goto end;
2056 }
2057
f02e1e8a
DG
2058splice_error:
2059 /* send the appropriate error description to sessiond */
2060 switch (ret) {
f02e1e8a 2061 case EINVAL:
f73fabfd 2062 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
f02e1e8a
DG
2063 break;
2064 case ENOMEM:
f73fabfd 2065 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
f02e1e8a
DG
2066 break;
2067 case ESPIPE:
f73fabfd 2068 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
f02e1e8a
DG
2069 break;
2070 }
2071
2072end:
2073 if (relayd && stream->metadata_flag) {
2074 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
2075 }
2076
2077 rcu_read_unlock();
2078 return written;
3bd1e081
MD
2079}
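/*
 * Summary sketch (added for clarity, not upstream documentation): the splice
 * path above moves data in two stages without copying it through user space:
 *
 *     ring buffer fd (stream->wait_fd)  --splice-->  stream->splice_pipe[1]
 *     stream->splice_pipe[0]            --splice-->  outfd (tracefile or relayd socket)
 *
 * For metadata going to a relayd, the stream id/padding header is written
 * into the pipe first so that it precedes the spliced payload.
 */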
2080
15055ce5
JD
2081/*
2082 * Sample the snapshot positions for a specific fd
2083 *
2084 * Returns 0 on success, < 0 on error
2085 */
2086int lttng_consumer_sample_snapshot_positions(struct lttng_consumer_stream *stream)
2087{
2088 switch (consumer_data.type) {
2089 case LTTNG_CONSUMER_KERNEL:
2090 return lttng_kconsumer_sample_snapshot_positions(stream);
2091 case LTTNG_CONSUMER32_UST:
2092 case LTTNG_CONSUMER64_UST:
2093 return lttng_ustconsumer_sample_snapshot_positions(stream);
2094 default:
2095 ERR("Unknown consumer_data type");
2096 assert(0);
2097 return -ENOSYS;
2098 }
2099}
3bd1e081
MD
2100/*
2101 * Take a snapshot for a specific fd
2102 *
2103 * Returns 0 on success, < 0 on error
2104 */
ffe60014 2105int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
3bd1e081
MD
2106{
2107 switch (consumer_data.type) {
2108 case LTTNG_CONSUMER_KERNEL:
ffe60014 2109 return lttng_kconsumer_take_snapshot(stream);
7753dea8
MD
2110 case LTTNG_CONSUMER32_UST:
2111 case LTTNG_CONSUMER64_UST:
ffe60014 2112 return lttng_ustconsumer_take_snapshot(stream);
3bd1e081
MD
2113 default:
2114 ERR("Unknown consumer_data type");
2115 assert(0);
2116 return -ENOSYS;
2117 }
3bd1e081
MD
2118}
2119
2120/*
2121 * Get the produced position
2122 *
2123 * Returns 0 on success, < 0 on error
2124 */
ffe60014 2125int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
3bd1e081
MD
2126 unsigned long *pos)
2127{
2128 switch (consumer_data.type) {
2129 case LTTNG_CONSUMER_KERNEL:
ffe60014 2130 return lttng_kconsumer_get_produced_snapshot(stream, pos);
7753dea8
MD
2131 case LTTNG_CONSUMER32_UST:
2132 case LTTNG_CONSUMER64_UST:
ffe60014 2133 return lttng_ustconsumer_get_produced_snapshot(stream, pos);
3bd1e081
MD
2134 default:
2135 ERR("Unknown consumer_data type");
2136 assert(0);
2137 return -ENOSYS;
2138 }
2139}
2140
15055ce5
JD
2141/*
2142 * Get the consumed position (free-running counter position in bytes).
2143 *
2144 * Returns 0 on success, < 0 on error
2145 */
2146int lttng_consumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
2147 unsigned long *pos)
2148{
2149 switch (consumer_data.type) {
2150 case LTTNG_CONSUMER_KERNEL:
2151 return lttng_kconsumer_get_consumed_snapshot(stream, pos);
2152 case LTTNG_CONSUMER32_UST:
2153 case LTTNG_CONSUMER64_UST:
2154 return lttng_ustconsumer_get_consumed_snapshot(stream, pos);
2155 default:
2156 ERR("Unknown consumer_data type");
2157 assert(0);
2158 return -ENOSYS;
2159 }
2160}
2161
3bd1e081
MD
2162int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
2163 int sock, struct pollfd *consumer_sockpoll)
2164{
2165 switch (consumer_data.type) {
2166 case LTTNG_CONSUMER_KERNEL:
2167 return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
7753dea8
MD
2168 case LTTNG_CONSUMER32_UST:
2169 case LTTNG_CONSUMER64_UST:
3bd1e081
MD
2170 return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
2171 default:
2172 ERR("Unknown consumer_data type");
2173 assert(0);
2174 return -ENOSYS;
2175 }
2176}
2177
1f8d1c14 2178static
6d574024 2179void lttng_consumer_close_all_metadata(void)
d88aee68
DG
2180{
2181 switch (consumer_data.type) {
2182 case LTTNG_CONSUMER_KERNEL:
2183 /*
2184 * The Kernel consumer has a different metadata scheme so we don't
2185 * close anything because the stream will be closed by the session
2186 * daemon.
2187 */
2188 break;
2189 case LTTNG_CONSUMER32_UST:
2190 case LTTNG_CONSUMER64_UST:
2191 /*
2192 * Close all metadata streams. The metadata hash table is passed and
2193 * this call iterates over it by closing all wakeup fd. This is safe
2194 * because at this point we are sure that the metadata producer is
2195 * either dead or blocked.
2196 */
6d574024 2197 lttng_ustconsumer_close_all_metadata(metadata_ht);
d88aee68
DG
2198 break;
2199 default:
2200 ERR("Unknown consumer_data type");
2201 assert(0);
2202 }
2203}
2204
fb3a43a9
DG
2205/*
2206 * Clean up a metadata stream and free its memory.
2207 */
e316aad5
DG
2208void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
2209 struct lttng_ht *ht)
fb3a43a9 2210{
a6ef8ee6
JG
2211 struct lttng_consumer_channel *channel = NULL;
2212 bool free_channel = false;
fb3a43a9
DG
2213
2214 assert(stream);
2215 /*
2216 * This call should NEVER receive a regular stream. It must always be a
2217 * metadata stream; this is crucial for data structure synchronization.
2218 */
2219 assert(stream->metadata_flag);
2220
e316aad5
DG
2221 DBG3("Consumer delete metadata stream %d", stream->wait_fd);
2222
74251bb8 2223 pthread_mutex_lock(&consumer_data.lock);
a6ef8ee6
JG
2224 /*
2225 * Note that this assumes that a stream's channel is never changed and
2226 * that the stream's lock doesn't need to be taken to sample its
2227 * channel.
2228 */
2229 channel = stream->chan;
2230 pthread_mutex_lock(&channel->lock);
3dad2c0f 2231 pthread_mutex_lock(&stream->lock);
a6ef8ee6 2232 if (channel->metadata_cache) {
081424af 2233 /* Only applicable to userspace consumers. */
a6ef8ee6 2234 pthread_mutex_lock(&channel->metadata_cache->lock);
081424af 2235 }
8994307f 2236
6d574024
DG
2237 /* Remove any reference to that stream. */
2238 consumer_stream_delete(stream, ht);
ca22feea 2239
6d574024
DG
2240 /* Close down everything including the relayd if one. */
2241 consumer_stream_close(stream);
2242 /* Destroy tracer buffers of the stream. */
2243 consumer_stream_destroy_buffers(stream);
fb3a43a9
DG
2244
2245 /* Atomically decrement channel refcount since other threads can use it. */
a6ef8ee6
JG
2246 if (!uatomic_sub_return(&channel->refcount, 1)
2247 && !uatomic_read(&channel->nb_init_stream_left)) {
c30aaa51 2248 /* Go for channel deletion! */
a6ef8ee6 2249 free_channel = true;
fb3a43a9 2250 }
a6ef8ee6 2251 stream->chan = NULL;
fb3a43a9 2252
73811ecc
DG
2253 /*
2254 * Nullify the stream reference so it is not used after deletion. The
6d574024
DG
2255 * channel lock MUST be acquired before being able to check for a NULL
2256 * pointer value.
73811ecc 2257 */
a6ef8ee6 2258 channel->metadata_stream = NULL;
73811ecc 2259
a6ef8ee6
JG
2260 if (channel->metadata_cache) {
2261 pthread_mutex_unlock(&channel->metadata_cache->lock);
081424af 2262 }
3dad2c0f 2263 pthread_mutex_unlock(&stream->lock);
a6ef8ee6 2264 pthread_mutex_unlock(&channel->lock);
74251bb8 2265 pthread_mutex_unlock(&consumer_data.lock);
e316aad5 2266
a6ef8ee6
JG
2267 if (free_channel) {
2268 consumer_del_channel(channel);
e316aad5
DG
2269 }
2270
d2956687
JG
2271 lttng_trace_chunk_put(stream->trace_chunk);
2272 stream->trace_chunk = NULL;
6d574024 2273 consumer_stream_free(stream);
fb3a43a9
DG
2274}
2275
2276/*
2277 * Actions performed when adding a metadata stream to the consumer's
2278 * internal data structures.
2279 */
66d583dc 2280void consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
fb3a43a9 2281{
5ab66908 2282 struct lttng_ht *ht = metadata_ht;
76082088 2283 struct lttng_ht_iter iter;
d88aee68 2284 struct lttng_ht_node_u64 *node;
fb3a43a9 2285
e316aad5
DG
2286 assert(stream);
2287 assert(ht);
2288
d88aee68 2289 DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);
e316aad5
DG
2290
2291 pthread_mutex_lock(&consumer_data.lock);
a9838785 2292 pthread_mutex_lock(&stream->chan->lock);
ec6ea7d0 2293 pthread_mutex_lock(&stream->chan->timer_lock);
2e818a6a 2294 pthread_mutex_lock(&stream->lock);
e316aad5 2295
e316aad5
DG
2296 /*
2297 * From here, refcounts are updated so be _careful_ when returning an error
2298 * after this point.
2299 */
2300
fb3a43a9 2301 rcu_read_lock();
76082088
DG
2302
2303 /*
2304 * Lookup the stream just to make sure it does not exist in our internal
2305 * state. This should NEVER happen.
2306 */
d88aee68
DG
2307 lttng_ht_lookup(ht, &stream->key, &iter);
2308 node = lttng_ht_iter_get_node_u64(&iter);
76082088
DG
2309 assert(!node);
2310
e316aad5 2311 /*
ffe60014
DG
2312 * When nb_init_stream_left reaches 0, we don't need to trigger any action
2313 * in terms of destroying the associated channel, because the action that
e316aad5
DG
2314 * causes the count to become 0 also causes a stream to be added. The
2315 * channel deletion will thus be triggered by the following removal of this
2316 * stream.
2317 */
ffe60014 2318 if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
f2ad556d
MD
2319 /* Increment refcount before decrementing nb_init_stream_left */
2320 cmm_smp_wmb();
ffe60014 2321 uatomic_dec(&stream->chan->nb_init_stream_left);
e316aad5
DG
2322 }
2323
d88aee68 2324 lttng_ht_add_unique_u64(ht, &stream->node);
ca22feea 2325
446156b4 2326 lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
d8ef542d
MD
2327 &stream->node_channel_id);
2328
ca22feea
DG
2329 /*
2330 * Add stream to the stream_list_ht of the consumer data. No need to steal
2331 * the key since the HT does not use it and adding redundant keys to this
2332 * table is allowed.
2333 */
d88aee68 2334 lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);
ca22feea 2335
fb3a43a9 2336 rcu_read_unlock();
e316aad5 2337
2e818a6a 2338 pthread_mutex_unlock(&stream->lock);
a9838785 2339 pthread_mutex_unlock(&stream->chan->lock);
ec6ea7d0 2340 pthread_mutex_unlock(&stream->chan->timer_lock);
e316aad5 2341 pthread_mutex_unlock(&consumer_data.lock);
fb3a43a9
DG
2342}
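/*
 * Note on lock nesting (an observation drawn from the code above, not an
 * upstream statement): consumer_add_metadata_stream() and
 * consumer_del_metadata_stream() both take consumer_data.lock first, then the
 * channel lock(s), then the stream lock. New code touching these structures
 * should follow the same ordering to avoid deadlocks.
 */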
2343
8994307f
DG
2344/*
2345 * Delete data streams that are flagged for deletion (endpoint_status).
2346 */
2347static void validate_endpoint_status_data_stream(void)
2348{
2349 struct lttng_ht_iter iter;
2350 struct lttng_consumer_stream *stream;
2351
2352 DBG("Consumer delete flagged data stream");
2353
2354 rcu_read_lock();
2355 cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
2356 /* Validate delete flag of the stream */
79d4ffb7 2357 if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
8994307f
DG
2358 continue;
2359 }
2360 /* Delete it right now */
2361 consumer_del_stream(stream, data_ht);
2362 }
2363 rcu_read_unlock();
2364}
2365
2366/*
2367 * Delete metadata streams that are flagged for deletion (endpoint_status).
2368 */
2369static void validate_endpoint_status_metadata_stream(
2370 struct lttng_poll_event *pollset)
2371{
2372 struct lttng_ht_iter iter;
2373 struct lttng_consumer_stream *stream;
2374
2375 DBG("Consumer delete flagged metadata stream");
2376
2377 assert(pollset);
2378
2379 rcu_read_lock();
2380 cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
2381 /* Validate delete flag of the stream */
79d4ffb7 2382 if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
8994307f
DG
2383 continue;
2384 }
2385 /*
2386 * Remove from pollset so the metadata thread can continue without
2387 * blocking on a deleted stream.
2388 */
2389 lttng_poll_del(pollset, stream->wait_fd);
2390
2391 /* Delete it right now */
2392 consumer_del_metadata_stream(stream, metadata_ht);
2393 }
2394 rcu_read_unlock();
2395}
2396
fb3a43a9
DG
2397/*
2398 * Thread polls on metadata file descriptor and write them on disk or on the
2399 * network.
2400 */
7d980def 2401void *consumer_thread_metadata_poll(void *data)
fb3a43a9 2402{
1fc79fb4 2403 int ret, i, pollfd, err = -1;
fb3a43a9 2404 uint32_t revents, nb_fd;
e316aad5 2405 struct lttng_consumer_stream *stream = NULL;
fb3a43a9 2406 struct lttng_ht_iter iter;
d88aee68 2407 struct lttng_ht_node_u64 *node;
fb3a43a9
DG
2408 struct lttng_poll_event events;
2409 struct lttng_consumer_local_data *ctx = data;
2410 ssize_t len;
2411
2412 rcu_register_thread();
2413
1fc79fb4
MD
2414 health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);
2415
2d57de81
MD
2416 if (testpoint(consumerd_thread_metadata)) {
2417 goto error_testpoint;
2418 }
2419
9ce5646a
MD
2420 health_code_update();
2421
fb3a43a9
DG
2422 DBG("Thread metadata poll started");
2423
fb3a43a9
DG
2424 /* Size is set to 1 for the consumer_metadata pipe */
2425 ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
2426 if (ret < 0) {
2427 ERR("Poll set creation failed");
d8ef542d 2428 goto end_poll;
fb3a43a9
DG
2429 }
2430
13886d2d
DG
2431 ret = lttng_poll_add(&events,
2432 lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
fb3a43a9
DG
2433 if (ret < 0) {
2434 goto end;
2435 }
2436
2437 /* Main loop */
2438 DBG("Metadata main loop started");
2439
2440 while (1) {
fb3a43a9 2441restart:
7fa2082e 2442 health_code_update();
9ce5646a 2443 health_poll_entry();
7fa2082e 2444 DBG("Metadata poll wait");
fb3a43a9 2445 ret = lttng_poll_wait(&events, -1);
7fa2082e
MD
2446 DBG("Metadata poll return from wait with %d fd(s)",
2447 LTTNG_POLL_GETNB(&events));
9ce5646a 2448 health_poll_exit();
40063ead 2449 DBG("Metadata event caught in thread");
fb3a43a9
DG
2450 if (ret < 0) {
2451 if (errno == EINTR) {
40063ead 2452 ERR("Poll EINTR caught");
fb3a43a9
DG
2453 goto restart;
2454 }
d9607cd7
MD
2455 if (LTTNG_POLL_GETNB(&events) == 0) {
2456 err = 0; /* All is OK */
2457 }
2458 goto end;
fb3a43a9
DG
2459 }
2460
0d9c5d77
DG
2461 nb_fd = ret;
2462
e316aad5 2463 /* From here, the event is a metadata wait fd */
fb3a43a9 2464 for (i = 0; i < nb_fd; i++) {
9ce5646a
MD
2465 health_code_update();
2466
fb3a43a9
DG
2467 revents = LTTNG_POLL_GETEV(&events, i);
2468 pollfd = LTTNG_POLL_GETFD(&events, i);
2469
13886d2d 2470 if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
03e43155 2471 if (revents & LPOLLIN) {
13886d2d
DG
2472 ssize_t pipe_len;
2473
2474 pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
2475 &stream, sizeof(stream));
6cd525e8 2476 if (pipe_len < sizeof(stream)) {
03e43155
MD
2477 if (pipe_len < 0) {
2478 PERROR("read metadata stream");
2479 }
fb3a43a9 2480 /*
03e43155
MD
2481 * Remove the pipe from the poll set and continue the loop
2482 * since there might be data to consume.
fb3a43a9 2483 */
03e43155
MD
2484 lttng_poll_del(&events,
2485 lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
2486 lttng_pipe_read_close(ctx->consumer_metadata_pipe);
fb3a43a9
DG
2487 continue;
2488 }
2489
8994307f
DG
2490 /* A NULL stream means that the state has changed. */
2491 if (stream == NULL) {
2492 /* Check for deleted streams. */
2493 validate_endpoint_status_metadata_stream(&events);
3714380f 2494 goto restart;
8994307f
DG
2495 }
2496
fb3a43a9
DG
2497 DBG("Adding metadata stream %d to poll set",
2498 stream->wait_fd);
2499
fb3a43a9
DG
2500 /* Add metadata stream to the global poll events list */
2501 lttng_poll_add(&events, stream->wait_fd,
6d574024 2502 LPOLLIN | LPOLLPRI | LPOLLHUP);
03e43155
MD
2503 } else if (revents & (LPOLLERR | LPOLLHUP)) {
2504 DBG("Metadata thread pipe hung up");
2505 /*
2506 * Remove the pipe from the poll set and continue the loop
2507 * since there might be data to consume.
2508 */
2509 lttng_poll_del(&events,
2510 lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
2511 lttng_pipe_read_close(ctx->consumer_metadata_pipe);
2512 continue;
2513 } else {
2514 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
2515 goto end;
fb3a43a9
DG
2516 }
2517
e316aad5 2518 /* Handle other stream */
fb3a43a9
DG
2519 continue;
2520 }
2521
d09e1200 2522 rcu_read_lock();
d88aee68
DG
2523 {
2524 uint64_t tmp_id = (uint64_t) pollfd;
2525
2526 lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
2527 }
2528 node = lttng_ht_iter_get_node_u64(&iter);
e316aad5 2529 assert(node);
fb3a43a9
DG
2530
2531 stream = caa_container_of(node, struct lttng_consumer_stream,
58b1f425 2532 node);
fb3a43a9 2533
03e43155
MD
2534 if (revents & (LPOLLIN | LPOLLPRI)) {
2535 /* Get the data out of the metadata file descriptor */
2536 DBG("Metadata available on fd %d", pollfd);
2537 assert(stream->wait_fd == pollfd);
2538
2539 do {
2540 health_code_update();
2541
2542 len = ctx->on_buffer_ready(stream, ctx);
2543 /*
2544 * We don't check the return value here since if we get
83f4233d 2545 * a negative len, it means an error occurred thus we
03e43155
MD
2546 * simply remove it from the poll set and free the
2547 * stream.
2548 */
2549 } while (len > 0);
2550
2551 /* It's ok to have an unavailable sub-buffer */
2552 if (len < 0 && len != -EAGAIN && len != -ENODATA) {
2553 /* Clean up stream from consumer and free it. */
2554 lttng_poll_del(&events, stream->wait_fd);
2555 consumer_del_metadata_stream(stream, metadata_ht);
2556 }
2557 } else if (revents & (LPOLLERR | LPOLLHUP)) {
e316aad5 2558 DBG("Metadata fd %d is hup|err.", pollfd);
fb3a43a9
DG
2559 if (!stream->hangup_flush_done
2560 && (consumer_data.type == LTTNG_CONSUMER32_UST
2561 || consumer_data.type == LTTNG_CONSUMER64_UST)) {
2562 DBG("Attempting to flush and consume the UST buffers");
2563 lttng_ustconsumer_on_stream_hangup(stream);
2564
2565 /* We just flushed the stream now read it. */
4bb94b75 2566 do {
9ce5646a
MD
2567 health_code_update();
2568
4bb94b75
DG
2569 len = ctx->on_buffer_ready(stream, ctx);
2570 /*
2571 * We don't check the return value here since if we get
83f4233d 2572 * a negative len, it means an error occurred thus we
4bb94b75
DG
2573 * simply remove it from the poll set and free the
2574 * stream.
2575 */
2576 } while (len > 0);
fb3a43a9
DG
2577 }
2578
fb3a43a9 2579 lttng_poll_del(&events, stream->wait_fd);
e316aad5
DG
2580 /*
2581 * This call update the channel states, closes file descriptors
2582 * and securely free the stream.
2583 */
2584 consumer_del_metadata_stream(stream, metadata_ht);
03e43155
MD
2585 } else {
2586 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
6f2f1a70 2587 rcu_read_unlock();
03e43155 2588 goto end;
fb3a43a9 2589 }
e316aad5 2590 /* Release RCU lock for the stream looked up */
d09e1200 2591 rcu_read_unlock();
fb3a43a9
DG
2592 }
2593 }
2594
1fc79fb4
MD
2595 /* All is OK */
2596 err = 0;
fb3a43a9
DG
2597end:
2598 DBG("Metadata poll thread exiting");
fb3a43a9 2599
d8ef542d
MD
2600 lttng_poll_clean(&events);
2601end_poll:
2d57de81 2602error_testpoint:
1fc79fb4
MD
2603 if (err) {
2604 health_error();
2605 ERR("Health error occurred in %s", __func__);
2606 }
2607 health_unregister(health_consumerd);
fb3a43a9
DG
2608 rcu_unregister_thread();
2609 return NULL;
2610}
2611
3bd1e081 2612/*
e4421fec 2613 * This thread polls the fds in the set to consume the data and write
3bd1e081
MD
2614 * it to tracefile if necessary.
2615 */
7d980def 2616void *consumer_thread_data_poll(void *data)
3bd1e081 2617{
1fc79fb4 2618 int num_rdy, num_hup, high_prio, ret, i, err = -1;
3bd1e081
MD
2619 struct pollfd *pollfd = NULL;
2620 /* local view of the streams */
c869f647 2621 struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
3bd1e081 2622 /* local view of consumer_data.fds_count */
8bdcc002
JG
2623 int nb_fd = 0;
2624 /* 2 for the consumer_data_pipe and wake up pipe */
2625 const int nb_pipes_fd = 2;
9a2fcf78
JD
2626 /* Number of FDs with CONSUMER_ENDPOINT_INACTIVE but still open. */
2627 int nb_inactive_fd = 0;
3bd1e081 2628 struct lttng_consumer_local_data *ctx = data;
00e2e675 2629 ssize_t len;
3bd1e081 2630
e7b994a3
DG
2631 rcu_register_thread();
2632
1fc79fb4
MD
2633 health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);
2634
2d57de81
MD
2635 if (testpoint(consumerd_thread_data)) {
2636 goto error_testpoint;
2637 }
2638
9ce5646a
MD
2639 health_code_update();
2640
4df6c8cb
MD
2641 local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
2642 if (local_stream == NULL) {
2643 PERROR("local_stream malloc");
2644 goto end;
2645 }
3bd1e081
MD
2646
2647 while (1) {
9ce5646a
MD
2648 health_code_update();
2649
3bd1e081
MD
2650 high_prio = 0;
2651 num_hup = 0;
2652
2653 /*
e4421fec 2654 * the fds set has been updated, we need to update our
3bd1e081
MD
2655 * local array as well
2656 */
2657 pthread_mutex_lock(&consumer_data.lock);
2658 if (consumer_data.need_update) {
0e428499
DG
2659 free(pollfd);
2660 pollfd = NULL;
2661
2662 free(local_stream);
2663 local_stream = NULL;
3bd1e081 2664
8bdcc002 2665 /* Allocate for all fds */
261de637 2666 pollfd = zmalloc((consumer_data.stream_count + nb_pipes_fd) * sizeof(struct pollfd));
3bd1e081 2667 if (pollfd == NULL) {
7a57cf92 2668 PERROR("pollfd malloc");
3bd1e081
MD
2669 pthread_mutex_unlock(&consumer_data.lock);
2670 goto end;
2671 }
2672
261de637 2673 local_stream = zmalloc((consumer_data.stream_count + nb_pipes_fd) *
747f8642 2674 sizeof(struct lttng_consumer_stream *));
3bd1e081 2675 if (local_stream == NULL) {
7a57cf92 2676 PERROR("local_stream malloc");
3bd1e081
MD
2677 pthread_mutex_unlock(&consumer_data.lock);
2678 goto end;
2679 }
ffe60014 2680 ret = update_poll_array(ctx, &pollfd, local_stream,
9a2fcf78 2681 data_ht, &nb_inactive_fd);
3bd1e081
MD
2682 if (ret < 0) {
2683 ERR("Error in allocating pollfd or local_outfds");
f73fabfd 2684 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
3bd1e081
MD
2685 pthread_mutex_unlock(&consumer_data.lock);
2686 goto end;
2687 }
2688 nb_fd = ret;
2689 consumer_data.need_update = 0;
2690 }
2691 pthread_mutex_unlock(&consumer_data.lock);
2692
4078b776 2693 /* No FDs and consumer_quit, consumer_cleanup the thread */
9a2fcf78
JD
2694 if (nb_fd == 0 && nb_inactive_fd == 0 &&
2695 CMM_LOAD_SHARED(consumer_quit) == 1) {
1fc79fb4 2696 err = 0; /* All is OK */
4078b776
MD
2697 goto end;
2698 }
3bd1e081 2699 /* poll on the array of fds */
88f2b785 2700 restart:
261de637 2701 DBG("polling on %d fd", nb_fd + nb_pipes_fd);
cf0bcb51
JG
2702 if (testpoint(consumerd_thread_data_poll)) {
2703 goto end;
2704 }
9ce5646a 2705 health_poll_entry();
261de637 2706 num_rdy = poll(pollfd, nb_fd + nb_pipes_fd, -1);
9ce5646a 2707 health_poll_exit();
3bd1e081
MD
2708 DBG("poll num_rdy : %d", num_rdy);
2709 if (num_rdy == -1) {
88f2b785
MD
2710 /*
2711 * Restart interrupted system call.
2712 */
2713 if (errno == EINTR) {
2714 goto restart;
2715 }
7a57cf92 2716 PERROR("Poll error");
f73fabfd 2717 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
3bd1e081
MD
2718 goto end;
2719 } else if (num_rdy == 0) {
2720 DBG("Polling thread timed out");
2721 goto end;
2722 }
2723
80957876
JG
2724 if (caa_unlikely(data_consumption_paused)) {
2725 DBG("Data consumption paused, sleeping...");
2726 sleep(1);
2727 goto restart;
2728 }
2729
3bd1e081 2730 /*
2731 * If the consumer_data_pipe triggered the poll, go directly back to the
2732 * beginning of the loop to update the array. We want to prioritize
2733 * array update over low-priority reads.
3bd1e081 2734 */
509bb1cf 2735 if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
ab30f567 2736 ssize_t pipe_readlen;
04fdd819 2737
50f8ae69 2738 DBG("consumer_data_pipe wake up");
acdb9057
DG
2739 pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
2740 &new_stream, sizeof(new_stream));
6cd525e8
MD
2741 if (pipe_readlen < sizeof(new_stream)) {
2742 PERROR("Consumer data pipe");
23f5f35d
DG
2743 /* Continue so we can at least handle the current stream(s). */
2744 continue;
2745 }
c869f647
DG
2746
2747 /*
2748 * If the stream is NULL, just ignore it. It's also possible that
2749 * the sessiond poll thread changed the consumer_quit state and is
2750 * waking us up to test it.
2751 */
2752 if (new_stream == NULL) {
8994307f 2753 validate_endpoint_status_data_stream();
c869f647
DG
2754 continue;
2755 }
2756
c869f647 2757 /* Continue to update the local streams and handle prio ones */
3bd1e081
MD
2758 continue;
2759 }
2760
02b3d176
DG
2761 /* Handle wakeup pipe. */
2762 if (pollfd[nb_fd + 1].revents & (POLLIN | POLLPRI)) {
2763 char dummy;
2764 ssize_t pipe_readlen;
2765
2766 pipe_readlen = lttng_pipe_read(ctx->consumer_wakeup_pipe, &dummy,
2767 sizeof(dummy));
2768 if (pipe_readlen < 0) {
2769 PERROR("Consumer data wakeup pipe");
2770 }
2771 /* We've been awakened to handle stream(s). */
2772 ctx->has_wakeup = 0;
2773 }
2774
3bd1e081
MD
2775 /* Take care of high priority channels first. */
2776 for (i = 0; i < nb_fd; i++) {
9ce5646a
MD
2777 health_code_update();
2778
9617607b
DG
2779 if (local_stream[i] == NULL) {
2780 continue;
2781 }
fb3a43a9 2782 if (pollfd[i].revents & POLLPRI) {
d41f73b7
MD
2783 DBG("Urgent read on fd %d", pollfd[i].fd);
2784 high_prio = 1;
4078b776 2785 len = ctx->on_buffer_ready(local_stream[i], ctx);
d41f73b7 2786 /* it's ok to have an unavailable sub-buffer */
b64403e3 2787 if (len < 0 && len != -EAGAIN && len != -ENODATA) {
ab1027f4
DG
2788 /* Clean the stream and free it. */
2789 consumer_del_stream(local_stream[i], data_ht);
9617607b 2790 local_stream[i] = NULL;
4078b776
MD
2791 } else if (len > 0) {
2792 local_stream[i]->data_read = 1;
d41f73b7 2793 }
3bd1e081
MD
2794 }
2795 }
2796
4078b776
MD
2797 /*
2798 * If we read high prio channel in this loop, try again
2799 * for more high prio data.
2800 */
2801 if (high_prio) {
3bd1e081
MD
2802 continue;
2803 }
2804
2805 /* Take care of low priority channels. */
4078b776 2806 for (i = 0; i < nb_fd; i++) {
9ce5646a
MD
2807 health_code_update();
2808
9617607b
DG
2809 if (local_stream[i] == NULL) {
2810 continue;
2811 }
4078b776 2812 if ((pollfd[i].revents & POLLIN) ||
02b3d176
DG
2813 local_stream[i]->hangup_flush_done ||
2814 local_stream[i]->has_data) {
4078b776
MD
2815 DBG("Normal read on fd %d", pollfd[i].fd);
2816 len = ctx->on_buffer_ready(local_stream[i], ctx);
2817 /* it's ok to have an unavailable sub-buffer */
b64403e3 2818 if (len < 0 && len != -EAGAIN && len != -ENODATA) {
ab1027f4
DG
2819 /* Clean the stream and free it. */
2820 consumer_del_stream(local_stream[i], data_ht);
9617607b 2821 local_stream[i] = NULL;
4078b776
MD
2822 } else if (len > 0) {
2823 local_stream[i]->data_read = 1;
2824 }
2825 }
2826 }
2827
2828 /* Handle hangup and errors */
2829 for (i = 0; i < nb_fd; i++) {
9ce5646a
MD
2830 health_code_update();
2831
9617607b
DG
2832 if (local_stream[i] == NULL) {
2833 continue;
2834 }
4078b776
MD
2835 if (!local_stream[i]->hangup_flush_done
2836 && (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
2837 && (consumer_data.type == LTTNG_CONSUMER32_UST
2838 || consumer_data.type == LTTNG_CONSUMER64_UST)) {
2839 DBG("fd %d is hup|err|nval. Attempting flush and read.",
9617607b 2840 pollfd[i].fd);
4078b776
MD
2841 lttng_ustconsumer_on_stream_hangup(local_stream[i]);
2842 /* Attempt read again, for the data we just flushed. */
2843 local_stream[i]->data_read = 1;
2844 }
2845 /*
2846 * If the poll flag is HUP/ERR/NVAL and we have
2847 * read no data in this pass, we can remove the
2848 * stream from its hash table.
2849 */
2850 if ((pollfd[i].revents & POLLHUP)) {
2851 DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
2852 if (!local_stream[i]->data_read) {
43c34bc3 2853 consumer_del_stream(local_stream[i], data_ht);
9617607b 2854 local_stream[i] = NULL;
4078b776
MD
2855 num_hup++;
2856 }
2857 } else if (pollfd[i].revents & POLLERR) {
2858 ERR("Error returned in polling fd %d.", pollfd[i].fd);
2859 if (!local_stream[i]->data_read) {
43c34bc3 2860 consumer_del_stream(local_stream[i], data_ht);
9617607b 2861 local_stream[i] = NULL;
4078b776
MD
2862 num_hup++;
2863 }
2864 } else if (pollfd[i].revents & POLLNVAL) {
2865 ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
2866 if (!local_stream[i]->data_read) {
43c34bc3 2867 consumer_del_stream(local_stream[i], data_ht);
9617607b 2868 local_stream[i] = NULL;
4078b776 2869 num_hup++;
3bd1e081
MD
2870 }
2871 }
9617607b
DG
2872 if (local_stream[i] != NULL) {
2873 local_stream[i]->data_read = 0;
2874 }
3bd1e081
MD
2875 }
2876 }
1fc79fb4
MD
2877 /* All is OK */
2878 err = 0;
3bd1e081
MD
2879end:
2880 DBG("polling thread exiting");
0e428499
DG
2881 free(pollfd);
2882 free(local_stream);
fb3a43a9
DG
2883
2884 /*
2885 * Close the write side of the pipe so epoll_wait() in
2886 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
2887 * read side of the pipe. If we close them both, epoll_wait strangely does
2888 * not return and could create an endless wait period if the pipe is the
2889 * only tracked fd in the poll set. The thread will take care of closing
2890 * the read side.
fb3a43a9 2891 */
13886d2d 2892 (void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);
fb3a43a9 2893
2d57de81 2894error_testpoint:
1fc79fb4
MD
2895 if (err) {
2896 health_error();
2897 ERR("Health error occurred in %s", __func__);
2898 }
2899 health_unregister(health_consumerd);
2900
e7b994a3 2901 rcu_unregister_thread();
3bd1e081
MD
2902 return NULL;
2903}
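/*
 * Layout note (a sketch based on the code above): the pollfd array used by
 * the data poll thread holds nb_fd stream entries followed by the two
 * internal pipes:
 *
 *     pollfd[0 .. nb_fd - 1]  -> monitored stream wait fds
 *     pollfd[nb_fd]           -> consumer_data_pipe (stream list updates)
 *     pollfd[nb_fd + 1]       -> consumer_wakeup_pipe (has_data wake-ups)
 */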
2904
d8ef542d
MD
2905/*
2906 * Close wake-up end of each stream belonging to the channel. This will
2907 * allow the poll() on the stream read-side to detect when the
2908 * write-side (application) finally closes them.
2909 */
2910static
2911void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
2912{
2913 struct lttng_ht *ht;
2914 struct lttng_consumer_stream *stream;
2915 struct lttng_ht_iter iter;
2916
2917 ht = consumer_data.stream_per_chan_id_ht;
2918
2919 rcu_read_lock();
2920 cds_lfht_for_each_entry_duplicate(ht->ht,
2921 ht->hash_fct(&channel->key, lttng_ht_seed),
2922 ht->match_fct, &channel->key,
2923 &iter.iter, stream, node_channel_id.node) {
f2ad556d
MD
2924 /*
2925 * Protect against teardown with mutex.
2926 */
2927 pthread_mutex_lock(&stream->lock);
2928 if (cds_lfht_is_node_deleted(&stream->node.node)) {
2929 goto next;
2930 }
d8ef542d
MD
2931 switch (consumer_data.type) {
2932 case LTTNG_CONSUMER_KERNEL:
2933 break;
2934 case LTTNG_CONSUMER32_UST:
2935 case LTTNG_CONSUMER64_UST:
b4a650f3
DG
2936 if (stream->metadata_flag) {
2937 /* Safe and protected by the stream lock. */
2938 lttng_ustconsumer_close_metadata(stream->chan);
2939 } else {
2940 /*
2941 * Note: a mutex is taken internally within
2942 * liblttng-ust-ctl to protect timer wakeup_fd
2943 * use from concurrent close.
2944 */
2945 lttng_ustconsumer_close_stream_wakeup(stream);
2946 }
d8ef542d
MD
2947 break;
2948 default:
2949 ERR("Unknown consumer_data type");
2950 assert(0);
2951 }
f2ad556d
MD
2952 next:
2953 pthread_mutex_unlock(&stream->lock);
d8ef542d
MD
2954 }
2955 rcu_read_unlock();
2956}
2957
2958static void destroy_channel_ht(struct lttng_ht *ht)
2959{
2960 struct lttng_ht_iter iter;
2961 struct lttng_consumer_channel *channel;
2962 int ret;
2963
2964 if (ht == NULL) {
2965 return;
2966 }
2967
2968 rcu_read_lock();
2969 cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
2970 ret = lttng_ht_del(ht, &iter);
2971 assert(ret != 0);
2972 }
2973 rcu_read_unlock();
2974
2975 lttng_ht_destroy(ht);
2976}
2977
2978/*
2979 * This thread polls the channel fds to detect when they are being
2980 * closed. It closes all related streams if the channel is detected as
2981 * closed. It is currently only used as a shim layer for UST because the
2982 * consumerd needs to keep the per-stream wakeup end of pipes open for
2983 * periodical flush.
2984 */
2985void *consumer_thread_channel_poll(void *data)
2986{
1fc79fb4 2987 int ret, i, pollfd, err = -1;
d8ef542d
MD
2988 uint32_t revents, nb_fd;
2989 struct lttng_consumer_channel *chan = NULL;
2990 struct lttng_ht_iter iter;
2991 struct lttng_ht_node_u64 *node;
2992 struct lttng_poll_event events;
2993 struct lttng_consumer_local_data *ctx = data;
2994 struct lttng_ht *channel_ht;
2995
2996 rcu_register_thread();
2997
1fc79fb4
MD
2998 health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);
2999
2d57de81
MD
3000 if (testpoint(consumerd_thread_channel)) {
3001 goto error_testpoint;
3002 }
3003
9ce5646a
MD
3004 health_code_update();
3005
d8ef542d
MD
3006 channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3007 if (!channel_ht) {
3008 /* ENOMEM at this point. Better to bail out. */
3009 goto end_ht;
3010 }
3011
3012 DBG("Thread channel poll started");
3013
3014 /* Size is set to 1 for the consumer_channel pipe */
3015 ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
3016 if (ret < 0) {
3017 ERR("Poll set creation failed");
3018 goto end_poll;
3019 }
3020
3021 ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
3022 if (ret < 0) {
3023 goto end;
3024 }
3025
3026 /* Main loop */
3027 DBG("Channel main loop started");
3028
3029 while (1) {
d8ef542d 3030restart:
7fa2082e
MD
3031 health_code_update();
3032 DBG("Channel poll wait");
9ce5646a 3033 health_poll_entry();
d8ef542d 3034 ret = lttng_poll_wait(&events, -1);
7fa2082e
MD
3035 DBG("Channel poll return from wait with %d fd(s)",
3036 LTTNG_POLL_GETNB(&events));
9ce5646a 3037 health_poll_exit();
40063ead 3038 DBG("Channel event caught in thread");
d8ef542d
MD
3039 if (ret < 0) {
3040 if (errno == EINTR) {
40063ead 3041 ERR("Poll EINTR caught");
d8ef542d
MD
3042 goto restart;
3043 }
d9607cd7
MD
3044 if (LTTNG_POLL_GETNB(&events) == 0) {
3045 err = 0; /* All is OK */
3046 }
d8ef542d
MD
3047 goto end;
3048 }
3049
3050 nb_fd = ret;
3051
3052 /* From here, the event is a channel wait fd */
3053 for (i = 0; i < nb_fd; i++) {
9ce5646a
MD
3054 health_code_update();
3055
d8ef542d
MD
3056 revents = LTTNG_POLL_GETEV(&events, i);
3057 pollfd = LTTNG_POLL_GETFD(&events, i);
3058
d8ef542d 3059 if (pollfd == ctx->consumer_channel_pipe[0]) {
03e43155 3060 if (revents & LPOLLIN) {
d8ef542d 3061 enum consumer_channel_action action;
a0cbdd2e 3062 uint64_t key;
d8ef542d 3063
a0cbdd2e 3064 ret = read_channel_pipe(ctx, &chan, &key, &action);
d8ef542d 3065 if (ret <= 0) {
03e43155
MD
3066 if (ret < 0) {
3067 ERR("Error reading channel pipe");
3068 }
3069 lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
d8ef542d
MD
3070 continue;
3071 }
3072
3073 switch (action) {
3074 case CONSUMER_CHANNEL_ADD:
3075 DBG("Adding channel %d to poll set",
3076 chan->wait_fd);
3077
3078 lttng_ht_node_init_u64(&chan->wait_fd_node,
3079 chan->wait_fd);
c7260a81 3080 rcu_read_lock();
d8ef542d
MD
3081 lttng_ht_add_unique_u64(channel_ht,
3082 &chan->wait_fd_node);
c7260a81 3083 rcu_read_unlock();
d8ef542d
MD
3084 /* Add channel to the global poll events list */
3085 lttng_poll_add(&events, chan->wait_fd,
03e43155 3086 LPOLLERR | LPOLLHUP);
d8ef542d 3087 break;
a0cbdd2e
MD
3088 case CONSUMER_CHANNEL_DEL:
3089 {
b4a650f3
DG
3090 /*
3091 * This command should never be called if the channel
3092 * has streams monitored by either the data or metadata
3093 * thread. The consumer only notifies this thread with a
3094 * channel deletion command if it receives a destroy
3095 * channel command from the session daemon, which sends it
3096 * when a command prior to GET_CHANNEL failed.
3097 */
3098
c7260a81 3099 rcu_read_lock();
a0cbdd2e
MD
3100 chan = consumer_find_channel(key);
3101 if (!chan) {
c7260a81 3102 rcu_read_unlock();
a0cbdd2e
MD
3103 ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
3104 break;
3105 }
3106 lttng_poll_del(&events, chan->wait_fd);
f623cc0b 3107 iter.iter.node = &chan->wait_fd_node.node;
a0cbdd2e
MD
3108 ret = lttng_ht_del(channel_ht, &iter);
3109 assert(ret == 0);
a0cbdd2e 3110
f2a444f1
DG
3111 switch (consumer_data.type) {
3112 case LTTNG_CONSUMER_KERNEL:
3113 break;
3114 case LTTNG_CONSUMER32_UST:
3115 case LTTNG_CONSUMER64_UST:
212d67a2
DG
3116 health_code_update();
3117 /* Destroy streams that might have been left in the stream list. */
3118 clean_channel_stream_list(chan);
f2a444f1
DG
3119 break;
3120 default:
3121 ERR("Unknown consumer_data type");
3122 assert(0);
3123 }
3124
a0cbdd2e
MD
3125 /*
3126 * Release our own refcount. Force channel deletion even if
3127 * streams were not initialized.
3128 */
3129 if (!uatomic_sub_return(&chan->refcount, 1)) {
3130 consumer_del_channel(chan);
3131 }
c7260a81 3132 rcu_read_unlock();
a0cbdd2e
MD
3133 goto restart;
3134 }
d8ef542d
MD
3135 case CONSUMER_CHANNEL_QUIT:
3136 /*
3137 * Remove the pipe from the poll set and continue the loop
3138 * since there might be data to consume.
3139 */
3140 lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
3141 continue;
3142 default:
3143 ERR("Unknown action");
3144 break;
3145 }
03e43155
MD
3146 } else if (revents & (LPOLLERR | LPOLLHUP)) {
3147 DBG("Channel thread pipe hung up");
3148 /*
3149 * Remove the pipe from the poll set and continue the loop
3150 * since there might be data to consume.
3151 */
3152 lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
3153 continue;
3154 } else {
3155 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
3156 goto end;
d8ef542d
MD
3157 }
3158
3159 /* Handle other stream */
3160 continue;
3161 }
3162
3163 rcu_read_lock();
3164 {
3165 uint64_t tmp_id = (uint64_t) pollfd;
3166
3167 lttng_ht_lookup(channel_ht, &tmp_id, &iter);
3168 }
3169 node = lttng_ht_iter_get_node_u64(&iter);
3170 assert(node);
3171
3172 chan = caa_container_of(node, struct lttng_consumer_channel,
3173 wait_fd_node);
3174
3175 /* Check for error event */
3176 if (revents & (LPOLLERR | LPOLLHUP)) {
3177 DBG("Channel fd %d is hup|err.", pollfd);
3178
3179 lttng_poll_del(&events, chan->wait_fd);
3180 ret = lttng_ht_del(channel_ht, &iter);
3181 assert(ret == 0);
b4a650f3
DG
3182
3183 /*
3184 * This will close the wait fd for each stream associated with
3185 * this channel AND monitored by the data/metadata thread; the
3186 * streams will thus be cleaned up by the right thread.
3187 */
d8ef542d 3188 consumer_close_channel_streams(chan);
f2ad556d
MD
3189
3190 /* Release our own refcount */
3191 if (!uatomic_sub_return(&chan->refcount, 1)
3192 && !uatomic_read(&chan->nb_init_stream_left)) {
3193 consumer_del_channel(chan);
3194 }
03e43155
MD
3195 } else {
3196 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
3197 rcu_read_unlock();
3198 goto end;
d8ef542d
MD
3199 }
3200
3201 /* Release RCU lock for the channel looked up */
3202 rcu_read_unlock();
3203 }
3204 }
3205
1fc79fb4
MD
3206 /* All is OK */
3207 err = 0;
d8ef542d
MD
3208end:
3209 lttng_poll_clean(&events);
3210end_poll:
3211 destroy_channel_ht(channel_ht);
3212end_ht:
2d57de81 3213error_testpoint:
d8ef542d 3214 DBG("Channel poll thread exiting");
1fc79fb4
MD
3215 if (err) {
3216 health_error();
3217 ERR("Health error occurred in %s", __func__);
3218 }
3219 health_unregister(health_consumerd);
d8ef542d
MD
3220 rcu_unregister_thread();
3221 return NULL;
3222}
3223
331744e3
JD
3224static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
3225 struct pollfd *sockpoll, int client_socket)
3226{
3227 int ret;
3228
3229 assert(ctx);
3230 assert(sockpoll);
3231
84382d49
MD
3232 ret = lttng_consumer_poll_socket(sockpoll);
3233 if (ret) {
331744e3
JD
3234 goto error;
3235 }
3236 DBG("Metadata connection on client_socket");
3237
3238 /* Blocking call, waiting for transmission */
3239 ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
3240 if (ctx->consumer_metadata_socket < 0) {
3241 WARN("On accept metadata");
3242 ret = -1;
3243 goto error;
3244 }
3245 ret = 0;
3246
3247error:
3248 return ret;
3249}
3250
3bd1e081
MD
3251/*
3252 * This thread listens on the consumerd socket and receives the file
3253 * descriptors from the session daemon.
3254 */
7d980def 3255void *consumer_thread_sessiond_poll(void *data)
3bd1e081 3256{
1fc79fb4 3257 int sock = -1, client_socket, ret, err = -1;
3bd1e081
MD
3258 /*
3259 * Structure used to poll for incoming data on the communication socket,
3260 * which avoids having to make the socket blocking.
3261 */
3262 struct pollfd consumer_sockpoll[2];
3263 struct lttng_consumer_local_data *ctx = data;
3264
e7b994a3
DG
3265 rcu_register_thread();
3266
1fc79fb4
MD
3267 health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);
3268
2d57de81
MD
3269 if (testpoint(consumerd_thread_sessiond)) {
3270 goto error_testpoint;
3271 }
3272
9ce5646a
MD
3273 health_code_update();
3274
3bd1e081
MD
3275 DBG("Creating command socket %s", ctx->consumer_command_sock_path);
3276 unlink(ctx->consumer_command_sock_path);
3277 client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
3278 if (client_socket < 0) {
3279 ERR("Cannot create command socket");
3280 goto end;
3281 }
3282
3283 ret = lttcomm_listen_unix_sock(client_socket);
3284 if (ret < 0) {
3285 goto end;
3286 }
3287
32258573 3288 DBG("Sending ready command to lttng-sessiond");
f73fabfd 3289 ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
3bd1e081
MD
3290 /* return < 0 on error, but == 0 is not fatal */
3291 if (ret < 0) {
32258573 3292 ERR("Error sending ready command to lttng-sessiond");
3bd1e081
MD
3293 goto end;
3294 }
3295
3bd1e081
MD
3296 /* prepare the FDs to poll : to client socket and the should_quit pipe */
3297 consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
3298 consumer_sockpoll[0].events = POLLIN | POLLPRI;
3299 consumer_sockpoll[1].fd = client_socket;
3300 consumer_sockpoll[1].events = POLLIN | POLLPRI;
3301
84382d49
MD
3302 ret = lttng_consumer_poll_socket(consumer_sockpoll);
3303 if (ret) {
3304 if (ret > 0) {
3305 /* should exit */
3306 err = 0;
3307 }
3bd1e081
MD
3308 goto end;
3309 }
3310 DBG("Connection on client_socket");
3311
3312 /* Blocking call, waiting for transmission */
3313 sock = lttcomm_accept_unix_sock(client_socket);
534d2592 3314 if (sock < 0) {
3bd1e081
MD
3315 WARN("On accept");
3316 goto end;
3317 }
3bd1e081 3318
331744e3
JD
3319 /*
3320 * Setup metadata socket which is the second socket connection on the
3321 * command unix socket.
3322 */
3323 ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
84382d49
MD
3324 if (ret) {
3325 if (ret > 0) {
3326 /* should exit */
3327 err = 0;
3328 }
331744e3
JD
3329 goto end;
3330 }
3331
d96f09c6
DG
3332 /* This socket is not useful anymore. */
3333 ret = close(client_socket);
3334 if (ret < 0) {
3335 PERROR("close client_socket");
3336 }
3337 client_socket = -1;
3338
3bd1e081
MD
3339 /* update the polling structure to poll on the established socket */
3340 consumer_sockpoll[1].fd = sock;
3341 consumer_sockpoll[1].events = POLLIN | POLLPRI;
3342
3343 while (1) {
9ce5646a
MD
3344 health_code_update();
3345
3346 health_poll_entry();
3347 ret = lttng_consumer_poll_socket(consumer_sockpoll);
3348 health_poll_exit();
84382d49
MD
3349 if (ret) {
3350 if (ret > 0) {
3351 /* should exit */
3352 err = 0;
3353 }
3bd1e081
MD
3354 goto end;
3355 }
3356 DBG("Incoming command on sock");
3357 ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
4cbc1a04
DG
3358 if (ret <= 0) {
3359 /*
3360 * This could simply be a session daemon quitting. Don't output
3361 * ERR() here.
3362 */
3363 DBG("Communication interrupted on command socket");
41ba6035 3364 err = 0;
3bd1e081
MD
3365 goto end;
3366 }
10211f5c 3367 if (CMM_LOAD_SHARED(consumer_quit)) {
3bd1e081 3368 DBG("consumer_thread_receive_fds received quit from signal");
1fc79fb4 3369 err = 0; /* All is OK */
3bd1e081
MD
3370 goto end;
3371 }
ffe60014 3372 DBG("received command on sock");
3bd1e081 3373 }
1fc79fb4
MD
3374 /* All is OK */
3375 err = 0;
3376
3bd1e081 3377end:
ffe60014 3378 DBG("Consumer thread sessiond poll exiting");
3bd1e081 3379
d88aee68
DG
3380 /*
3381 * Close metadata streams since the producer is the session daemon which
3382 * just died.
3383 *
3384 * NOTE: for now, this only applies to the UST tracer.
3385 */
6d574024 3386 lttng_consumer_close_all_metadata();
d88aee68 3387
3bd1e081
MD
3388 /*
3389 * when all fds have hung up, the polling thread
3390 * can exit cleanly
3391 */
10211f5c 3392 CMM_STORE_SHARED(consumer_quit, 1);
3bd1e081 3393
04fdd819 3394 /*
c869f647 3395 * Notify the data poll thread to poll back again and test the
8994307f 3396 * consumer_quit state that we just set so to quit gracefully.
04fdd819 3397 */
acdb9057 3398 notify_thread_lttng_pipe(ctx->consumer_data_pipe);
c869f647 3399
a0cbdd2e 3400 notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);
d8ef542d 3401
5c635c72
MD
3402 notify_health_quit_pipe(health_quit_pipe);
3403
d96f09c6
DG
3404 /* Cleaning up possibly open sockets. */
3405 if (sock >= 0) {
3406 ret = close(sock);
3407 if (ret < 0) {
3408 PERROR("close sock sessiond poll");
3409 }
3410 }
3411 if (client_socket >= 0) {
38476d24 3412 ret = close(client_socket);
d96f09c6
DG
3413 if (ret < 0) {
3414 PERROR("close client_socket sessiond poll");
3415 }
3416 }
3417
2d57de81 3418error_testpoint:
1fc79fb4
MD
3419 if (err) {
3420 health_error();
3421 ERR("Health error occurred in %s", __func__);
3422 }
3423 health_unregister(health_consumerd);
3424
e7b994a3 3425 rcu_unregister_thread();
3bd1e081
MD
3426 return NULL;
3427}
d41f73b7 3428
4078b776 3429ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
d41f73b7
MD
3430 struct lttng_consumer_local_data *ctx)
3431{
74251bb8
DG
3432 ssize_t ret;
3433
d2956687 3434 pthread_mutex_lock(&stream->chan->lock);
74251bb8 3435 pthread_mutex_lock(&stream->lock);
94d49140
JD
3436 if (stream->metadata_flag) {
3437 pthread_mutex_lock(&stream->metadata_rdv_lock);
3438 }
74251bb8 3439
d41f73b7
MD
3440 switch (consumer_data.type) {
3441 case LTTNG_CONSUMER_KERNEL:
d2956687 3442 ret = lttng_kconsumer_read_subbuffer(stream, ctx);
74251bb8 3443 break;
7753dea8
MD
3444 case LTTNG_CONSUMER32_UST:
3445 case LTTNG_CONSUMER64_UST:
d2956687 3446 ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
74251bb8 3447 break;
d41f73b7
MD
3448 default:
3449 ERR("Unknown consumer_data type");
3450 assert(0);
74251bb8
DG
3451 ret = -ENOSYS;
3452 break;
d41f73b7 3453 }
74251bb8 3454
94d49140
JD
3455 if (stream->metadata_flag) {
3456 pthread_cond_broadcast(&stream->metadata_rdv);
3457 pthread_mutex_unlock(&stream->metadata_rdv_lock);
3458 }
74251bb8 3459 pthread_mutex_unlock(&stream->lock);
d2956687 3460 pthread_mutex_unlock(&stream->chan->lock);
02d02e31 3461
74251bb8 3462 return ret;
d41f73b7
MD
3463}
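/*
 * Editor's note (illustrative, not part of the original source): the lock
 * nesting used by lttng_consumer_read_subbuffer() above is
 * channel lock -> stream lock -> metadata rendez-vous lock. A sketch of
 * the acquire/release sequence for a metadata stream:
 *
 *   pthread_mutex_lock(&stream->chan->lock);
 *   pthread_mutex_lock(&stream->lock);
 *   pthread_mutex_lock(&stream->metadata_rdv_lock);
 *   ...read one sub-buffer (kernel or UST path)...
 *   pthread_cond_broadcast(&stream->metadata_rdv);
 *   pthread_mutex_unlock(&stream->metadata_rdv_lock);
 *   pthread_mutex_unlock(&stream->lock);
 *   pthread_mutex_unlock(&stream->chan->lock);
 */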
3464
3465int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
3466{
3467 switch (consumer_data.type) {
3468 case LTTNG_CONSUMER_KERNEL:
3469 return lttng_kconsumer_on_recv_stream(stream);
7753dea8
MD
3470 case LTTNG_CONSUMER32_UST:
3471 case LTTNG_CONSUMER64_UST:
d41f73b7
MD
3472 return lttng_ustconsumer_on_recv_stream(stream);
3473 default:
3474 ERR("Unknown consumer_data type");
3475 assert(0);
3476 return -ENOSYS;
3477 }
3478}
e4421fec
DG
3479
3480/*
3481 * Allocate and set consumer data hash tables.
3482 */
282dadbc 3483int lttng_consumer_init(void)
e4421fec 3484{
d88aee68 3485 consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
282dadbc
MD
3486 if (!consumer_data.channel_ht) {
3487 goto error;
3488 }
3489
5c3892a6
JG
3490 consumer_data.channels_by_session_id_ht =
3491 lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3492 if (!consumer_data.channels_by_session_id_ht) {
3493 goto error;
3494 }
3495
d88aee68 3496 consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
282dadbc
MD
3497 if (!consumer_data.relayd_ht) {
3498 goto error;
3499 }
3500
d88aee68 3501 consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
282dadbc
MD
3502 if (!consumer_data.stream_list_ht) {
3503 goto error;
3504 }
3505
d8ef542d 3506 consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
282dadbc
MD
3507 if (!consumer_data.stream_per_chan_id_ht) {
3508 goto error;
3509 }
3510
3511 data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3512 if (!data_ht) {
3513 goto error;
3514 }
3515
3516 metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3517 if (!metadata_ht) {
3518 goto error;
3519 }
3520
28cc88f3
JG
3521 consumer_data.chunk_registry = lttng_trace_chunk_registry_create();
3522 if (!consumer_data.chunk_registry) {
3523 goto error;
3524 }
3525
282dadbc
MD
3526 return 0;
3527
3528error:
3529 return -1;
e4421fec 3530}
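/*
 * Editor's sketch (assumption, not part of the original source): this
 * initialization is expected to run once at consumerd start-up, before any
 * thread uses the hash tables or the chunk registry. A hypothetical call
 * site (the error label name is made up for illustration):
 */
#if 0
	if (lttng_consumer_init() < 0) {
		ERR("Failed to allocate consumer hash tables and trace chunk registry");
		goto error_exit;
	}
#endif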
7735ef9e
DG
3531
3532/*
3533 * Process the ADD_RELAYD command received by a consumer.
3534 *
3535 * This will create a relayd socket pair and add it to the relayd hash table.
3536 * The caller MUST acquire an RCU read-side lock before calling it.
3537 */
2527bf85 3538 void consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
7735ef9e 3539 struct lttng_consumer_local_data *ctx, int sock,
6151a90f 3540 struct pollfd *consumer_sockpoll,
d3e2ba59
JD
3541 struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
3542 uint64_t relayd_session_id)
7735ef9e 3543{
cd2b09ed 3544 int fd = -1, ret = -1, relayd_created = 0;
0c759fc9 3545 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
d4298c99 3546 struct consumer_relayd_sock_pair *relayd = NULL;
7735ef9e 3547
6151a90f
JD
3548 assert(ctx);
3549 assert(relayd_sock);
3550
da009f2c 3551 DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);
7735ef9e
DG
3552
3553 /* Get relayd reference if exists. */
3554 relayd = consumer_find_relayd(net_seq_idx);
3555 if (relayd == NULL) {
da009f2c 3556 assert(sock_type == LTTNG_STREAM_CONTROL);
7735ef9e
DG
3557 /* Not found. Allocate one. */
3558 relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
3559 if (relayd == NULL) {
618a6a28
MD
3560 ret_code = LTTCOMM_CONSUMERD_ENOMEM;
3561 goto error;
0d08d75e 3562 } else {
30319bcb 3563 relayd->sessiond_session_id = sessiond_id;
0d08d75e 3564 relayd_created = 1;
7735ef9e 3565 }
0d08d75e
DG
3566
3567 /*
3568 * This code path MUST continue to the consumer send status message so
3569 * we can notify the session daemon and continue our work without
3570 * killing everything.
3571 */
da009f2c
MD
3572 } else {
3573 /*
3574 * relayd key should never be found for control socket.
3575 */
3576 assert(sock_type != LTTNG_STREAM_CONTROL);
0d08d75e
DG
3577 }
3578
3579 /* First send a status message before receiving the fds. */
0c759fc9 3580 ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
618a6a28 3581 if (ret < 0) {
0d08d75e 3582 /* Somehow, the session daemon is not responding anymore. */
618a6a28
MD
3583 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
3584 goto error_nosignal;
7735ef9e
DG
3585 }
3586
3587 /* Poll on consumer socket. */
84382d49
MD
3588 ret = lttng_consumer_poll_socket(consumer_sockpoll);
3589 if (ret) {
3590 /* Needing to exit in the middle of a command: error. */
0d08d75e 3591 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
618a6a28 3592 goto error_nosignal;
7735ef9e
DG
3593 }
3594
3595 /* Get relayd socket from session daemon */
3596 ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
3597 if (ret != sizeof(fd)) {
4028eeb9 3598 fd = -1; /* Just in case it gets set with an invalid value. */
0d08d75e
DG
3599
3600 /*
3601 * Failing to receive FDs might indicate a major problem such as
3602 * reaching a fd limit during the receive where the kernel returns a
3603 * MSG_CTRUNC and fails to clean up the fd in the queue. In any case, we
3604 * don't take any chances and stop everything.
3605 *
3606 * XXX: Feature request #558 will fix that and avoid this possible
3607 * issue when reaching the fd limit.
3608 */
3609 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
618a6a28 3610 ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
f50f23d9
DG
3611 goto error;
3612 }
3613
7735ef9e
DG
3614 /* Copy socket information and received FD */
3615 switch (sock_type) {
3616 case LTTNG_STREAM_CONTROL:
3617 /* Copy received lttcomm socket */
6151a90f
JD
3618 lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
3619 ret = lttcomm_create_sock(&relayd->control_sock.sock);
4028eeb9 3620 /* Handle create_sock error. */
f66c074c 3621 if (ret < 0) {
618a6a28 3622 ret_code = LTTCOMM_CONSUMERD_ENOMEM;
4028eeb9 3623 goto error;
f66c074c 3624 }
da009f2c
MD
3625 /*
3626 * Close the socket created internally by
3627 * lttcomm_create_sock, so we can replace it by the one
3628 * received from sessiond.
3629 */
3630 if (close(relayd->control_sock.sock.fd)) {
3631 PERROR("close");
3632 }
7735ef9e
DG
3633
3634 /* Assign new file descriptor */
6151a90f
JD
3635 relayd->control_sock.sock.fd = fd;
3636 /* Assign version values. */
3637 relayd->control_sock.major = relayd_sock->major;
3638 relayd->control_sock.minor = relayd_sock->minor;
c5b6f4f0 3639
d3e2ba59 3640 relayd->relayd_session_id = relayd_session_id;
c5b6f4f0 3641
7735ef9e
DG
3642 break;
3643 case LTTNG_STREAM_DATA:
3644 /* Copy received lttcomm socket */
6151a90f
JD
3645 lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
3646 ret = lttcomm_create_sock(&relayd->data_sock.sock);
4028eeb9 3647 /* Handle create_sock error. */
f66c074c 3648 if (ret < 0) {
618a6a28 3649 ret_code = LTTCOMM_CONSUMERD_ENOMEM;
4028eeb9 3650 goto error;
f66c074c 3651 }
da009f2c
MD
3652 /*
3653 * Close the socket created internally by
3654 * lttcomm_create_sock, so we can replace it by the one
3655 * received from sessiond.
3656 */
3657 if (close(relayd->data_sock.sock.fd)) {
3658 PERROR("close");
3659 }
7735ef9e
DG
3660
3661 /* Assign new file descriptor */
6151a90f
JD
3662 relayd->data_sock.sock.fd = fd;
3663 /* Assign version values. */
3664 relayd->data_sock.major = relayd_sock->major;
3665 relayd->data_sock.minor = relayd_sock->minor;
7735ef9e
DG
3666 break;
3667 default:
3668 ERR("Unknown relayd socket type (%d)", sock_type);
618a6a28 3669 ret_code = LTTCOMM_CONSUMERD_FATAL;
7735ef9e
DG
3670 goto error;
3671 }
3672
d88aee68 3673 DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
7735ef9e
DG
3674 sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
3675 relayd->net_seq_idx, fd);
39d9954c
FD
3676 /*
3677 * We gave the ownership of the fd to the relayd structure. Set the
3678 * fd to -1 so we don't call close() on it in the error path below.
3679 */
3680 fd = -1;
7735ef9e 3681
618a6a28
MD
3682 /* We successfully added the socket. Send status back. */
3683 ret = consumer_send_status_msg(sock, ret_code);
3684 if (ret < 0) {
3685 /* Somehow, the session daemon is not responding anymore. */
3686 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
3687 goto error_nosignal;
3688 }
3689
7735ef9e
DG
3690 /*
3691 * Add relayd socket pair to consumer data hashtable. If object already
3692 * exists or on error, the function gracefully returns.
3693 */
9276e5c8 3694 relayd->ctx = ctx;
d09e1200 3695 add_relayd(relayd);
7735ef9e
DG
3696
3697 /* All good! */
2527bf85 3698 return;
7735ef9e
DG
3699
3700error:
618a6a28
MD
3701 if (consumer_send_status_msg(sock, ret_code) < 0) {
3702 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
3703 }
3704
3705error_nosignal:
4028eeb9
DG
3706 /* Close received socket if valid. */
3707 if (fd >= 0) {
3708 if (close(fd)) {
3709 PERROR("close received socket");
3710 }
3711 }
cd2b09ed
DG
3712
3713 if (relayd_created) {
cd2b09ed
DG
3714 free(relayd);
3715 }
7735ef9e 3716}
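/*
 * Editor's summary (illustrative, derived from the code above): the
 * ADD_RELAYD exchange with the session daemon proceeds as follows:
 *
 *   1. send a SUCCESS status so the session daemon starts transmitting;
 *   2. poll the command socket and receive exactly one fd with
 *      lttcomm_recv_fds_unix_sock();
 *   3. create a placeholder socket with lttcomm_create_sock(), close its
 *      internal fd and substitute the received one, keeping the version
 *      information copied from relayd_sock;
 *   4. send the final status, then publish the pair with add_relayd().
 *
 * Any failure after step 1 must still answer with a status (or a FATAL
 * error) so the session daemon is never left blocked on the socket.
 */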
ca22feea 3717
f7079f67
DG
3718/*
3719 * Search for a relayd associated to the session id and return the reference.
3720 *
3721 * An RCU read-side lock MUST be acquired before calling this function and held
3722 * until the relayd object is no longer necessary.
3723 */
3724static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
3725{
3726 struct lttng_ht_iter iter;
f7079f67 3727 struct consumer_relayd_sock_pair *relayd = NULL;
f7079f67
DG
3728
3729 /* Iterate over all relayd since they are indexed by net_seq_idx. */
3730 cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
3731 node.node) {
18261bd1
DG
3732 /*
3733 * Check by sessiond id, which is unique here, whereas the relayd session
3734 * id might not be when multiple relay daemons are involved.
3735 */
3736 if (relayd->sessiond_session_id == id) {
f7079f67 3737 /* Found the relayd. There can be only one per id. */
18261bd1 3738 goto found;
f7079f67
DG
3739 }
3740 }
3741
18261bd1
DG
3742 return NULL;
3743
3744found:
f7079f67
DG
3745 return relayd;
3746}
3747
ca22feea
DG
3748/*
3749 * Check if, for a given session id, there is still data that needs to be extracted
3750 * from the buffers.
3751 *
6d805429 3752 * Return 1 if data is pending or else 0 meaning ready to be read.
ca22feea 3753 */
6d805429 3754int consumer_data_pending(uint64_t id)
ca22feea
DG
3755{
3756 int ret;
3757 struct lttng_ht_iter iter;
3758 struct lttng_ht *ht;
3759 struct lttng_consumer_stream *stream;
f7079f67 3760 struct consumer_relayd_sock_pair *relayd = NULL;
6d805429 3761 int (*data_pending)(struct lttng_consumer_stream *);
ca22feea 3762
6d805429 3763 DBG("Consumer data pending command on session id %" PRIu64, id);
ca22feea 3764
6f6eda74 3765 rcu_read_lock();
ca22feea
DG
3766 pthread_mutex_lock(&consumer_data.lock);
3767
3768 switch (consumer_data.type) {
3769 case LTTNG_CONSUMER_KERNEL:
6d805429 3770 data_pending = lttng_kconsumer_data_pending;
ca22feea
DG
3771 break;
3772 case LTTNG_CONSUMER32_UST:
3773 case LTTNG_CONSUMER64_UST:
6d805429 3774 data_pending = lttng_ustconsumer_data_pending;
ca22feea
DG
3775 break;
3776 default:
3777 ERR("Unknown consumer data type");
3778 assert(0);
3779 }
3780
3781 /* Ease our life a bit */
3782 ht = consumer_data.stream_list_ht;
3783
c8f59ee5 3784 cds_lfht_for_each_entry_duplicate(ht->ht,
d88aee68
DG
3785 ht->hash_fct(&id, lttng_ht_seed),
3786 ht->match_fct, &id,
ca22feea 3787 &iter.iter, stream, node_session_id.node) {
bb586a6e 3788 pthread_mutex_lock(&stream->lock);
ca22feea 3789
4e9a4686
DG
3790 /*
3791 * A removed node from the hash table indicates that the stream has
3792 * been deleted, which guarantees that the buffers are closed
3793 * on the consumer side. However, data can still be transmitted
3794 * over the network so don't skip the relayd check.
3795 */
3796 ret = cds_lfht_is_node_deleted(&stream->node.node);
3797 if (!ret) {
3798 /* Check the stream if there is data in the buffers. */
6d805429
DG
3799 ret = data_pending(stream);
3800 if (ret == 1) {
4e9a4686 3801 pthread_mutex_unlock(&stream->lock);
f7079f67 3802 goto data_pending;
4e9a4686
DG
3803 }
3804 }
3805
d9f0c7c7
JR
3806 pthread_mutex_unlock(&stream->lock);
3807 }
3808
3809 relayd = find_relayd_by_session_id(id);
3810 if (relayd) {
3811 unsigned int is_data_inflight = 0;
3812
3813 /* Send init command for data pending. */
3814 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
3815 ret = relayd_begin_data_pending(&relayd->control_sock,
3816 relayd->relayd_session_id);
3817 if (ret < 0) {
3818 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
3819 /* Communication error with the relayd; report no data pending. */
3820 goto data_not_pending;
3821 }
3822
3823 cds_lfht_for_each_entry_duplicate(ht->ht,
3824 ht->hash_fct(&id, lttng_ht_seed),
3825 ht->match_fct, &id,
3826 &iter.iter, stream, node_session_id.node) {
c8f59ee5 3827 if (stream->metadata_flag) {
ad7051c0
DG
3828 ret = relayd_quiescent_control(&relayd->control_sock,
3829 stream->relayd_stream_id);
c8f59ee5 3830 } else {
6d805429 3831 ret = relayd_data_pending(&relayd->control_sock,
39df6d9f
DG
3832 stream->relayd_stream_id,
3833 stream->next_net_seq_num - 1);
c8f59ee5 3834 }
d9f0c7c7
JR
3835
3836 if (ret == 1) {
3837 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
3838 goto data_pending;
3839 } else if (ret < 0) {
9276e5c8
JR
3840 ERR("Relayd data pending failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
3841 lttng_consumer_cleanup_relayd(relayd);
3842 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
9276e5c8
JR
3843 goto data_not_pending;
3844 }
c8f59ee5 3845 }
f7079f67 3846
d9f0c7c7 3847 /* Send end command for data pending. */
f7079f67
DG
3848 ret = relayd_end_data_pending(&relayd->control_sock,
3849 relayd->relayd_session_id, &is_data_inflight);
3850 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
bdd88757 3851 if (ret < 0) {
9276e5c8
JR
3852 ERR("Relayd end data pending failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
3853 lttng_consumer_cleanup_relayd(relayd);
f7079f67
DG
3854 goto data_not_pending;
3855 }
bdd88757
DG
3856 if (is_data_inflight) {
3857 goto data_pending;
3858 }
f7079f67
DG
3859 }
3860
ca22feea 3861 /*
f7079f67
DG
3862 * Finding _no_ node in the hash table and no inflight data means that the
3863 * stream(s) have been removed; thus, data is guaranteed to be available for
3864 * analysis from the trace files.
ca22feea
DG
3865 */
3866
f7079f67 3867data_not_pending:
ca22feea
DG
3868 /* Data is available to be read by a viewer. */
3869 pthread_mutex_unlock(&consumer_data.lock);
c8f59ee5 3870 rcu_read_unlock();
6d805429 3871 return 0;
ca22feea 3872
f7079f67 3873data_pending:
ca22feea
DG
3874 /* Data is still being extracted from buffers. */
3875 pthread_mutex_unlock(&consumer_data.lock);
c8f59ee5 3876 rcu_read_unlock();
6d805429 3877 return 1;
ca22feea 3878}
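/*
 * Editor's note (assumption, not part of the original source): the session
 * daemon typically re-issues the data-pending command while stopping a
 * session and only declares the stop complete once this function returns 0,
 * i.e. once no data remains in the buffers nor in flight towards the relay
 * daemon.
 */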
f50f23d9
DG
3879
3880/*
3881 * Send a ret code status message to the sessiond daemon.
3882 *
3883 * Return the sendmsg() return value.
3884 */
3885int consumer_send_status_msg(int sock, int ret_code)
3886{
3887 struct lttcomm_consumer_status_msg msg;
3888
53efb85a 3889 memset(&msg, 0, sizeof(msg));
f50f23d9
DG
3890 msg.ret_code = ret_code;
3891
3892 return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
3893}
ffe60014
DG
3894
3895/*
3896 * Send a channel status message to the sessiond daemon.
3897 *
3898 * Return the sendmsg() return value.
3899 */
3900int consumer_send_status_channel(int sock,
3901 struct lttng_consumer_channel *channel)
3902{
3903 struct lttcomm_consumer_status_channel msg;
3904
3905 assert(sock >= 0);
3906
53efb85a 3907 memset(&msg, 0, sizeof(msg));
ffe60014 3908 if (!channel) {
0c759fc9 3909 msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
ffe60014 3910 } else {
0c759fc9 3911 msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS;
ffe60014
DG
3912 msg.key = channel->key;
3913 msg.stream_count = channel->streams.count;
3914 }
3915
3916 return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
3917}
5c786ded 3918
d07ceecd
MD
3919unsigned long consumer_get_consume_start_pos(unsigned long consumed_pos,
3920 unsigned long produced_pos, uint64_t nb_packets_per_stream,
3921 uint64_t max_sb_size)
5c786ded 3922{
d07ceecd 3923 unsigned long start_pos;
5c786ded 3924
d07ceecd
MD
3925 if (!nb_packets_per_stream) {
3926 return consumed_pos; /* Grab everything */
3927 }
3928 start_pos = produced_pos - offset_align_floor(produced_pos, max_sb_size);
3929 start_pos -= max_sb_size * nb_packets_per_stream;
3930 if ((long) (start_pos - consumed_pos) < 0) {
3931 return consumed_pos; /* Grab everything */
3932 }
3933 return start_pos;
5c786ded 3934}
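/*
 * Editor's worked example (illustrative, assuming a power-of-two sub-buffer
 * size): with max_sb_size = 4096, nb_packets_per_stream = 2,
 * consumed_pos = 8192 and produced_pos = 41000:
 *
 *   start_pos = 41000 - (41000 mod 4096) = 40960   (align down to a packet)
 *   start_pos = 40960 - 2 * 4096         = 32768   (back off two packets)
 *   32768 - 8192 >= 0, so 32768 is returned and only the last two complete
 *   packets are captured.
 *
 * Had consumed_pos been 36864, start_pos - consumed_pos would be negative
 * and consumed_pos itself would be returned ("grab everything").
 */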
a1ae2ea5 3935
b99a8d42
JD
3936static
3937int consumer_flush_buffer(struct lttng_consumer_stream *stream, int producer_active)
3938{
3939 int ret = 0;
3940
3941 switch (consumer_data.type) {
3942 case LTTNG_CONSUMER_KERNEL:
3943 ret = kernctl_buffer_flush(stream->wait_fd);
3944 if (ret < 0) {
3945 ERR("Failed to flush kernel stream");
3946 goto end;
3947 }
3948 break;
3949 case LTTNG_CONSUMER32_UST:
3950 case LTTNG_CONSUMER64_UST:
3951 lttng_ustctl_flush_buffer(stream, producer_active);
3952 break;
3953 default:
3954 ERR("Unknown consumer_data type");
3955 abort();
3956 }
3957
3958end:
3959 return ret;
3960}
3961
3962/*
3963 * Sample the rotate position for all the streams of a channel. If a stream
3964 * is already at the rotate position (produced == consumed), we flag it as
3965 * ready for rotation. The rotation of ready streams occurs after we have
3966 * replied to the session daemon that we have finished sampling the positions.
92b7a7f8 3967 * Must be called with RCU read-side lock held to ensure existence of channel.
b99a8d42
JD
3968 *
3969 * Returns 0 on success, < 0 on error
3970 */
92b7a7f8 3971int lttng_consumer_rotate_channel(struct lttng_consumer_channel *channel,
d2956687 3972 uint64_t key, uint64_t relayd_id, uint32_t metadata,
b99a8d42
JD
3973 struct lttng_consumer_local_data *ctx)
3974{
3975 int ret;
b99a8d42
JD
3976 struct lttng_consumer_stream *stream;
3977 struct lttng_ht_iter iter;
3978 struct lttng_ht *ht = consumer_data.stream_per_chan_id_ht;
c35f9726
JG
3979 struct lttng_dynamic_array stream_rotation_positions;
3980 uint64_t next_chunk_id, stream_count = 0;
3981 enum lttng_trace_chunk_status chunk_status;
3982 const bool is_local_trace = relayd_id == -1ULL;
3983 struct consumer_relayd_sock_pair *relayd = NULL;
3984 bool rotating_to_new_chunk = true;
b99a8d42
JD
3985
3986 DBG("Consumer sample rotate position for channel %" PRIu64, key);
3987
c35f9726
JG
3988 lttng_dynamic_array_init(&stream_rotation_positions,
3989 sizeof(struct relayd_stream_rotation_position), NULL);
3990
b99a8d42
JD
3991 rcu_read_lock();
3992
b99a8d42 3993 pthread_mutex_lock(&channel->lock);
c35f9726
JG
3994 assert(channel->trace_chunk);
3995 chunk_status = lttng_trace_chunk_get_id(channel->trace_chunk,
3996 &next_chunk_id);
3997 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
3998 ret = -1;
3999 goto end_unlock_channel;
4000 }
b99a8d42
JD
4001
4002 cds_lfht_for_each_entry_duplicate(ht->ht,
4003 ht->hash_fct(&channel->key, lttng_ht_seed),
4004 ht->match_fct, &channel->key, &iter.iter,
4005 stream, node_channel_id.node) {
a40a503f 4006 unsigned long produced_pos = 0, consumed_pos = 0;
b99a8d42
JD
4007
4008 health_code_update();
4009
4010 /*
4011 * Lock stream because we are about to change its state.
4012 */
4013 pthread_mutex_lock(&stream->lock);
4014
c35f9726
JG
4015 if (stream->trace_chunk == stream->chan->trace_chunk) {
4016 rotating_to_new_chunk = false;
4017 }
4018
a40a503f
MD
4019 /*
4020 * Active flush; has no effect if the production position
4021 * is at a packet boundary.
4022 */
4023 ret = consumer_flush_buffer(stream, 1);
b99a8d42 4024 if (ret < 0) {
a40a503f
MD
4025 ERR("Failed to flush stream %" PRIu64 " during channel rotation",
4026 stream->key);
b99a8d42
JD
4027 goto end_unlock_stream;
4028 }
4029
a40a503f
MD
4030 ret = lttng_consumer_take_snapshot(stream);
4031 if (ret < 0 && ret != -ENODATA && ret != -EAGAIN) {
4032 ERR("Failed to sample snapshot position during channel rotation");
b99a8d42
JD
4033 goto end_unlock_stream;
4034 }
a40a503f
MD
4035 if (!ret) {
4036 ret = lttng_consumer_get_produced_snapshot(stream,
4037 &produced_pos);
4038 if (ret < 0) {
4039 ERR("Failed to sample produced position during channel rotation");
4040 goto end_unlock_stream;
4041 }
b99a8d42 4042
a40a503f
MD
4043 ret = lttng_consumer_get_consumed_snapshot(stream,
4044 &consumed_pos);
4045 if (ret < 0) {
4046 ERR("Failed to sample consumed position during channel rotation");
4047 goto end_unlock_stream;
4048 }
4049 }
4050 /*
4051 * Align produced position on the start-of-packet boundary of the first
4052 * packet going into the next trace chunk.
4053 */
4054 produced_pos = ALIGN_FLOOR(produced_pos, stream->max_sb_size);
4055 if (consumed_pos == produced_pos) {
b99a8d42
JD
4056 stream->rotate_ready = true;
4057 }
633d0182 4058 /*
a40a503f
MD
4059 * The rotation position is based on the packet_seq_num of the
4060 * packet following the last packet that was consumed for this
4061 * stream, incremented by the offset between produced and
4062 * consumed positions. This rotation position is a lower bound
4063 * (inclusive) at which the next trace chunk starts. Since it
4064 * is a lower bound, it is OK if the packet_seq_num does not
4065 * correspond exactly to the same packet identified by the
4066 * consumed_pos, which can happen in overwrite mode.
633d0182 4067 */
a40a503f
MD
4068 if (stream->sequence_number_unavailable) {
4069 /*
4070 * Rotation should never be performed on a session which
4071 * interacts with a pre-2.8 lttng-modules, which does
4072 * not implement packet sequence number.
4073 */
4074 ERR("Failure to rotate stream %" PRIu64 ": sequence number unavailable",
b99a8d42 4075 stream->key);
a40a503f 4076 ret = -1;
b99a8d42
JD
4077 goto end_unlock_stream;
4078 }
a40a503f
MD
4079 stream->rotate_position = stream->last_sequence_number + 1 +
4080 ((produced_pos - consumed_pos) / stream->max_sb_size);
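		/*
		 * Editor's worked example (illustrative): with
		 * max_sb_size = 4096, last_sequence_number = 42 (last packet
		 * consumed), consumed_pos = 49152 and a packet-aligned
		 * produced_pos = 65536, four packets (seq. num. 43 to 46) are
		 * produced but not yet consumed, so:
		 *
		 *   rotate_position = 42 + 1 + (65536 - 49152) / 4096 = 47
		 *
		 * i.e. the first packet of the next trace chunk is expected to
		 * carry packet_seq_num 47.
		 */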
b99a8d42 4081
c35f9726 4082 if (!is_local_trace) {
633d0182
JG
4083 /*
4084 * The relay daemon control protocol expects a rotation
4085 * position as "the sequence number of the first packet
a40a503f 4086 * _after_ the current trace chunk".
633d0182 4087 */
c35f9726
JG
4088 const struct relayd_stream_rotation_position position = {
4089 .stream_id = stream->relayd_stream_id,
a40a503f 4090 .rotate_at_seq_num = stream->rotate_position,
c35f9726
JG
4091 };
4092
4093 ret = lttng_dynamic_array_add_element(
4094 &stream_rotation_positions,
4095 &position);
4096 if (ret) {
4097 ERR("Failed to allocate stream rotation position");
4098 goto end_unlock_stream;
4099 }
4100 stream_count++;
4101 }
b99a8d42
JD
4102 pthread_mutex_unlock(&stream->lock);
4103 }
c35f9726 4104 stream = NULL;
b99a8d42
JD
4105 pthread_mutex_unlock(&channel->lock);
4106
c35f9726
JG
4107 if (is_local_trace) {
4108 ret = 0;
4109 goto end;
4110 }
4111
4112 relayd = consumer_find_relayd(relayd_id);
4113 if (!relayd) {
4114 ERR("Failed to find relayd %" PRIu64, relayd_id);
4115 ret = -1;
4116 goto end;
4117 }
4118
4119 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
4120 ret = relayd_rotate_streams(&relayd->control_sock, stream_count,
4121 rotating_to_new_chunk ? &next_chunk_id : NULL,
4122 (const struct relayd_stream_rotation_position *)
4123 stream_rotation_positions.buffer.data);
4124 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
4125 if (ret < 0) {
4126 ERR("Relayd rotate stream failed. Cleaning up relayd %" PRIu64,
4127 relayd->net_seq_idx);
4128 lttng_consumer_cleanup_relayd(relayd);
4129 goto end;
4130 }
4131
b99a8d42
JD
4132 ret = 0;
4133 goto end;
4134
4135end_unlock_stream:
4136 pthread_mutex_unlock(&stream->lock);
c35f9726 4137end_unlock_channel:
b99a8d42
JD
4138 pthread_mutex_unlock(&channel->lock);
4139end:
4140 rcu_read_unlock();
c35f9726 4141 lttng_dynamic_array_reset(&stream_rotation_positions);
b99a8d42
JD
4142 return ret;
4143}
4144
02d02e31
JD
4145/*
4146 * Check if a stream is ready to be rotated after extracting it.
4147 *
4148 * Return 1 if it is ready for rotation, 0 if it is not, a negative value on
4149 * error. Stream lock must be held.
4150 */
4151int lttng_consumer_stream_is_rotate_ready(struct lttng_consumer_stream *stream)
4152{
02d02e31 4153 if (stream->rotate_ready) {
a40a503f 4154 return 1;
02d02e31
JD
4155 }
4156
4157 /*
a40a503f
MD
4158 * If packet seq num is unavailable, it means we are interacting
4159 * with a pre-2.8 lttng-modules which does not implement the
4160 * sequence number. Rotation should never be used by sessiond in this
4161 * scenario.
02d02e31 4162 */
a40a503f
MD
4163 if (stream->sequence_number_unavailable) {
4164 ERR("Internal error: rotation used on stream %" PRIu64
4165 " with unavailable sequence number",
4166 stream->key);
4167 return -1;
02d02e31
JD
4168 }
4169
a40a503f
MD
4170 if (stream->rotate_position == -1ULL ||
4171 stream->last_sequence_number == -1ULL) {
4172 return 0;
02d02e31
JD
4173 }
4174
a40a503f
MD
4175 /*
4176 * Rotate position not reached yet. The stream rotate position is
4177 * the position of the next packet belonging to the next trace chunk,
4178 * but consumerd considers rotation ready when reaching the last
4179 * packet of the current chunk, hence the "rotate_position - 1".
4180 */
4181 if (stream->last_sequence_number >= stream->rotate_position - 1) {
4182 return 1;
02d02e31 4183 }
02d02e31 4184
a40a503f 4185 return 0;
02d02e31
JD
4186}
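/*
 * Editor's worked example (illustrative): continuing the example from
 * lttng_consumer_rotate_channel() above, with rotate_position = 47 the
 * stream becomes ready for rotation once last_sequence_number reaches 46,
 * the last packet of the current chunk, since 46 >= 47 - 1. Until then
 * this function keeps returning 0.
 */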
4187
d73bf3d7
JD
4188/*
4189 * Reset the state for a stream after a rotation occurred.
4190 */
4191void lttng_consumer_reset_stream_rotate_state(struct lttng_consumer_stream *stream)
4192{
a40a503f 4193 stream->rotate_position = -1ULL;
d73bf3d7
JD
4194 stream->rotate_ready = false;
4195}
4196
4197/*
4198 * Perform the rotation of a local stream's output files.
4199 */
d2956687 4200static
d73bf3d7
JD
4201int rotate_local_stream(struct lttng_consumer_local_data *ctx,
4202 struct lttng_consumer_stream *stream)
4203{
d2956687 4204 int ret = 0;
d73bf3d7 4205
d2956687 4206 DBG("Rotate local stream: stream key %" PRIu64 ", channel key %" PRIu64,
d73bf3d7 4207 stream->key,
d2956687 4208 stream->chan->key);
d73bf3d7 4209 stream->tracefile_size_current = 0;
d2956687 4210 stream->tracefile_count_current = 0;
d73bf3d7 4211
d2956687
JG
4212 if (stream->out_fd >= 0) {
4213 ret = close(stream->out_fd);
4214 if (ret) {
4215 PERROR("Failed to close stream out_fd of channel \"%s\"",
4216 stream->chan->name);
4217 }
4218 stream->out_fd = -1;
4219 }
d73bf3d7 4220
d2956687 4221 if (stream->index_file) {
d73bf3d7 4222 lttng_index_file_put(stream->index_file);
d2956687 4223 stream->index_file = NULL;
d73bf3d7
JD
4224 }
4225
d2956687
JG
4226 if (!stream->trace_chunk) {
4227 goto end;
4228 }
d73bf3d7 4229
d2956687 4230 ret = consumer_stream_create_output_files(stream, true);
d73bf3d7
JD
4231end:
4232 return ret;
d73bf3d7
JD
4233}
4234
d73bf3d7
JD
4235/*
4236 * Performs the stream rotation for the rotate session feature if needed.
d2956687 4237 * It must be called with the channel and stream locks held.
d73bf3d7
JD
4238 *
4240 * Return 0 on success, a negative value on error.
4240 */
4241int lttng_consumer_rotate_stream(struct lttng_consumer_local_data *ctx,
d2956687 4242 struct lttng_consumer_stream *stream)
d73bf3d7
JD
4243{
4244 int ret;
4245
4246 DBG("Consumer rotate stream %" PRIu64, stream->key);
4247
d2956687
JG
4248 /*
4249 * Update the stream's 'current' chunk to the session's (channel)
4250 * now-current chunk.
4251 */
4252 lttng_trace_chunk_put(stream->trace_chunk);
4253 if (stream->chan->trace_chunk == stream->trace_chunk) {
4254 /*
4255 * A channel can be rotated and not have a "next" chunk
4256 * to transition to. In that case, the channel's "current chunk"
4257 * has not been closed yet, but it has not been updated to
4258 * a "next" trace chunk either. Hence, the stream, like its
4259 * parent channel, becomes part of no chunk and can't output
4260 * anything until a new trace chunk is created.
4261 */
4262 stream->trace_chunk = NULL;
4263 } else if (stream->chan->trace_chunk &&
4264 !lttng_trace_chunk_get(stream->chan->trace_chunk)) {
4265 ERR("Failed to acquire a reference to channel's trace chunk during stream rotation");
4266 ret = -1;
4267 goto error;
4268 } else {
4269 /*
4270 * Update the stream's trace chunk to its parent channel's
4271 * current trace chunk.
4272 */
4273 stream->trace_chunk = stream->chan->trace_chunk;
4274 }
4275
c35f9726 4276 if (stream->net_seq_idx == (uint64_t) -1ULL) {
d73bf3d7 4277 ret = rotate_local_stream(ctx, stream);
c35f9726
JG
4278 if (ret < 0) {
4279 ERR("Failed to rotate stream, ret = %i", ret);
4280 goto error;
4281 }
d73bf3d7
JD
4282 }
4283
d2956687
JG
4284 if (stream->metadata_flag && stream->trace_chunk) {
4285 /*
4286 * If the stream has transitioned to a new trace
4287 * chunk, the metadata should be re-dumped to the
4288 * newest chunk.
4289 *
4290 * However, it is possible for a stream to transition to
4291 * a "no-chunk" state. This can happen if a rotation
4292 * occurs on an inactive session. In such cases, the metadata
4293 * regeneration will happen when the next trace chunk is
4294 * created.
4295 */
4296 ret = consumer_metadata_stream_dump(stream);
4297 if (ret) {
4298 goto error;
d73bf3d7
JD
4299 }
4300 }
4301 lttng_consumer_reset_stream_rotate_state(stream);
4302
4303 ret = 0;
4304
4305error:
4306 return ret;
4307}
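/*
 * Editor's summary (illustrative, derived from the code above): a stream
 * rotation therefore amounts to: drop the reference to the previous trace
 * chunk, adopt the channel's current chunk (or none for a chunk-less
 * channel), rotate the local output files when the trace is not sent to a
 * relay daemon, re-dump the metadata into the new chunk for metadata
 * streams, and finally clear rotate_position/rotate_ready through
 * lttng_consumer_reset_stream_rotate_state().
 */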
4308
b99a8d42
JD
4309/*
4310 * Rotate all the ready streams now.
4311 *
4312 * This is especially important for low throughput streams that have already
4313 * been consumed; we cannot wait for their next packet to perform the
4314 * rotation.
92b7a7f8
MD
4315 * Needs to be called with the RCU read-side lock held to ensure existence of
4316 * channel.
b99a8d42
JD
4317 *
4318 * Returns 0 on success, < 0 on error
4319 */
92b7a7f8
MD
4320int lttng_consumer_rotate_ready_streams(struct lttng_consumer_channel *channel,
4321 uint64_t key, struct lttng_consumer_local_data *ctx)
b99a8d42
JD
4322{
4323 int ret;
b99a8d42
JD
4324 struct lttng_consumer_stream *stream;
4325 struct lttng_ht_iter iter;
4326 struct lttng_ht *ht = consumer_data.stream_per_chan_id_ht;
4327
4328 rcu_read_lock();
4329
4330 DBG("Consumer rotate ready streams in channel %" PRIu64, key);
4331
b99a8d42
JD
4332 cds_lfht_for_each_entry_duplicate(ht->ht,
4333 ht->hash_fct(&channel->key, lttng_ht_seed),
4334 ht->match_fct, &channel->key, &iter.iter,
4335 stream, node_channel_id.node) {
4336 health_code_update();
4337
d2956687 4338 pthread_mutex_lock(&stream->chan->lock);
b99a8d42
JD
4339 pthread_mutex_lock(&stream->lock);
4340
4341 if (!stream->rotate_ready) {
4342 pthread_mutex_unlock(&stream->lock);
d2956687 4343 pthread_mutex_unlock(&stream->chan->lock);
b99a8d42
JD
4344 continue;
4345 }
4346 DBG("Consumer rotate ready stream %" PRIu64, stream->key);
4347
d2956687 4348 ret = lttng_consumer_rotate_stream(ctx, stream);
b99a8d42 4349 pthread_mutex_unlock(&stream->lock);
d2956687 4350 pthread_mutex_unlock(&stream->chan->lock);
b99a8d42
JD
4351 if (ret) {
4352 goto end;
4353 }
4354 }
4355
4356 ret = 0;
4357
4358end:
4359 rcu_read_unlock();
4360 return ret;
4361}
4362
d2956687
JG
4363enum lttcomm_return_code lttng_consumer_init_command(
4364 struct lttng_consumer_local_data *ctx,
4365 const lttng_uuid sessiond_uuid)
00fb02ac 4366{
d2956687 4367 enum lttcomm_return_code ret;
c70636a7 4368 char uuid_str[LTTNG_UUID_STR_LEN];
00fb02ac 4369
d2956687
JG
4370 if (ctx->sessiond_uuid.is_set) {
4371 ret = LTTCOMM_CONSUMERD_ALREADY_SET;
00fb02ac
JD
4372 goto end;
4373 }
4374
d2956687
JG
4375 ctx->sessiond_uuid.is_set = true;
4376 memcpy(ctx->sessiond_uuid.value, sessiond_uuid, sizeof(lttng_uuid));
4377 ret = LTTCOMM_CONSUMERD_SUCCESS;
4378 lttng_uuid_to_str(sessiond_uuid, uuid_str);
4379 DBG("Received session daemon UUID: %s", uuid_str);
00fb02ac
JD
4380end:
4381 return ret;
4382}
4383
d2956687
JG
4384enum lttcomm_return_code lttng_consumer_create_trace_chunk(
4385 const uint64_t *relayd_id, uint64_t session_id,
4386 uint64_t chunk_id,
4387 time_t chunk_creation_timestamp,
4388 const char *chunk_override_name,
4389 const struct lttng_credentials *credentials,
4390 struct lttng_directory_handle *chunk_directory_handle)
00fb02ac
JD
4391{
4392 int ret;
d2956687 4393 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
7ea24db3 4394 struct lttng_trace_chunk *created_chunk = NULL, *published_chunk = NULL;
d2956687
JG
4395 enum lttng_trace_chunk_status chunk_status;
4396 char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)];
4397 char creation_timestamp_buffer[ISO8601_STR_LEN];
4398 const char *relayd_id_str = "(none)";
4399 const char *creation_timestamp_str;
4400 struct lttng_ht_iter iter;
4401 struct lttng_consumer_channel *channel;
92816cc3 4402
d2956687
JG
4403 if (relayd_id) {
4404 /* Only used for logging purposes. */
4405 ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer),
4406 "%" PRIu64, *relayd_id);
4407 if (ret > 0 && ret < sizeof(relayd_id_buffer)) {
4408 relayd_id_str = relayd_id_buffer;
4409 } else {
4410 relayd_id_str = "(formatting error)";
4411 }
4412 }
4413
4414 /* Local protocol error. */
4415 assert(chunk_creation_timestamp);
4416 ret = time_to_iso8601_str(chunk_creation_timestamp,
4417 creation_timestamp_buffer,
4418 sizeof(creation_timestamp_buffer));
4419 creation_timestamp_str = !ret ? creation_timestamp_buffer :
4420 "(formatting error)";
4421
4422 DBG("Consumer create trace chunk command: relay_id = %s"
4423 ", session_id = %" PRIu64 ", chunk_id = %" PRIu64
4424 ", chunk_override_name = %s"
4425 ", chunk_creation_timestamp = %s",
4426 relayd_id_str, session_id, chunk_id,
4427 chunk_override_name ? : "(none)",
4428 creation_timestamp_str);
92816cc3
JG
4429
4430 /*
d2956687
JG
4431 * The trace chunk registry, as used by the consumer daemon, implicitly
4432 * owns the trace chunks. This is only needed in the consumer since
4433 * the consumer has no notion of a session beyond session IDs being
4434 * used to identify other objects.
4435 *
4436 * The lttng_trace_chunk_registry_publish() call below provides a
4437 * reference which is not released; it implicitly becomes the session
4438 * daemon's reference to the chunk in the consumer daemon.
4439 *
4440 * The lifetime of trace chunks in the consumer daemon is managed by
4441 * the session daemon through the LTTNG_CONSUMER_CREATE_TRACE_CHUNK
4442 * and LTTNG_CONSUMER_DESTROY_TRACE_CHUNK commands.
92816cc3 4443 */
d2956687
JG
4444 created_chunk = lttng_trace_chunk_create(chunk_id,
4445 chunk_creation_timestamp);
4446 if (!created_chunk) {
4447 ERR("Failed to create trace chunk");
4448 ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
7ea24db3 4449 goto error;
d2956687 4450 }
92816cc3 4451
d2956687
JG
4452 if (chunk_override_name) {
4453 chunk_status = lttng_trace_chunk_override_name(created_chunk,
4454 chunk_override_name);
4455 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
4456 ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
7ea24db3 4457 goto error;
92816cc3
JG
4458 }
4459 }
4460
d2956687
JG
4461 if (chunk_directory_handle) {
4462 chunk_status = lttng_trace_chunk_set_credentials(created_chunk,
4463 credentials);
4464 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
4465 ERR("Failed to set trace chunk credentials");
4466 ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
7ea24db3 4467 goto error;
d2956687
JG
4468 }
4469 /*
4470 * The consumer daemon has no ownership of the chunk output
4471 * directory.
4472 */
4473 chunk_status = lttng_trace_chunk_set_as_user(created_chunk,
4474 chunk_directory_handle);
cbf53d23 4475 chunk_directory_handle = NULL;
d2956687
JG
4476 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
4477 ERR("Failed to set trace chunk's directory handle");
4478 ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
7ea24db3 4479 goto error;
92816cc3
JG
4480 }
4481 }
4482
d2956687
JG
4483 published_chunk = lttng_trace_chunk_registry_publish_chunk(
4484 consumer_data.chunk_registry, session_id,
4485 created_chunk);
4486 lttng_trace_chunk_put(created_chunk);
4487 created_chunk = NULL;
4488 if (!published_chunk) {
4489 ERR("Failed to publish trace chunk");
4490 ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
7ea24db3 4491 goto error;
d88744a4
JD
4492 }
4493
d2956687
JG
4494 rcu_read_lock();
4495 cds_lfht_for_each_entry_duplicate(consumer_data.channels_by_session_id_ht->ht,
4496 consumer_data.channels_by_session_id_ht->hash_fct(
4497 &session_id, lttng_ht_seed),
4498 consumer_data.channels_by_session_id_ht->match_fct,
4499 &session_id, &iter.iter, channel,
4500 channels_by_session_id_ht_node.node) {
4501 ret = lttng_consumer_channel_set_trace_chunk(channel,
4502 published_chunk);
4503 if (ret) {
4504 /*
4505 * Roll-back the creation of this chunk.
4506 *
4507 * This is important since the session daemon will
4508 * assume that the creation of this chunk failed and
4509 * will never ask for it to be closed, resulting
4510 * in a leak and an inconsistent state for some
4511 * channels.
4512 */
4513 enum lttcomm_return_code close_ret;
ecd1a12f 4514 char path[LTTNG_PATH_MAX];
d2956687
JG
4515
4516 DBG("Failed to set new trace chunk on existing channels, rolling back");
4517 close_ret = lttng_consumer_close_trace_chunk(relayd_id,
4518 session_id, chunk_id,
ecd1a12f
MD
4519 chunk_creation_timestamp, NULL,
4520 path);
d2956687
JG
4521 if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) {
4522 ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64 ", chunk_id = %" PRIu64,
4523 session_id, chunk_id);
4524 }
a1ae2ea5 4525
d2956687
JG
4526 ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
4527 break;
4528 }
a1ae2ea5
JD
4529 }
4530
e5add6d0
JG
4531 if (relayd_id) {
4532 struct consumer_relayd_sock_pair *relayd;
4533
4534 relayd = consumer_find_relayd(*relayd_id);
4535 if (relayd) {
4536 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
4537 ret = relayd_create_trace_chunk(
4538 &relayd->control_sock, published_chunk);
4539 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
4540 } else {
4541 ERR("Failed to find relay daemon socket: relayd_id = %" PRIu64, *relayd_id);
4542 }
4543
4544 if (!relayd || ret) {
4545 enum lttcomm_return_code close_ret;
ecd1a12f 4546 char path[LTTNG_PATH_MAX];
e5add6d0
JG
4547
4548 close_ret = lttng_consumer_close_trace_chunk(relayd_id,
4549 session_id,
4550 chunk_id,
bbc4768c 4551 chunk_creation_timestamp,
ecd1a12f 4552 NULL, path);
e5add6d0
JG
4553 if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) {
4554 ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64 ", chunk_id = %" PRIu64,
4555 session_id,
4556 chunk_id);
4557 }
4558
4559 ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
7ea24db3 4560 goto error_unlock;
e5add6d0
JG
4561 }
4562 }
7ea24db3 4563error_unlock:
e5add6d0 4564 rcu_read_unlock();
7ea24db3 4565error:
d2956687
JG
4566 /* Release the reference returned by the "publish" operation. */
4567 lttng_trace_chunk_put(published_chunk);
9bb5f1f8 4568 lttng_trace_chunk_put(created_chunk);
d2956687 4569 return ret_code;
a1ae2ea5
JD
4570}
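/*
 * Editor's summary (illustrative, derived from this function and from
 * lttng_consumer_close_trace_chunk() below): the trace chunk lifecycle as
 * seen by the consumer daemon is:
 *
 *   1. CREATE_TRACE_CHUNK publishes the chunk in the registry and sets it
 *      as the current chunk of every channel of the session (announcing it
 *      to the relay daemon for network sessions);
 *   2. streams write into the chunk while it is current;
 *   3. CLOSE_TRACE_CHUNK stamps the close timestamp/command, detaches the
 *      chunk from channels still referencing it and releases the session
 *      daemon's implicit reference.
 *
 * Any failure while distributing a new chunk triggers the roll-back close
 * seen above so the session daemon is not left assuming the chunk exists.
 */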
4571
d2956687
JG
4572enum lttcomm_return_code lttng_consumer_close_trace_chunk(
4573 const uint64_t *relayd_id, uint64_t session_id,
bbc4768c 4574 uint64_t chunk_id, time_t chunk_close_timestamp,
ecd1a12f
MD
4575 const enum lttng_trace_chunk_command_type *close_command,
4576 char *path)
a1ae2ea5 4577{
d2956687
JG
4578 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
4579 struct lttng_trace_chunk *chunk;
4580 char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)];
4581 const char *relayd_id_str = "(none)";
bbc4768c 4582 const char *close_command_name = "none";
d2956687
JG
4583 struct lttng_ht_iter iter;
4584 struct lttng_consumer_channel *channel;
4585 enum lttng_trace_chunk_status chunk_status;
a1ae2ea5 4586
d2956687
JG
4587 if (relayd_id) {
4588 int ret;
4589
4590 /* Only used for logging purposes. */
4591 ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer),
4592 "%" PRIu64, *relayd_id);
4593 if (ret > 0 && ret < sizeof(relayd_id_buffer)) {
4594 relayd_id_str = relayd_id_buffer;
4595 } else {
4596 relayd_id_str = "(formatting error)";
4597 }
bbc4768c
JG
4598 }
4599 if (close_command) {
4600 close_command_name = lttng_trace_chunk_command_type_get_name(
4601 *close_command);
4602 }
d2956687
JG
4603
4604 DBG("Consumer close trace chunk command: relayd_id = %s"
bbc4768c
JG
4605 ", session_id = %" PRIu64 ", chunk_id = %" PRIu64
4606 ", close command = %s",
4607 relayd_id_str, session_id, chunk_id,
4608 close_command_name);
4609
d2956687 4610 chunk = lttng_trace_chunk_registry_find_chunk(
bbc4768c
JG
4611 consumer_data.chunk_registry, session_id, chunk_id);
4612 if (!chunk) {
d2956687
JG
4613 ERR("Failed to find chunk: session_id = %" PRIu64
4614 ", chunk_id = %" PRIu64,
4615 session_id, chunk_id);
4616 ret_code = LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK;
a1ae2ea5
JD
4617 goto end;
4618 }
4619
d2956687
JG
4620 chunk_status = lttng_trace_chunk_set_close_timestamp(chunk,
4621 chunk_close_timestamp);
4622 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
4623 ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
4624 goto end;
45f1d9a1 4625 }
bbc4768c
JG
4626
4627 if (close_command) {
4628 chunk_status = lttng_trace_chunk_set_close_command(
4629 chunk, *close_command);
4630 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
4631 ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
4632 goto end;
4633 }
4634 }
a1ae2ea5 4635
d2956687
JG
4636 /*
4637 * chunk is now invalid to access as we no longer hold a reference to
4638 * it; it is only kept around to compare it (by address) to the
4639 * current chunk found in the session's channels.
4640 */
4641 rcu_read_lock();
4642 cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter,
4643 channel, node.node) {
4644 int ret;
a1ae2ea5 4645
d2956687
JG
4646 /*
4647 * Only change the channel's chunk to NULL if it still
4648 * references the chunk being closed. The channel may
4649 * reference a newer chunk in the case of a session
4650 * rotation. When a session rotation occurs, the "next"
4651 * chunk is created before the "current" chunk is closed.
4652 */
4653 if (channel->trace_chunk != chunk) {
4654 continue;
4655 }
4656 ret = lttng_consumer_channel_set_trace_chunk(channel, NULL);
4657 if (ret) {
4658 /*
4659 * Attempt to close the chunk on as many channels as
4660 * possible.
4661 */
4662 ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
4663 }
a1ae2ea5 4664 }
bbc4768c
JG
4665
4666 if (relayd_id) {
4667 int ret;
4668 struct consumer_relayd_sock_pair *relayd;
4669
4670 relayd = consumer_find_relayd(*relayd_id);
4671 if (relayd) {
4672 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
4673 ret = relayd_close_trace_chunk(
ecd1a12f
MD
4674 &relayd->control_sock, chunk,
4675 path);
bbc4768c
JG
4676 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
4677 } else {
4678 ERR("Failed to find relay daemon socket: relayd_id = %" PRIu64,
4679 *relayd_id);
4680 }
4681
4682 if (!relayd || ret) {
4683 ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
4684 goto error_unlock;
4685 }
4686 }
4687error_unlock:
d2956687
JG
4688 rcu_read_unlock();
4689end:
bbc4768c
JG
4690 /*
4691 * Release the reference returned by the "find" operation and
4692 * the session daemon's implicit reference to the chunk.
4693 */
4694 lttng_trace_chunk_put(chunk);
4695 lttng_trace_chunk_put(chunk);
4696
d2956687 4697 return ret_code;
a1ae2ea5 4698}
3654ed19 4699
d2956687
JG
4700enum lttcomm_return_code lttng_consumer_trace_chunk_exists(
4701 const uint64_t *relayd_id, uint64_t session_id,
4702 uint64_t chunk_id)
3654ed19 4703{
c35f9726 4704 int ret;
d2956687 4705 enum lttcomm_return_code ret_code;
d2956687
JG
4706 char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)];
4707 const char *relayd_id_str = "(none)";
c35f9726
JG
4708 const bool is_local_trace = !relayd_id;
4709 struct consumer_relayd_sock_pair *relayd = NULL;
6b584c2e 4710 bool chunk_exists_local, chunk_exists_remote;
d2956687
JG
4711
4712 if (relayd_id) {
4713 int ret;
4714
4715 /* Only used for logging purposes. */
4716 ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer),
4717 "%" PRIu64, *relayd_id);
4718 if (ret > 0 && ret < sizeof(relayd_id_buffer)) {
4719 relayd_id_str = relayd_id_buffer;
4720 } else {
4721 relayd_id_str = "(formatting error)";
4722 }
4723 }
4724
4725 DBG("Consumer trace chunk exists command: relayd_id = %s"
d2956687 4726 ", chunk_id = %" PRIu64, relayd_id_str,
c35f9726 4727 chunk_id);
6b584c2e 4728 ret = lttng_trace_chunk_registry_chunk_exists(
d2956687 4729 consumer_data.chunk_registry, session_id,
6b584c2e
JG
4730 chunk_id, &chunk_exists_local);
4731 if (ret) {
4732 /* Internal error. */
4733 ERR("Failed to query the existence of a trace chunk");
4734 ret_code = LTTCOMM_CONSUMERD_FATAL;
13e3b280 4735 goto end;
6b584c2e
JG
4736 }
4737 DBG("Trace chunk %s locally",
4738 chunk_exists_local ? "exists" : "does not exist");
4739 if (chunk_exists_local) {
c35f9726 4740 ret_code = LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_LOCAL;
c35f9726
JG
4741 goto end;
4742 } else if (is_local_trace) {
4743 ret_code = LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK;
4744 goto end;
4745 }
4746
4747 rcu_read_lock();
4748 relayd = consumer_find_relayd(*relayd_id);
4749 if (!relayd) {
4750 ERR("Failed to find relayd %" PRIu64, *relayd_id);
4751 ret_code = LTTCOMM_CONSUMERD_INVALID_PARAMETERS;
4752 goto end_rcu_unlock;
4753 }
4754 DBG("Looking up existence of trace chunk on relay daemon");
4755 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
4756 ret = relayd_trace_chunk_exists(&relayd->control_sock, chunk_id,
4757 &chunk_exists_remote);
4758 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
4759 if (ret < 0) {
4760 ERR("Failed to look-up the existence of trace chunk on relay daemon");
4761 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
4762 goto end_rcu_unlock;
4763 }
4764
4765 ret_code = chunk_exists_remote ?
4766 LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_REMOTE :
d2956687 4767 LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK;
c35f9726
JG
4768 DBG("Trace chunk %s on relay daemon",
4769 chunk_exists_remote ? "exists" : "does not exist");
d2956687 4770
c35f9726
JG
4771end_rcu_unlock:
4772 rcu_read_unlock();
4773end:
d2956687 4774 return ret_code;
3654ed19 4775}