/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; only version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#define _GNU_SOURCE	/* for SYNC_FILE_RANGE_* flags */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <common/common.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/ust-consumer/ust-consumer.h>
39 struct lttng_consumer_global_data consumer_data
= {
42 .type
= LTTNG_CONSUMER_UNKNOWN
,
/*
 * timeout parameter, to control the polling thread grace period.
 * -1 means "block forever" until the receive thread arms the grace period
 * (see lttng_consumer_thread_receive_fds).
 */
int consumer_poll_timeout = -1;
/*
 * Flag to inform the polling thread to quit when all fd hung up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds has hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 * NOTE(review): volatile does not give cross-thread ordering guarantees;
 * presumably visibility is acceptable because readers poll it in a loop —
 * confirm before relying on it for anything stricter.
 */
volatile int consumer_quit = 0;
57 * Find a stream. The consumer_data.lock must be locked during this
60 static struct lttng_consumer_stream
*consumer_find_stream(int key
)
62 struct lttng_ht_iter iter
;
63 struct lttng_ht_node_ulong
*node
;
64 struct lttng_consumer_stream
*stream
= NULL
;
66 /* Negative keys are lookup failures */
72 lttng_ht_lookup(consumer_data
.stream_ht
, (void *)((unsigned long) key
),
74 node
= lttng_ht_iter_get_node_ulong(&iter
);
76 stream
= caa_container_of(node
, struct lttng_consumer_stream
, node
);
84 static void consumer_steal_stream_key(int key
)
86 struct lttng_consumer_stream
*stream
;
88 stream
= consumer_find_stream(key
);
93 static struct lttng_consumer_channel
*consumer_find_channel(int key
)
95 struct lttng_ht_iter iter
;
96 struct lttng_ht_node_ulong
*node
;
97 struct lttng_consumer_channel
*channel
= NULL
;
99 /* Negative keys are lookup failures */
105 lttng_ht_lookup(consumer_data
.channel_ht
, (void *)((unsigned long) key
),
107 node
= lttng_ht_iter_get_node_ulong(&iter
);
109 channel
= caa_container_of(node
, struct lttng_consumer_channel
, node
);
117 static void consumer_steal_channel_key(int key
)
119 struct lttng_consumer_channel
*channel
;
121 channel
= consumer_find_channel(key
);
127 * Remove a stream from the global list protected by a mutex. This
128 * function is also responsible for freeing its data structures.
130 void consumer_del_stream(struct lttng_consumer_stream
*stream
)
133 struct lttng_ht_iter iter
;
134 struct lttng_consumer_channel
*free_chan
= NULL
;
136 pthread_mutex_lock(&consumer_data
.lock
);
138 switch (consumer_data
.type
) {
139 case LTTNG_CONSUMER_KERNEL
:
140 if (stream
->mmap_base
!= NULL
) {
141 ret
= munmap(stream
->mmap_base
, stream
->mmap_len
);
147 case LTTNG_CONSUMER32_UST
:
148 case LTTNG_CONSUMER64_UST
:
149 lttng_ustconsumer_del_stream(stream
);
152 ERR("Unknown consumer_data type");
159 /* Get stream node from hash table */
160 lttng_ht_lookup(consumer_data
.stream_ht
,
161 (void *)((unsigned long) stream
->key
), &iter
);
162 /* Remove stream node from hash table */
163 ret
= lttng_ht_del(consumer_data
.stream_ht
, &iter
);
168 if (consumer_data
.stream_count
<= 0) {
171 consumer_data
.stream_count
--;
175 if (stream
->out_fd
>= 0) {
176 close(stream
->out_fd
);
178 if (stream
->wait_fd
>= 0 && !stream
->wait_fd_is_copy
) {
179 close(stream
->wait_fd
);
181 if (stream
->shm_fd
>= 0 && stream
->wait_fd
!= stream
->shm_fd
) {
182 close(stream
->shm_fd
);
184 if (!--stream
->chan
->refcount
)
185 free_chan
= stream
->chan
;
188 consumer_data
.need_update
= 1;
189 pthread_mutex_unlock(&consumer_data
.lock
);
192 consumer_del_channel(free_chan
);
195 static void consumer_del_stream_rcu(struct rcu_head
*head
)
197 struct lttng_ht_node_ulong
*node
=
198 caa_container_of(head
, struct lttng_ht_node_ulong
, head
);
199 struct lttng_consumer_stream
*stream
=
200 caa_container_of(node
, struct lttng_consumer_stream
, node
);
202 consumer_del_stream(stream
);
205 struct lttng_consumer_stream
*consumer_allocate_stream(
206 int channel_key
, int stream_key
,
207 int shm_fd
, int wait_fd
,
208 enum lttng_consumer_stream_state state
,
210 enum lttng_event_output output
,
211 const char *path_name
,
215 struct lttng_consumer_stream
*stream
;
218 stream
= zmalloc(sizeof(*stream
));
219 if (stream
== NULL
) {
220 perror("malloc struct lttng_consumer_stream");
223 stream
->chan
= consumer_find_channel(channel_key
);
225 perror("Unable to find channel key");
228 stream
->chan
->refcount
++;
229 stream
->key
= stream_key
;
230 stream
->shm_fd
= shm_fd
;
231 stream
->wait_fd
= wait_fd
;
233 stream
->out_fd_offset
= 0;
234 stream
->state
= state
;
235 stream
->mmap_len
= mmap_len
;
236 stream
->mmap_base
= NULL
;
237 stream
->output
= output
;
240 strncpy(stream
->path_name
, path_name
, PATH_MAX
- 1);
241 stream
->path_name
[PATH_MAX
- 1] = '\0';
242 lttng_ht_node_init_ulong(&stream
->node
, stream
->key
);
244 switch (consumer_data
.type
) {
245 case LTTNG_CONSUMER_KERNEL
:
247 case LTTNG_CONSUMER32_UST
:
248 case LTTNG_CONSUMER64_UST
:
249 stream
->cpu
= stream
->chan
->cpucount
++;
250 ret
= lttng_ustconsumer_allocate_stream(stream
);
257 ERR("Unknown consumer_data type");
261 DBG("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, out_fd %d)",
262 stream
->path_name
, stream
->key
,
265 (unsigned long long) stream
->mmap_len
,
272 * Add a stream to the global list protected by a mutex.
274 int consumer_add_stream(struct lttng_consumer_stream
*stream
)
278 pthread_mutex_lock(&consumer_data
.lock
);
279 /* Steal stream identifier, for UST */
280 consumer_steal_stream_key(stream
->key
);
282 lttng_ht_add_unique_ulong(consumer_data
.stream_ht
, &stream
->node
);
284 consumer_data
.stream_count
++;
285 consumer_data
.need_update
= 1;
287 switch (consumer_data
.type
) {
288 case LTTNG_CONSUMER_KERNEL
:
290 case LTTNG_CONSUMER32_UST
:
291 case LTTNG_CONSUMER64_UST
:
292 /* Streams are in CPU number order (we rely on this) */
293 stream
->cpu
= stream
->chan
->nr_streams
++;
296 ERR("Unknown consumer_data type");
302 pthread_mutex_unlock(&consumer_data
.lock
);
307 * Update a stream according to what we just received.
309 void consumer_change_stream_state(int stream_key
,
310 enum lttng_consumer_stream_state state
)
312 struct lttng_consumer_stream
*stream
;
314 pthread_mutex_lock(&consumer_data
.lock
);
315 stream
= consumer_find_stream(stream_key
);
317 stream
->state
= state
;
319 consumer_data
.need_update
= 1;
320 pthread_mutex_unlock(&consumer_data
.lock
);
324 * Remove a channel from the global list protected by a mutex. This
325 * function is also responsible for freeing its data structures.
327 void consumer_del_channel(struct lttng_consumer_channel
*channel
)
330 struct lttng_ht_iter iter
;
332 pthread_mutex_lock(&consumer_data
.lock
);
334 switch (consumer_data
.type
) {
335 case LTTNG_CONSUMER_KERNEL
:
337 case LTTNG_CONSUMER32_UST
:
338 case LTTNG_CONSUMER64_UST
:
339 lttng_ustconsumer_del_channel(channel
);
342 ERR("Unknown consumer_data type");
349 lttng_ht_lookup(consumer_data
.channel_ht
,
350 (void *)((unsigned long) channel
->key
), &iter
);
351 ret
= lttng_ht_del(consumer_data
.channel_ht
, &iter
);
356 if (channel
->mmap_base
!= NULL
) {
357 ret
= munmap(channel
->mmap_base
, channel
->mmap_len
);
362 if (channel
->wait_fd
>= 0 && !channel
->wait_fd_is_copy
) {
363 close(channel
->wait_fd
);
365 if (channel
->shm_fd
>= 0 && channel
->wait_fd
!= channel
->shm_fd
) {
366 close(channel
->shm_fd
);
370 pthread_mutex_unlock(&consumer_data
.lock
);
373 static void consumer_del_channel_rcu(struct rcu_head
*head
)
375 struct lttng_ht_node_ulong
*node
=
376 caa_container_of(head
, struct lttng_ht_node_ulong
, head
);
377 struct lttng_consumer_channel
*channel
=
378 caa_container_of(node
, struct lttng_consumer_channel
, node
);
380 consumer_del_channel(channel
);
383 struct lttng_consumer_channel
*consumer_allocate_channel(
385 int shm_fd
, int wait_fd
,
387 uint64_t max_sb_size
)
389 struct lttng_consumer_channel
*channel
;
392 channel
= zmalloc(sizeof(*channel
));
393 if (channel
== NULL
) {
394 perror("malloc struct lttng_consumer_channel");
397 channel
->key
= channel_key
;
398 channel
->shm_fd
= shm_fd
;
399 channel
->wait_fd
= wait_fd
;
400 channel
->mmap_len
= mmap_len
;
401 channel
->max_sb_size
= max_sb_size
;
402 channel
->refcount
= 0;
403 channel
->nr_streams
= 0;
404 lttng_ht_node_init_ulong(&channel
->node
, channel
->key
);
406 switch (consumer_data
.type
) {
407 case LTTNG_CONSUMER_KERNEL
:
408 channel
->mmap_base
= NULL
;
409 channel
->mmap_len
= 0;
411 case LTTNG_CONSUMER32_UST
:
412 case LTTNG_CONSUMER64_UST
:
413 ret
= lttng_ustconsumer_allocate_channel(channel
);
420 ERR("Unknown consumer_data type");
424 DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
428 (unsigned long long) channel
->mmap_len
,
429 (unsigned long long) channel
->max_sb_size
);
435 * Add a channel to the global list protected by a mutex.
437 int consumer_add_channel(struct lttng_consumer_channel
*channel
)
439 pthread_mutex_lock(&consumer_data
.lock
);
440 /* Steal channel identifier, for UST */
441 consumer_steal_channel_key(channel
->key
);
443 lttng_ht_add_unique_ulong(consumer_data
.channel_ht
, &channel
->node
);
445 pthread_mutex_unlock(&consumer_data
.lock
);
450 * Allocate the pollfd structure and the local view of the out fds to avoid
451 * doing a lookup in the linked list and concurrency issues when writing is
452 * needed. Called with consumer_data.lock held.
454 * Returns the number of fds in the structures.
456 int consumer_update_poll_array(
457 struct lttng_consumer_local_data
*ctx
, struct pollfd
**pollfd
,
458 struct lttng_consumer_stream
**local_stream
)
461 struct lttng_ht_iter iter
;
462 struct lttng_consumer_stream
*stream
;
464 DBG("Updating poll fd array");
465 cds_lfht_for_each_entry(consumer_data
.stream_ht
->ht
, &iter
.iter
, stream
,
467 if (stream
->state
!= LTTNG_CONSUMER_ACTIVE_STREAM
) {
470 DBG("Active FD %d", stream
->wait_fd
);
471 (*pollfd
)[i
].fd
= stream
->wait_fd
;
472 (*pollfd
)[i
].events
= POLLIN
| POLLPRI
;
473 local_stream
[i
] = stream
;
478 * Insert the consumer_poll_pipe at the end of the array and don't
479 * increment i so nb_fd is the number of real FD.
481 (*pollfd
)[i
].fd
= ctx
->consumer_poll_pipe
[0];
482 (*pollfd
)[i
].events
= POLLIN
;
/*
 * Poll on the should_quit pipe and the command socket return -1 on error and
 * should exit, 0 if data is available on the command socket
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		perror("Poll error");
		goto exit;
	}
	/* Index 0 is the should_quit pipe: any POLLIN there means shutdown. */
	if (consumer_sockpoll[0].revents == POLLIN) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}
517 * Set the error socket.
519 void lttng_consumer_set_error_sock(
520 struct lttng_consumer_local_data
*ctx
, int sock
)
522 ctx
->consumer_error_socket
= sock
;
526 * Set the command socket path.
529 void lttng_consumer_set_command_sock_path(
530 struct lttng_consumer_local_data
*ctx
, char *sock
)
532 ctx
->consumer_command_sock_path
= sock
;
536 * Send return code to the session daemon.
537 * If the socket is not defined, we return 0, it is not a fatal error
539 int lttng_consumer_send_error(
540 struct lttng_consumer_local_data
*ctx
, int cmd
)
542 if (ctx
->consumer_error_socket
> 0) {
543 return lttcomm_send_unix_sock(ctx
->consumer_error_socket
, &cmd
,
544 sizeof(enum lttcomm_sessiond_command
));
551 * Close all the tracefiles and stream fds, should be called when all instances
554 void lttng_consumer_cleanup(void)
557 struct lttng_ht_iter iter
;
558 struct lttng_ht_node_ulong
*node
;
563 * close all outfd. Called when there are no more threads running (after
564 * joining on the threads), no need to protect list iteration with mutex.
566 cds_lfht_for_each_entry(consumer_data
.stream_ht
->ht
, &iter
.iter
, node
,
568 ret
= lttng_ht_del(consumer_data
.stream_ht
, &iter
);
570 call_rcu(&node
->head
, consumer_del_stream_rcu
);
573 cds_lfht_for_each_entry(consumer_data
.channel_ht
->ht
, &iter
.iter
, node
,
575 ret
= lttng_ht_del(consumer_data
.channel_ht
, &iter
);
577 call_rcu(&node
->head
, consumer_del_channel_rcu
);
584 * Called from signal handler.
586 void lttng_consumer_should_exit(struct lttng_consumer_local_data
*ctx
)
590 ret
= write(ctx
->consumer_should_quit
[1], "4", 1);
592 perror("write consumer quit");
596 void lttng_consumer_sync_trace_file(
597 struct lttng_consumer_stream
*stream
, off_t orig_offset
)
599 int outfd
= stream
->out_fd
;
602 * This does a blocking write-and-wait on any page that belongs to the
603 * subbuffer prior to the one we just wrote.
604 * Don't care about error values, as these are just hints and ways to
605 * limit the amount of page cache used.
607 if (orig_offset
< stream
->chan
->max_sb_size
) {
610 lttng_sync_file_range(outfd
, orig_offset
- stream
->chan
->max_sb_size
,
611 stream
->chan
->max_sb_size
,
612 SYNC_FILE_RANGE_WAIT_BEFORE
613 | SYNC_FILE_RANGE_WRITE
614 | SYNC_FILE_RANGE_WAIT_AFTER
);
616 * Give hints to the kernel about how we access the file:
617 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
620 * We need to call fadvise again after the file grows because the
621 * kernel does not seem to apply fadvise to non-existing parts of the
624 * Call fadvise _after_ having waited for the page writeback to
625 * complete because the dirty page writeback semantic is not well
626 * defined. So it can be expected to lead to lower throughput in
629 posix_fadvise(outfd
, orig_offset
- stream
->chan
->max_sb_size
,
630 stream
->chan
->max_sb_size
, POSIX_FADV_DONTNEED
);
634 * Initialise the necessary environnement :
635 * - create a new context
636 * - create the poll_pipe
637 * - create the should_quit pipe (for signal handler)
638 * - create the thread pipe (for splice)
640 * Takes a function pointer as argument, this function is called when data is
641 * available on a buffer. This function is responsible to do the
642 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
643 * buffer configuration and then kernctl_put_next_subbuf at the end.
645 * Returns a pointer to the new context or NULL on error.
647 struct lttng_consumer_local_data
*lttng_consumer_create(
648 enum lttng_consumer_type type
,
649 ssize_t (*buffer_ready
)(struct lttng_consumer_stream
*stream
,
650 struct lttng_consumer_local_data
*ctx
),
651 int (*recv_channel
)(struct lttng_consumer_channel
*channel
),
652 int (*recv_stream
)(struct lttng_consumer_stream
*stream
),
653 int (*update_stream
)(int stream_key
, uint32_t state
))
656 struct lttng_consumer_local_data
*ctx
;
658 assert(consumer_data
.type
== LTTNG_CONSUMER_UNKNOWN
||
659 consumer_data
.type
== type
);
660 consumer_data
.type
= type
;
662 ctx
= zmalloc(sizeof(struct lttng_consumer_local_data
));
664 perror("allocating context");
668 ctx
->consumer_error_socket
= -1;
669 /* assign the callbacks */
670 ctx
->on_buffer_ready
= buffer_ready
;
671 ctx
->on_recv_channel
= recv_channel
;
672 ctx
->on_recv_stream
= recv_stream
;
673 ctx
->on_update_stream
= update_stream
;
675 ret
= pipe(ctx
->consumer_poll_pipe
);
677 perror("Error creating poll pipe");
678 goto error_poll_pipe
;
681 ret
= pipe(ctx
->consumer_should_quit
);
683 perror("Error creating recv pipe");
684 goto error_quit_pipe
;
687 ret
= pipe(ctx
->consumer_thread_pipe
);
689 perror("Error creating thread pipe");
690 goto error_thread_pipe
;
697 for (i
= 0; i
< 2; i
++) {
700 err
= close(ctx
->consumer_should_quit
[i
]);
704 for (i
= 0; i
< 2; i
++) {
707 err
= close(ctx
->consumer_poll_pipe
[i
]);
717 * Close all fds associated with the instance and free the context.
719 void lttng_consumer_destroy(struct lttng_consumer_local_data
*ctx
)
721 close(ctx
->consumer_error_socket
);
722 close(ctx
->consumer_thread_pipe
[0]);
723 close(ctx
->consumer_thread_pipe
[1]);
724 close(ctx
->consumer_poll_pipe
[0]);
725 close(ctx
->consumer_poll_pipe
[1]);
726 close(ctx
->consumer_should_quit
[0]);
727 close(ctx
->consumer_should_quit
[1]);
728 unlink(ctx
->consumer_command_sock_path
);
733 * Mmap the ring buffer, read it and write the data to the tracefile.
735 * Returns the number of bytes written
737 ssize_t
lttng_consumer_on_read_subbuffer_mmap(
738 struct lttng_consumer_local_data
*ctx
,
739 struct lttng_consumer_stream
*stream
, unsigned long len
)
741 switch (consumer_data
.type
) {
742 case LTTNG_CONSUMER_KERNEL
:
743 return lttng_kconsumer_on_read_subbuffer_mmap(ctx
, stream
, len
);
744 case LTTNG_CONSUMER32_UST
:
745 case LTTNG_CONSUMER64_UST
:
746 return lttng_ustconsumer_on_read_subbuffer_mmap(ctx
, stream
, len
);
748 ERR("Unknown consumer_data type");
756 * Splice the data from the ring buffer to the tracefile.
758 * Returns the number of bytes spliced.
760 ssize_t
lttng_consumer_on_read_subbuffer_splice(
761 struct lttng_consumer_local_data
*ctx
,
762 struct lttng_consumer_stream
*stream
, unsigned long len
)
764 switch (consumer_data
.type
) {
765 case LTTNG_CONSUMER_KERNEL
:
766 return lttng_kconsumer_on_read_subbuffer_splice(ctx
, stream
, len
);
767 case LTTNG_CONSUMER32_UST
:
768 case LTTNG_CONSUMER64_UST
:
771 ERR("Unknown consumer_data type");
779 * Take a snapshot for a specific fd
781 * Returns 0 on success, < 0 on error
783 int lttng_consumer_take_snapshot(struct lttng_consumer_local_data
*ctx
,
784 struct lttng_consumer_stream
*stream
)
786 switch (consumer_data
.type
) {
787 case LTTNG_CONSUMER_KERNEL
:
788 return lttng_kconsumer_take_snapshot(ctx
, stream
);
789 case LTTNG_CONSUMER32_UST
:
790 case LTTNG_CONSUMER64_UST
:
791 return lttng_ustconsumer_take_snapshot(ctx
, stream
);
793 ERR("Unknown consumer_data type");
801 * Get the produced position
803 * Returns 0 on success, < 0 on error
805 int lttng_consumer_get_produced_snapshot(
806 struct lttng_consumer_local_data
*ctx
,
807 struct lttng_consumer_stream
*stream
,
810 switch (consumer_data
.type
) {
811 case LTTNG_CONSUMER_KERNEL
:
812 return lttng_kconsumer_get_produced_snapshot(ctx
, stream
, pos
);
813 case LTTNG_CONSUMER32_UST
:
814 case LTTNG_CONSUMER64_UST
:
815 return lttng_ustconsumer_get_produced_snapshot(ctx
, stream
, pos
);
817 ERR("Unknown consumer_data type");
823 int lttng_consumer_recv_cmd(struct lttng_consumer_local_data
*ctx
,
824 int sock
, struct pollfd
*consumer_sockpoll
)
826 switch (consumer_data
.type
) {
827 case LTTNG_CONSUMER_KERNEL
:
828 return lttng_kconsumer_recv_cmd(ctx
, sock
, consumer_sockpoll
);
829 case LTTNG_CONSUMER32_UST
:
830 case LTTNG_CONSUMER64_UST
:
831 return lttng_ustconsumer_recv_cmd(ctx
, sock
, consumer_sockpoll
);
833 ERR("Unknown consumer_data type");
840 * This thread polls the fds in the set to consume the data and write
841 * it to tracefile if necessary.
843 void *lttng_consumer_thread_poll_fds(void *data
)
845 int num_rdy
, num_hup
, high_prio
, ret
, i
;
846 struct pollfd
*pollfd
= NULL
;
847 /* local view of the streams */
848 struct lttng_consumer_stream
**local_stream
= NULL
;
849 /* local view of consumer_data.fds_count */
853 struct lttng_consumer_local_data
*ctx
= data
;
855 rcu_register_thread();
857 local_stream
= zmalloc(sizeof(struct lttng_consumer_stream
));
864 * the fds set has been updated, we need to update our
865 * local array as well
867 pthread_mutex_lock(&consumer_data
.lock
);
868 if (consumer_data
.need_update
) {
869 if (pollfd
!= NULL
) {
873 if (local_stream
!= NULL
) {
878 /* allocate for all fds + 1 for the consumer_poll_pipe */
879 pollfd
= zmalloc((consumer_data
.stream_count
+ 1) * sizeof(struct pollfd
));
880 if (pollfd
== NULL
) {
881 perror("pollfd malloc");
882 pthread_mutex_unlock(&consumer_data
.lock
);
886 /* allocate for all fds + 1 for the consumer_poll_pipe */
887 local_stream
= zmalloc((consumer_data
.stream_count
+ 1) *
888 sizeof(struct lttng_consumer_stream
));
889 if (local_stream
== NULL
) {
890 perror("local_stream malloc");
891 pthread_mutex_unlock(&consumer_data
.lock
);
894 ret
= consumer_update_poll_array(ctx
, &pollfd
, local_stream
);
896 ERR("Error in allocating pollfd or local_outfds");
897 lttng_consumer_send_error(ctx
, CONSUMERD_POLL_ERROR
);
898 pthread_mutex_unlock(&consumer_data
.lock
);
902 consumer_data
.need_update
= 0;
904 pthread_mutex_unlock(&consumer_data
.lock
);
906 /* No FDs and consumer_quit, consumer_cleanup the thread */
907 if (nb_fd
== 0 && consumer_quit
== 1) {
910 /* poll on the array of fds */
912 DBG("polling on %d fd", nb_fd
+ 1);
913 num_rdy
= poll(pollfd
, nb_fd
+ 1, consumer_poll_timeout
);
914 DBG("poll num_rdy : %d", num_rdy
);
917 * Restart interrupted system call.
919 if (errno
== EINTR
) {
922 perror("Poll error");
923 lttng_consumer_send_error(ctx
, CONSUMERD_POLL_ERROR
);
925 } else if (num_rdy
== 0) {
926 DBG("Polling thread timed out");
931 * If the consumer_poll_pipe triggered poll go
932 * directly to the beginning of the loop to update the
933 * array. We want to prioritize array update over
934 * low-priority reads.
936 if (pollfd
[nb_fd
].revents
& POLLIN
) {
937 DBG("consumer_poll_pipe wake up");
938 tmp2
= read(ctx
->consumer_poll_pipe
[0], &tmp
, 1);
940 perror("read consumer poll");
945 /* Take care of high priority channels first. */
946 for (i
= 0; i
< nb_fd
; i
++) {
947 if (pollfd
[i
].revents
& POLLPRI
) {
950 DBG("Urgent read on fd %d", pollfd
[i
].fd
);
952 len
= ctx
->on_buffer_ready(local_stream
[i
], ctx
);
953 /* it's ok to have an unavailable sub-buffer */
954 if (len
< 0 && len
!= -EAGAIN
) {
956 } else if (len
> 0) {
957 local_stream
[i
]->data_read
= 1;
963 * If we read high prio channel in this loop, try again
964 * for more high prio data.
970 /* Take care of low priority channels. */
971 for (i
= 0; i
< nb_fd
; i
++) {
972 if ((pollfd
[i
].revents
& POLLIN
) ||
973 local_stream
[i
]->hangup_flush_done
) {
976 assert(!(pollfd
[i
].revents
& POLLERR
));
977 assert(!(pollfd
[i
].revents
& POLLNVAL
));
978 DBG("Normal read on fd %d", pollfd
[i
].fd
);
979 len
= ctx
->on_buffer_ready(local_stream
[i
], ctx
);
980 /* it's ok to have an unavailable sub-buffer */
981 if (len
< 0 && len
!= -EAGAIN
) {
983 } else if (len
> 0) {
984 local_stream
[i
]->data_read
= 1;
989 /* Handle hangup and errors */
990 for (i
= 0; i
< nb_fd
; i
++) {
991 if (!local_stream
[i
]->hangup_flush_done
992 && (pollfd
[i
].revents
& (POLLHUP
| POLLERR
| POLLNVAL
))
993 && (consumer_data
.type
== LTTNG_CONSUMER32_UST
994 || consumer_data
.type
== LTTNG_CONSUMER64_UST
)) {
995 DBG("fd %d is hup|err|nval. Attempting flush and read.",
997 lttng_ustconsumer_on_stream_hangup(local_stream
[i
]);
998 /* Attempt read again, for the data we just flushed. */
999 local_stream
[i
]->data_read
= 1;
1002 * If the poll flag is HUP/ERR/NVAL and we have
1003 * read no data in this pass, we can remove the
1004 * stream from its hash table.
1006 if ((pollfd
[i
].revents
& POLLHUP
)) {
1007 DBG("Polling fd %d tells it has hung up.", pollfd
[i
].fd
);
1008 if (!local_stream
[i
]->data_read
) {
1010 consumer_del_stream_rcu(&local_stream
[i
]->node
.head
);
1014 } else if (pollfd
[i
].revents
& POLLERR
) {
1015 ERR("Error returned in polling fd %d.", pollfd
[i
].fd
);
1016 if (!local_stream
[i
]->data_read
) {
1018 consumer_del_stream_rcu(&local_stream
[i
]->node
.head
);
1022 } else if (pollfd
[i
].revents
& POLLNVAL
) {
1023 ERR("Polling fd %d tells fd is not open.", pollfd
[i
].fd
);
1024 if (!local_stream
[i
]->data_read
) {
1026 consumer_del_stream_rcu(&local_stream
[i
]->node
.head
);
1031 local_stream
[i
]->data_read
= 0;
1035 DBG("polling thread exiting");
1036 if (pollfd
!= NULL
) {
1040 if (local_stream
!= NULL
) {
1042 local_stream
= NULL
;
1044 rcu_unregister_thread();
1049 * This thread listens on the consumerd socket and receives the file
1050 * descriptors from the session daemon.
1052 void *lttng_consumer_thread_receive_fds(void *data
)
1054 int sock
, client_socket
, ret
;
1056 * structure to poll for incoming data on communication socket avoids
1057 * making blocking sockets.
1059 struct pollfd consumer_sockpoll
[2];
1060 struct lttng_consumer_local_data
*ctx
= data
;
1062 rcu_register_thread();
1064 DBG("Creating command socket %s", ctx
->consumer_command_sock_path
);
1065 unlink(ctx
->consumer_command_sock_path
);
1066 client_socket
= lttcomm_create_unix_sock(ctx
->consumer_command_sock_path
);
1067 if (client_socket
< 0) {
1068 ERR("Cannot create command socket");
1072 ret
= lttcomm_listen_unix_sock(client_socket
);
1077 DBG("Sending ready command to lttng-sessiond");
1078 ret
= lttng_consumer_send_error(ctx
, CONSUMERD_COMMAND_SOCK_READY
);
1079 /* return < 0 on error, but == 0 is not fatal */
1081 ERR("Error sending ready command to lttng-sessiond");
1085 ret
= fcntl(client_socket
, F_SETFL
, O_NONBLOCK
);
1087 perror("fcntl O_NONBLOCK");
1091 /* prepare the FDs to poll : to client socket and the should_quit pipe */
1092 consumer_sockpoll
[0].fd
= ctx
->consumer_should_quit
[0];
1093 consumer_sockpoll
[0].events
= POLLIN
| POLLPRI
;
1094 consumer_sockpoll
[1].fd
= client_socket
;
1095 consumer_sockpoll
[1].events
= POLLIN
| POLLPRI
;
1097 if (lttng_consumer_poll_socket(consumer_sockpoll
) < 0) {
1100 DBG("Connection on client_socket");
1102 /* Blocking call, waiting for transmission */
1103 sock
= lttcomm_accept_unix_sock(client_socket
);
1108 ret
= fcntl(sock
, F_SETFL
, O_NONBLOCK
);
1110 perror("fcntl O_NONBLOCK");
1114 /* update the polling structure to poll on the established socket */
1115 consumer_sockpoll
[1].fd
= sock
;
1116 consumer_sockpoll
[1].events
= POLLIN
| POLLPRI
;
1119 if (lttng_consumer_poll_socket(consumer_sockpoll
) < 0) {
1122 DBG("Incoming command on sock");
1123 ret
= lttng_consumer_recv_cmd(ctx
, sock
, consumer_sockpoll
);
1124 if (ret
== -ENOENT
) {
1125 DBG("Received STOP command");
1129 ERR("Communication interrupted on command socket");
1132 if (consumer_quit
) {
1133 DBG("consumer_thread_receive_fds received quit from signal");
1136 DBG("received fds on sock");
1139 DBG("consumer_thread_receive_fds exiting");
1142 * when all fds have hung up, the polling thread
1148 * 2s of grace period, if no polling events occur during
1149 * this period, the polling thread will exit even if there
1150 * are still open FDs (should not happen, but safety mechanism).
1152 consumer_poll_timeout
= LTTNG_CONSUMER_POLL_TIMEOUT
;
1154 /* wake up the polling thread */
1155 ret
= write(ctx
->consumer_poll_pipe
[1], "4", 1);
1157 perror("poll pipe write");
1159 rcu_unregister_thread();
1163 ssize_t
lttng_consumer_read_subbuffer(struct lttng_consumer_stream
*stream
,
1164 struct lttng_consumer_local_data
*ctx
)
1166 switch (consumer_data
.type
) {
1167 case LTTNG_CONSUMER_KERNEL
:
1168 return lttng_kconsumer_read_subbuffer(stream
, ctx
);
1169 case LTTNG_CONSUMER32_UST
:
1170 case LTTNG_CONSUMER64_UST
:
1171 return lttng_ustconsumer_read_subbuffer(stream
, ctx
);
1173 ERR("Unknown consumer_data type");
1179 int lttng_consumer_on_recv_stream(struct lttng_consumer_stream
*stream
)
1181 switch (consumer_data
.type
) {
1182 case LTTNG_CONSUMER_KERNEL
:
1183 return lttng_kconsumer_on_recv_stream(stream
);
1184 case LTTNG_CONSUMER32_UST
:
1185 case LTTNG_CONSUMER64_UST
:
1186 return lttng_ustconsumer_on_recv_stream(stream
);
1188 ERR("Unknown consumer_data type");
1195 * Allocate and set consumer data hash tables.
1197 void lttng_consumer_init(void)
1199 consumer_data
.stream_ht
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
1200 consumer_data
.channel_ht
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);