/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <urcu/list.h>

#include "libkernelctl.h"
#include "liblttkconsumerd.h"
#include "lttngerr.h"
struct kconsumerd_global_data {
	/*
	 * kconsumerd_data.lock protects kconsumerd_data.fd_list,
	 * kconsumerd_data.fds_count, and kconsumerd_data.need_update. It
	 * ensures the count matches the number of items in the fd_list.
	 * It ensures the list updates *always* trigger an fd_array
	 * update (therefore need to make list update vs
	 * kconsumerd_data.need_update flag update atomic, and also flag
	 * read, fd array and flag clear atomic).
	 */
	pthread_mutex_t lock;
	/*
	 * Number of elements for the list below. Protected by
	 * kconsumerd_data.lock.
	 */
	unsigned int fds_count;
	/*
	 * List of FDs. Protected by kconsumerd_data.lock.
	 */
	struct kconsumerd_fd_list fd_list;
	/*
	 * Flag specifying if the local array of FDs needs update in the
	 * poll function. Protected by kconsumerd_data.lock.
	 */
	unsigned int need_update;
} kconsumerd_data = {
	.fd_list.head = CDS_LIST_HEAD_INIT(kconsumerd_data.fd_list.head),
};
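
/*
 * Every mutation of the shared state above must follow the same locked
 * sequence, so that need_update can never lag behind the list. A minimal
 * sketch of the protocol (this is what kconsumerd_add_fd() and
 * kconsumerd_del_fd() below do):
 *
 *	pthread_mutex_lock(&kconsumerd_data.lock);
 *	... add to or remove from kconsumerd_data.fd_list ...
 *	... adjust kconsumerd_data.fds_count to match ...
 *	kconsumerd_data.need_update = 1;
 *	pthread_mutex_unlock(&kconsumerd_data.lock);
 */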
/* communication with splice */
static int kconsumerd_thread_pipe[2];
/* pipe to wake the poll thread when necessary */
static int kconsumerd_poll_pipe[2];
/*
 * TODO: create a should_quit pipe to let the signal handler wake up the
 * fd receiver thread. It should be initialized before any signal can be
 * received by the library.
 */
/* timeout parameter, to control the polling thread grace period */
static int kconsumerd_poll_timeout = -1;
/* socket to communicate errors with sessiond */
static int kconsumerd_error_socket;
/* socket to exchange commands with sessiond */
static char *kconsumerd_command_sock_path;
/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by kconsumerd_thread_receive_fds when it notices that all
 * fds have hung up. Also updated by the signal handler
 * (kconsumerd_should_exit()). Read by the polling threads.
 */
static volatile int kconsumerd_quit = 0;
/*
 * kconsumerd_set_error_socket
 *
 * Set the error socket.
 */
void kconsumerd_set_error_socket(int sock)
{
	kconsumerd_error_socket = sock;
}
/*
 * kconsumerd_set_command_socket_path
 *
 * Set the command socket path.
 */
void kconsumerd_set_command_socket_path(char *sock)
{
	kconsumerd_command_sock_path = sock;
}
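
/*
 * A minimal usage sketch (illustrative only: the real wiring lives in the
 * kconsumerd executable, and error_sock here is a hypothetical descriptor
 * obtained from ltt-sessiond). Both setters must run before the threads
 * below are started:
 *
 *	kconsumerd_set_error_socket(error_sock);
 *	kconsumerd_set_command_socket_path(command_sock_path);
 *	ret = kconsumerd_create_poll_pipe();
 *	pthread_create(&fd_thread, NULL, kconsumerd_thread_receive_fds, NULL);
 *	pthread_create(&poll_thread, NULL, kconsumerd_thread_poll_fds, NULL);
 */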
/*
 * kconsumerd_find_session_fd
 *
 * Find a session fd in the global list.
 * The kconsumerd_data.lock must be locked during this call.
 *
 * Return 1 if found else 0.
 */
static int kconsumerd_find_session_fd(int fd)
{
	struct kconsumerd_fd *iter;

	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->sessiond_fd == fd) {
			DBG("Duplicate session fd %d", fd);
			/* the caller owns kconsumerd_data.lock and releases it */
			return 1;
		}
	}

	return 0;
}
/*
 * kconsumerd_del_fd
 *
 * Remove a fd from the global list protected by a mutex.
 */
static void kconsumerd_del_fd(struct kconsumerd_fd *lcf)
{
	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_del(&lcf->list);
	if (kconsumerd_data.fds_count > 0) {
		kconsumerd_data.fds_count--;
		if (lcf != NULL) {
			close(lcf->consumerd_fd);
			close(lcf->out_fd);
			free(lcf);
			lcf = NULL;
		}
	}
	kconsumerd_data.need_update = 1;
	pthread_mutex_unlock(&kconsumerd_data.lock);
}
/*
 * kconsumerd_add_fd
 *
 * Add a fd to the global list protected by a mutex.
 */
static int kconsumerd_add_fd(struct lttcomm_kconsumerd_msg *buf, int consumerd_fd)
{
	int ret;
	struct kconsumerd_fd *tmp_fd;

	pthread_mutex_lock(&kconsumerd_data.lock);
	/* Check if it already exists */
	ret = kconsumerd_find_session_fd(buf->fd);
	if (ret == 1) {
		goto end;
	}

	tmp_fd = malloc(sizeof(struct kconsumerd_fd));
	if (tmp_fd == NULL) {
		perror("malloc struct kconsumerd_fd");
		ret = -1;
		goto end;
	}
	tmp_fd->sessiond_fd = buf->fd;
	tmp_fd->consumerd_fd = consumerd_fd;
	tmp_fd->state = buf->state;
	tmp_fd->max_sb_size = buf->max_sb_size;
	strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);

	/* Opening the tracefile in write mode */
	ret = open(tmp_fd->path_name,
			O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU|S_IRWXG|S_IRWXO);
	if (ret < 0) {
		ERR("Opening %s", tmp_fd->path_name);
		perror("open");
		free(tmp_fd);
		goto end;
	}
	tmp_fd->out_fd = ret;
	tmp_fd->out_fd_offset = 0;

	DBG("Adding %s (%d, %d, %d)", tmp_fd->path_name,
			tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd);

	cds_list_add(&tmp_fd->list, &kconsumerd_data.fd_list.head);
	kconsumerd_data.fds_count++;
	kconsumerd_data.need_update = 1;

end:
	pthread_mutex_unlock(&kconsumerd_data.lock);
	return ret;
}
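
/*
 * Ownership note: once kconsumerd_add_fd() succeeds, the list entry owns
 * both consumerd_fd (received from the sessiond) and the tracefile out_fd;
 * both are closed, and the entry freed, by kconsumerd_del_fd().
 */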
/*
 * kconsumerd_change_fd_state
 *
 * Update a fd according to what we just received.
 */
static void kconsumerd_change_fd_state(int sessiond_fd,
		enum kconsumerd_fd_state state)
{
	struct kconsumerd_fd *iter;

	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->sessiond_fd == sessiond_fd) {
			iter->state = state;
			break;
		}
	}
	kconsumerd_data.need_update = 1;
	pthread_mutex_unlock(&kconsumerd_data.lock);
}
/*
 * kconsumerd_update_poll_array
 *
 * Allocate the pollfd structure and the local view of the out fds
 * to avoid doing a lookup in the linked list and concurrency issues
 * when writing is needed.
 * Returns the number of fds in the structures.
 * Called with kconsumerd_data.lock held.
 */
static int kconsumerd_update_poll_array(struct pollfd **pollfd,
		struct kconsumerd_fd **local_kconsumerd_fd)
{
	struct kconsumerd_fd *iter;
	int i = 0;

	DBG("Updating poll fd array");

	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		DBG("Inside for each");
		if (iter->state == ACTIVE_FD) {
			DBG("Active FD %d", iter->consumerd_fd);
			(*pollfd)[i].fd = iter->consumerd_fd;
			(*pollfd)[i].events = POLLIN | POLLPRI;
			local_kconsumerd_fd[i] = iter;
			i++;
		}
	}

	/*
	 * Insert the kconsumerd_poll_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FDs.
	 */
	(*pollfd)[i].fd = kconsumerd_poll_pipe[0];
	(*pollfd)[i].events = POLLIN;
	return i;
}
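
/*
 * Note on the sentinel entry above: the value returned is the number of
 * buffer FDs only, while the array actually holds one extra slot for the
 * read end of kconsumerd_poll_pipe. The poll loop therefore passes
 * nb_fd + 1 to poll() and checks pollfd[nb_fd].revents to distinguish a
 * wakeup from trace data (see kconsumerd_thread_poll_fds() below).
 */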
/*
 * kconsumerd_on_read_subbuffer_mmap
 *
 * mmap the ring buffer, read it and write the data to the tracefile.
 * Returns the number of bytes written.
 */
static int kconsumerd_on_read_subbuffer_mmap(
		struct kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	unsigned long mmap_len, mmap_offset, padded_len, padding_len;
	char *mmap_base = NULL;
	char *write_ptr;
	char *padding = NULL;
	long ret = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	/* get the padded subbuffer size to know the padding required */
	ret = kernctl_get_padded_subbuf_size(fd, &padded_len);
	if (ret != 0) {
		ret = errno;
		perror("kernctl_get_padded_subbuf_size");
		goto end;
	}
	padding_len = padded_len - len;
	padding = malloc(padding_len * sizeof(char));
	if (padding == NULL) {
		perror("padding malloc");
		ret = -1;
		goto end;
	}
	memset(padding, '\0', padding_len);

	/* get the len of the mmap region */
	ret = kernctl_get_mmap_len(fd, &mmap_len);
	if (ret != 0) {
		ret = errno;
		perror("kernctl_get_mmap_len");
		goto end;
	}

	/* get the offset inside the fd to mmap */
	ret = kernctl_get_mmap_read_offset(fd, &mmap_offset);
	if (ret != 0) {
		ret = errno;
		perror("kernctl_get_mmap_read_offset");
		goto end;
	}

	mmap_base = mmap(NULL, mmap_len, PROT_READ, MAP_PRIVATE, fd, mmap_offset);
	if (mmap_base == MAP_FAILED) {
		mmap_base = NULL;
		perror("Error mmaping");
		ret = -1;
		goto end;
	}

	write_ptr = mmap_base;
	while (len > 0) {
		ret = write(outfd, write_ptr, len);
		if (ret < 0) {
			ret = errno;
			perror("Error in file write");
			goto end;
		}
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
		/* advance past what was written, in case of a short write */
		write_ptr += ret;
		len -= ret;
	}

	/* once all the data is written, write the padding to disk */
	ret = write(outfd, padding, padding_len);
	if (ret < 0) {
		ret = errno;
		perror("Error writing padding to file");
		goto end;
	}

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);

		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near
		 * future after we write it.
		 *
		 * We need to call fadvise again after the file grows because
		 * the kernel does not seem to apply fadvise to non-existing
		 * parts of the file.
		 *
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not
		 * well defined. So it can be expected to lead to lower
		 * throughput in streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}

end:
	if (mmap_base != NULL) {
		munmap(mmap_base, mmap_len);
	}
	if (padding != NULL) {
		free(padding);
	}
	return ret;
}
/*
 * kconsumerd_on_read_subbuffer
 *
 * Splice the data from the ring buffer to the tracefile.
 * Returns the number of bytes spliced.
 */
static int kconsumerd_on_read_subbuffer(
		struct kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	long ret = 0;
	loff_t offset = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	while (len > 0) {
		DBG("splice chan to pipe offset %lu (fd : %d)",
				(unsigned long)offset, fd);
		ret = splice(fd, &offset, kconsumerd_thread_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe ret %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in relay splice");
			goto splice_error;
		}

		ret = splice(kconsumerd_thread_pipe[0], NULL, outfd, NULL, ret,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice pipe to file %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in file splice");
			goto splice_error;
		}
		if ((unsigned long) ret >= len) {
			len = 0;
		} else {
			len -= ret;
		}

		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);

		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near
		 * future after we write it.
		 *
		 * We need to call fadvise again after the file grows because
		 * the kernel does not seem to apply fadvise to non-existing
		 * parts of the file.
		 *
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not
		 * well defined. So it can be expected to lead to lower
		 * throughput in streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
	goto end;

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EBADF:
		kconsumerd_send_error(KCONSUMERD_SPLICE_EBADF);
		break;
	case EINVAL:
		kconsumerd_send_error(KCONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		kconsumerd_send_error(KCONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		kconsumerd_send_error(KCONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	return ret;
}
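
/*
 * The two read paths above implement the same flush-behind pattern and
 * differ only in how bytes move: the mmap path copies from a mapped view
 * of the ring buffer through write(), while the splice path moves pages
 * from the buffer FD into kconsumerd_thread_pipe and on to the tracefile
 * without a user-space copy. kconsumerd_read_subbuffer() below selects
 * between them through DEFAULT_KERNEL_CHANNEL_OUTPUT.
 */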
/*
 * kconsumerd_read_subbuffer
 *
 * Consume data on a file descriptor and write it on a trace file.
 */
static int kconsumerd_read_subbuffer(struct kconsumerd_fd *kconsumerd_fd)
{
	unsigned long len;
	int err;
	long ret = 0;
	int infd = kconsumerd_fd->consumerd_fd;

	DBG("In kconsumerd_read_subbuffer (infd : %d)", infd);
	/* Get the next subbuffer */
	err = kernctl_get_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		perror("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency)");
		goto end;
	}

	switch (DEFAULT_KERNEL_CHANNEL_OUTPUT) {
	case LTTNG_EVENT_SPLICE:
		/* read the whole subbuffer */
		err = kernctl_get_padded_subbuf_size(infd, &len);
		if (err != 0) {
			ret = errno;
			perror("Getting sub-buffer len failed.");
			goto end;
		}

		/* splice the subbuffer to the tracefile */
		ret = kconsumerd_on_read_subbuffer(kconsumerd_fd, len);
		if (ret < 0) {
			/*
			 * display the error but continue processing to try
			 * to release the subbuffer
			 */
			ERR("Error splicing to tracefile");
		}
		break;
	case LTTNG_EVENT_MMAP:
		/* read the used subbuffer size */
		err = kernctl_get_subbuf_size(infd, &len);
		if (err != 0) {
			ret = errno;
			perror("Getting sub-buffer len failed.");
			goto end;
		}

		/* write the subbuffer to the tracefile */
		ret = kconsumerd_on_read_subbuffer_mmap(kconsumerd_fd, len);
		if (ret < 0) {
			/*
			 * display the error but continue processing to try
			 * to release the subbuffer
			 */
			ERR("Error writing to tracefile");
		}
		break;
	default:
		ERR("Unknown output method");
		ret = -1;
	}

	err = kernctl_put_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		if (errno == EFAULT) {
			perror("Error in unreserving sub buffer");
		} else if (errno == EIO) {
			/* Should never happen with newer LTTng versions */
			perror("Reader has been pushed by the writer, last sub-buffer corrupted.");
		}
		goto end;
	}

end:
	return ret;
}
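
/*
 * Error semantics note: when no full sub-buffer is available,
 * kernctl_get_next_subbuf() fails and the errno value (typically EAGAIN)
 * is propagated as a positive return code; the polling thread treats
 * EAGAIN as a non-error and simply retries on the next poll() wakeup.
 */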
/*
 * kconsumerd_consumerd_recv_fd
 *
 * Receives an array of file descriptors and the associated
 * structures describing each fd (path name).
 * Returns the size of received data.
 */
static int kconsumerd_consumerd_recv_fd(int sfd, int size,
		enum kconsumerd_command cmd_type)
{
	struct msghdr msg;
	struct iovec iov[1];
	int ret = 0, i, tmp2;
	struct cmsghdr *cmsg;
	int nb_fd;
	char recv_fd[CMSG_SPACE(sizeof(int))];
	struct lttcomm_kconsumerd_msg lkm;

	/* the number of fds we are about to receive */
	nb_fd = size / sizeof(struct lttcomm_kconsumerd_msg);

	for (i = 0; i < nb_fd; i++) {
		memset(&msg, 0, sizeof(msg));

		/* Prepare to receive the structures */
		iov[0].iov_base = &lkm;
		iov[0].iov_len = sizeof(lkm);
		msg.msg_iov = iov;
		msg.msg_iovlen = 1;

		msg.msg_control = recv_fd;
		msg.msg_controllen = sizeof(recv_fd);

		DBG("Waiting to receive fd");
		if ((ret = recvmsg(sfd, &msg, 0)) < 0) {
			perror("recvmsg");
			continue;
		}

		if (ret != (size / nb_fd)) {
			ERR("Received only %d, expected %d", ret, size);
			kconsumerd_send_error(KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

		cmsg = CMSG_FIRSTHDR(&msg);
		if (!cmsg) {
			ERR("Invalid control message header");
			ret = -1;
			kconsumerd_send_error(KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

		/* if we received fds */
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
			switch (cmd_type) {
			case ADD_STREAM:
				DBG("kconsumerd_add_fd %s (%d)", lkm.path_name, (CMSG_DATA(cmsg)[0]));
				ret = kconsumerd_add_fd(&lkm, (CMSG_DATA(cmsg)[0]));
				if (ret < 0) {
					kconsumerd_send_error(KCONSUMERD_OUTFD_ERROR);
					goto end;
				}
				break;
			case UPDATE_STREAM:
				kconsumerd_change_fd_state(lkm.fd, lkm.state);
				break;
			default:
				break;
			}

			/* signal the poll thread */
			tmp2 = write(kconsumerd_poll_pipe[1], "4", 1);
			if (tmp2 < 0) {
				perror("poll pipe write");
			}
		} else {
			ERR("Didn't receive any fd");
			kconsumerd_send_error(KCONSUMERD_ERROR_RECV_FD);
			ret = -1;
			goto end;
		}
	}

end:
	return ret;
}
/*
 * kconsumerd_thread_poll_fds
 *
 * This thread polls the fds in the ltt_fd_list to consume the data
 * and write it to tracefile if necessary.
 */
void *kconsumerd_thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the fds */
	struct kconsumerd_fd **local_kconsumerd_fd = NULL;
	/* local view of kconsumerd_data.fds_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;

	ret = pipe(kconsumerd_thread_pipe);
	if (ret < 0) {
		perror("Error creating pipe");
		goto end;
	}

	local_kconsumerd_fd = malloc(sizeof(struct kconsumerd_fd *));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * the ltt_fd_list has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&kconsumerd_data.lock);
		if (kconsumerd_data.need_update) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_kconsumerd_fd != NULL) {
				free(local_kconsumerd_fd);
				local_kconsumerd_fd = NULL;
			}

			/* allocate for all fds + 1 for the kconsumerd_poll_pipe */
			pollfd = malloc((kconsumerd_data.fds_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the kconsumerd_poll_pipe */
			local_kconsumerd_fd = malloc((kconsumerd_data.fds_count + 1) *
					sizeof(struct kconsumerd_fd *));
			if (local_kconsumerd_fd == NULL) {
				perror("local_kconsumerd_fd malloc");
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}

			ret = kconsumerd_update_poll_array(&pollfd, local_kconsumerd_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				kconsumerd_send_error(KCONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}
			nb_fd = ret;
			kconsumerd_data.need_update = 0;
		}
		pthread_mutex_unlock(&kconsumerd_data.lock);

		/* poll on the array of fds */
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, kconsumerd_poll_timeout);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			kconsumerd_send_error(KCONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/* No FDs and kconsumerd_quit, kconsumerd_cleanup the thread */
		if (nb_fd == 0 && kconsumerd_quit == 1) {
			goto end;
		}

		/*
		 * If the kconsumerd_poll_pipe triggered poll go
		 * directly to the beginning of the loop to update the
		 * array. We want to prioritize array update over
		 * low-priority reads.
		 */
		if (pollfd[nb_fd].revents == POLLIN) {
			DBG("kconsumerd_poll_pipe wake up");
			tmp2 = read(kconsumerd_poll_pipe[0], &tmp, 1);
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			switch (pollfd[i].revents) {
			case POLLERR:
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLHUP:
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLNVAL:
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLPRI:
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = kconsumerd_read_subbuffer(local_kconsumerd_fd[i]);
				/* it's ok to have an unavailable sub-buffer */
				if (ret == EAGAIN) {
					ret = 0;
				}
				break;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up");
			if (kconsumerd_quit == 1) {
				goto end;
			}
			continue;
		}

		/* Take care of low priority channels. */
		if (high_prio == 0) {
			for (i = 0; i < nb_fd; i++) {
				if (pollfd[i].revents == POLLIN) {
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = kconsumerd_read_subbuffer(local_kconsumerd_fd[i]);
					/* it's ok to have an unavailable subbuffer */
					if (ret == EAGAIN) {
						ret = 0;
					}
				}
			}
		}
	}

end:
	DBG("polling thread exiting");
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_kconsumerd_fd != NULL) {
		free(local_kconsumerd_fd);
		local_kconsumerd_fd = NULL;
	}
	return NULL;
}
/*
 * kconsumerd_create_poll_pipe
 *
 * Create the pipe to wake the polling thread when needed.
 */
int kconsumerd_create_poll_pipe()
{
	return pipe(kconsumerd_poll_pipe);
}
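
/*
 * Ordering note (an assumption based on how the pipe is used, not a
 * documented contract): this must be called before the two threads below
 * are started, since kconsumerd_thread_receive_fds() writes to
 * kconsumerd_poll_pipe[1] and kconsumerd_thread_poll_fds() polls and
 * reads kconsumerd_poll_pipe[0]; both ends must exist by then.
 */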
/*
 * kconsumerd_thread_receive_fds
 *
 * This thread listens on the consumerd socket and
 * receives the file descriptors from ltt-sessiond.
 */
void *kconsumerd_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	struct lttcomm_kconsumerd_header tmp;

	DBG("Creating command socket %s", kconsumerd_command_sock_path);
	unlink(kconsumerd_command_sock_path);
	client_socket = lttcomm_create_unix_sock(kconsumerd_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to ltt-sessiond");
	ret = kconsumerd_send_error(KCONSUMERD_COMMAND_SOCK_READY);
	if (ret < 0) {
		ERR("Error sending ready command to ltt-sessiond");
		goto end;
	}

	/* TODO: poll on socket and "should_quit" fd pipe */
	/* TODO: change blocking call into non-blocking call */
	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		ERR("On accept");
		goto end;
	}

	while (1) {
		/* We first get the number of fd we are about to receive */
		/* TODO: poll on sock and "should_quit" fd pipe */
		/* TODO: change recv into a non-blocking call */
		ret = lttcomm_recv_unix_sock(sock, &tmp,
				sizeof(struct lttcomm_kconsumerd_header));
		if (ret <= 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (tmp.cmd_type == STOP) {
			DBG("Received STOP command");
			goto end;
		}
		if (kconsumerd_quit) {
			DBG("kconsumerd_thread_receive_fds received quit from signal");
			goto end;
		}

		/* we received a command to add or update fds */
		ret = kconsumerd_consumerd_recv_fd(sock, tmp.payload_size, tmp.cmd_type);
		if (ret <= 0) {
			ERR("Receiving the FD, exiting");
			goto end;
		}
	}

end:
	DBG("kconsumerd_thread_receive_fds exiting");

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	kconsumerd_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	kconsumerd_poll_timeout = KCONSUMERD_POLL_GRACE_PERIOD;

	/* wake up the polling thread */
	ret = write(kconsumerd_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	return NULL;
}
/*
 * kconsumerd_cleanup
 *
 * Cleanup the daemon's socket on exit.
 */
void kconsumerd_cleanup(void)
{
	struct kconsumerd_fd *iter, *tmp;

	/* remove the socket file */
	unlink(kconsumerd_command_sock_path);

	/*
	 * Close all outfd. Called when there are no more threads
	 * running (after joining on the threads), no need to protect
	 * list iteration with mutex. Use the _safe variant because
	 * kconsumerd_del_fd() unlinks and frees the current node.
	 */
	cds_list_for_each_entry_safe(iter, tmp, &kconsumerd_data.fd_list.head, list) {
		kconsumerd_del_fd(iter);
	}
}
/*
 * kconsumerd_should_exit
 *
 * Called from signal handler.
 */
void kconsumerd_should_exit(void)
{
	kconsumerd_quit = 1;
	/*
	 * TODO: write into a should_quit pipe to wake up the fd
	 * receiver thread.
	 */
}
/*
 * kconsumerd_send_error
 *
 * Send return code to ltt-sessiond.
 */
int kconsumerd_send_error(enum lttcomm_return_code cmd)
{
	if (kconsumerd_error_socket > 0) {
		return lttcomm_send_unix_sock(kconsumerd_error_socket, &cmd,
				sizeof(enum lttcomm_return_code));
	}

	return 0;
}