Fix memory allocation of local fd view
[lttng-tools.git] / kconsumerd / kconsumerd.c
/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <grp.h>
#include <limits.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <urcu/list.h>
#include <poll.h>
#include <unistd.h>

#include "lttngerr.h"
#include "libkernelctl.h"
#include "liblttsessiondcomm.h"
#include "kconsumerd.h"

/* Init the list of FDs */
static struct ltt_kconsumerd_fd_list kconsumerd_fd_list = {
	.head = CDS_LIST_HEAD_INIT(kconsumerd_fd_list.head),
};

/* Number of elements in the list below */
static unsigned int fds_count;

/* If the local array of FDs needs update in the poll function */
static unsigned int update_fd_array = 1;

/* Lock protecting the fd array and structures */
static pthread_mutex_t kconsumerd_lock_fds = PTHREAD_MUTEX_INITIALIZER;

/* The two threads (receive fd and poll) */
static pthread_t threads[2];

/* Intermediate pipe used by splice */
static int thread_pipe[2];

/* Pipe to wake the poll thread when necessary */
static int poll_pipe[2];

/* Socket to communicate errors with sessiond */
static int error_socket = -1;

/* Argument variables */
int opt_quiet;
int opt_verbose;
static int opt_daemon;
static const char *progname;
static char command_sock_path[PATH_MAX]; /* Global command socket path */
static char error_sock_path[PATH_MAX]; /* Global error path */

/*
 * del_fd
 *
 * Remove a fd from the global list protected by a mutex
 */
static void del_fd(struct ltt_kconsumerd_fd *lcf)
{
	DBG("Removing %d", lcf->consumerd_fd);
	pthread_mutex_lock(&kconsumerd_lock_fds);
	cds_list_del(&lcf->list);
	if (fds_count > 0) {
		fds_count--;
		DBG("Removed ltt_kconsumerd_fd");
		if (lcf != NULL) {
			close(lcf->out_fd);
			close(lcf->consumerd_fd);
			free(lcf);
			lcf = NULL;
		}
	}
	pthread_mutex_unlock(&kconsumerd_lock_fds);
}

/*
 * cleanup
 *
 * Cleanup the daemon's socket on exit
 */
static void cleanup()
{
	struct ltt_kconsumerd_fd *iter, *tmp;

	/* remove the socket file */
	unlink(command_sock_path);

	/* unblock the threads */
	WARN("Terminating the threads before exiting");
	pthread_cancel(threads[0]);
	pthread_cancel(threads[1]);

	/* close all outfd; del_fd() frees the entry, so use the safe iterator */
	cds_list_for_each_entry_safe(iter, tmp, &kconsumerd_fd_list.head, list) {
		del_fd(iter);
	}
}

/*
 * send_error
 *
 * Send a return code to ltt-sessiond
 */
static int send_error(enum lttcomm_return_code cmd)
{
	if (error_socket > 0) {
		return lttcomm_send_unix_sock(error_socket, &cmd,
				sizeof(enum lttcomm_return_code));
	} else {
		return 0;
	}
}

/*
 * add_fd
 *
 * Add a fd to the global list protected by a mutex
 */
static int add_fd(struct lttcomm_kconsumerd_msg *buf, int consumerd_fd)
{
	struct ltt_kconsumerd_fd *tmp_fd;
	int ret;

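	/*
	 * buf->fd is the stream's fd number on the session daemon side (kept
	 * only as a key to identify the stream later), while consumerd_fd is
	 * the local copy received over the UNIX socket and actually read from.
	 */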
	tmp_fd = malloc(sizeof(struct ltt_kconsumerd_fd));
	if (tmp_fd == NULL) {
		perror("malloc ltt_kconsumerd_fd");
		ret = -1;
		goto end;
	}
	tmp_fd->sessiond_fd = buf->fd;
	tmp_fd->consumerd_fd = consumerd_fd;
	tmp_fd->state = buf->state;
	tmp_fd->max_sb_size = buf->max_sb_size;
	strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);
	tmp_fd->path_name[PATH_MAX - 1] = '\0';

	/* Opening the tracefile in write mode */
	DBG("Opening %s for writing", tmp_fd->path_name);
	ret = open(tmp_fd->path_name,
			O_WRONLY|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO);
	if (ret < 0) {
		ERR("Opening %s", tmp_fd->path_name);
		perror("open");
		free(tmp_fd);
		goto end;
	}
	tmp_fd->out_fd = ret;
	tmp_fd->out_fd_offset = 0;

	DBG("Adding %s (%d, %d, %d)", tmp_fd->path_name,
			tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd);

	pthread_mutex_lock(&kconsumerd_lock_fds);
	cds_list_add(&tmp_fd->list, &kconsumerd_fd_list.head);
	fds_count++;
	pthread_mutex_unlock(&kconsumerd_lock_fds);

end:
	return ret;
}

/*
 * sighandler
 *
 * Signal handler for the daemon
 */
static void sighandler(int sig)
{
	cleanup();

	return;
}

/*
 * set_signal_handler
 *
 * Setup signal handler for:
 * SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		perror("sigemptyset");
		return ret;
	}

	sa.sa_handler = sighandler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		perror("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		perror("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		perror("sigaction");
		return ret;
	}

	return ret;
}

/*
 * on_read_subbuffer
 *
 * Splice the data from the ring buffer to the tracefile.
 * Returns the number of bytes spliced
 */
static int on_read_subbuffer(struct ltt_kconsumerd_fd *kconsumerd_fd,
		unsigned long len)
{
	long ret = 0;
	loff_t offset = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

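	/*
	 * splice(2) requires at least one side of the transfer to be a pipe,
	 * so the data is relayed in two steps: from the buffer fd into
	 * thread_pipe, then from thread_pipe into the tracefile.
	 */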
	while (len > 0) {
		DBG("splice chan to pipe offset %lu (fd : %d)",
				(unsigned long)offset, fd);
		ret = splice(fd, &offset, thread_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe ret %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in relay splice");
			goto splice_error;
		}

		ret = splice(thread_pipe[0], NULL, outfd, NULL, ret,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice pipe to file %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in file splice");
			goto splice_error;
		}
		if (ret >= len) {
			len = 0;
		} else {
			len -= ret;
		}
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);
		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near
		 * future after we write it.
		 * We need to call fadvise again after the file grows because
		 * the kernel does not seem to apply fadvise to non-existing
		 * parts of the file.
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not
		 * well defined. So it can be expected to lead to lower
		 * throughput in streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
	goto end;

splice_error:
	/* Send the appropriate error description to sessiond */
	switch (ret) {
	case EBADF:
		send_error(KCONSUMERD_SPLICE_EBADF);
		break;
	case EINVAL:
		send_error(KCONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		send_error(KCONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		send_error(KCONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	return ret;
}

/*
 * read_subbuffer
 *
 * Consume data on a file descriptor and write it on a trace file
 */
static int read_subbuffer(struct ltt_kconsumerd_fd *kconsumerd_fd)
{
	unsigned long len;
	int err;
	long ret = 0;
	int infd = kconsumerd_fd->consumerd_fd;

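	/*
	 * Consume protocol: reserve the next sub-buffer from the kernel
	 * tracer, query its padded size, splice that many bytes to the
	 * tracefile, then release the sub-buffer so the tracer can reuse it.
	 */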
	DBG("In read_subbuffer (infd : %d)", infd);
	/* Get the next subbuffer */
	err = kernctl_get_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		perror("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency)");
		goto end;
	}

	/* Read the whole subbuffer */
	err = kernctl_get_padded_subbuf_size(infd, &len);
	if (err != 0) {
		ret = errno;
		perror("Getting sub-buffer len failed.");
		goto end;
	}

	/* Splice the subbuffer to the tracefile */
	ret = on_read_subbuffer(kconsumerd_fd, len);
	if (ret < 0) {
		/*
		 * Display the error but continue processing to try
		 * to release the subbuffer
		 */
		ERR("Error splicing to tracefile");
	}

	err = kernctl_put_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		if (errno == EFAULT) {
			perror("Error in unreserving sub buffer\n");
		} else if (errno == EIO) {
			/* Should never happen with newer LTTng versions */
			perror("Reader has been pushed by the writer, last sub-buffer corrupted.");
		}
		goto end;
	}

end:
	return ret;
}

/*
 * change_fd_state
 *
 * Update a fd according to what we just received
 */
static void change_fd_state(int sessiond_fd,
		enum lttcomm_kconsumerd_fd_state state)
{
	struct ltt_kconsumerd_fd *iter;

	cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
		if (iter->sessiond_fd == sessiond_fd) {
			iter->state = state;
			break;
		}
	}
}

/*
 * consumerd_recv_fd
 *
 * Receives an array of file descriptors and the associated
 * structures describing each fd (path name).
 * Returns the size of received data
 */
static int consumerd_recv_fd(int sfd, int size,
		enum lttcomm_consumerd_command cmd_type)
{
	struct msghdr msg;
	struct iovec iov[1];
	int ret, i, tmp2;
	struct cmsghdr *cmsg;
	int nb_fd;
	char tmp[CMSG_SPACE(size)];
	struct lttcomm_kconsumerd_msg *buf;

	/* The number of fds we are about to receive */
	nb_fd = size / sizeof(struct lttcomm_kconsumerd_msg);

	buf = malloc(size);
	if (buf == NULL) {
		perror("malloc lttcomm_kconsumerd_msg");
		return -1;
	}

	memset(&msg, 0, sizeof(msg));

	/* Prepare to receive the structures */
	iov[0].iov_base = buf;
	iov[0].iov_len = size;
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;

	msg.msg_control = tmp;
	msg.msg_controllen = sizeof(tmp);

	DBG("Waiting to receive fds");
	if ((ret = recvmsg(sfd, &msg, 0)) < 0) {
		perror("recvmsg");
	}
	if (ret != size) {
		ERR("Received only %d, expected %d", ret, size);
		send_error(KCONSUMERD_ERROR_RECV_FD);
		goto end;
	}

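	/*
	 * The file descriptors themselves travel as SCM_RIGHTS ancillary data
	 * in the control buffer; the iovec above only carries the
	 * lttcomm_kconsumerd_msg structures describing each stream.
	 */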
	cmsg = CMSG_FIRSTHDR(&msg);
	if (!cmsg) {
		ERR("Invalid control message header");
		ret = -1;
		send_error(KCONSUMERD_ERROR_RECV_FD);
		goto end;
	}

	/* If we received fds */
	if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
		DBG("Receive : expecting %d fds", nb_fd);
		for (i = 0; i < nb_fd; i++) {
			switch (cmd_type) {
			case LTTCOMM_ADD_STREAM:
				DBG("add_fd %s (%d)", buf[i].path_name, ((int *) CMSG_DATA(cmsg))[i]);
				ret = add_fd(&buf[i], ((int *) CMSG_DATA(cmsg))[i]);
				if (ret < 0) {
					send_error(KCONSUMERD_OUTFD_ERROR);
					goto end;
				}
				break;
			case LTTCOMM_UPDATE_STREAM:
				change_fd_state(buf[i].fd, buf[i].state);
				break;
			default:
				break;
			}
		}
		/* Flag to tell the polling thread to update its fd array */
		update_fd_array = 1;
		/* Signal the poll thread */
		tmp2 = write(poll_pipe[1], "4", 1);
	} else {
		ERR("Didn't receive any fd");
		send_error(KCONSUMERD_ERROR_RECV_FD);
		ret = -1;
		goto end;
	}

end:
	if (buf != NULL) {
		free(buf);
		buf = NULL;
	}
	return ret;
}

/*
 * thread_receive_fds
 *
 * This thread listens on the consumerd socket and
 * receives the file descriptors from ltt-sessiond
 */
static void *thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	struct lttcomm_kconsumerd_header tmp;

	DBG("Creating command socket %s", command_sock_path);
	unlink(command_sock_path);
	client_socket = lttcomm_create_unix_sock(command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto error;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto error;
	}

	DBG("Sending ready command to ltt-sessiond");
	ret = send_error(KCONSUMERD_COMMAND_SOCK_READY);
	if (ret < 0) {
		ERR("Error sending ready command to ltt-sessiond");
		goto error;
	}

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		WARN("On accept");
		goto error;
	}
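
	/*
	 * Each command arrives in two parts: a lttcomm_kconsumerd_header
	 * giving the payload size and command type, then the payload itself
	 * with the stream fds attached as ancillary data.
	 */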
	while (1) {
		/* We first get the number of fd we are about to receive */
		ret = lttcomm_recv_unix_sock(sock, &tmp,
				sizeof(struct lttcomm_kconsumerd_header));
		if (ret <= 0) {
			ERR("Receiving the lttcomm_kconsumerd_header, exiting");
			goto error;
		}
		ret = consumerd_recv_fd(sock, tmp.payload_size, tmp.cmd_type);
		if (ret <= 0) {
			ERR("Receiving the FD, exiting");
			goto error;
		}
	}

error:
	return NULL;
}

/*
 * update_poll_array
 *
 * Allocate the pollfd structure and the local view of the out fds
 * to avoid doing a lookup in the linked list and concurrency issues
 * when writing is needed.
 * Returns the number of fds in the structures
 */
static int update_poll_array(struct pollfd **pollfd,
		struct ltt_kconsumerd_fd **local_kconsumerd_fd)
{
	struct ltt_kconsumerd_fd *iter;
	int i = 0;

	DBG("Updating poll fd array");
	pthread_mutex_lock(&kconsumerd_lock_fds);

	cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
		DBG("Inside for each");
		if (iter->state == ACTIVE_FD) {
			DBG("Active FD %d", iter->consumerd_fd);
			(*pollfd)[i].fd = iter->consumerd_fd;
			(*pollfd)[i].events = POLLIN | POLLPRI;
			local_kconsumerd_fd[i] = iter;
			i++;
		} else if (iter->state == DELETE_FD) {
			del_fd(iter);
		}
	}

	/*
	 * Insert the poll_pipe at the end of the array and don't increment i
	 * so nb_fd is the number of real FD
	 */
	(*pollfd)[i].fd = poll_pipe[0];
	(*pollfd)[i].events = POLLIN;

	update_fd_array = 0;
	pthread_mutex_unlock(&kconsumerd_lock_fds);
	return i;
}

/*
 * thread_poll_fds
 *
 * This thread polls the fds in the ltt_fd_list to consume the data
 * and write it to tracefile if necessary.
 */
static void *thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* Local view of the fds */
	struct ltt_kconsumerd_fd **local_kconsumerd_fd = NULL;
	/* Local view of fds_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;

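	/*
	 * thread_pipe is the intermediate pipe that on_read_subbuffer() uses
	 * to splice data from a buffer fd into the tracefile.
	 */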
	ret = pipe(thread_pipe);
	if (ret < 0) {
		perror("Error creating pipe");
		goto end;
	}

	local_kconsumerd_fd = malloc(sizeof(struct ltt_kconsumerd_fd));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * The ltt_fd_list has been updated, we need to update our
		 * local array as well
		 */
		if (update_fd_array) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_kconsumerd_fd != NULL) {
				free(local_kconsumerd_fd);
				local_kconsumerd_fd = NULL;
			}
			/* Allocate for all fds + 1 for the poll_pipe */
			pollfd = malloc((fds_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				goto end;
			}
			/* Allocate for all fds + 1 for the poll_pipe */
			local_kconsumerd_fd = malloc((fds_count + 1) * sizeof(struct ltt_kconsumerd_fd));
			if (local_kconsumerd_fd == NULL) {
				perror("local_kconsumerd_fd malloc");
				goto end;
			}

			ret = update_poll_array(&pollfd, local_kconsumerd_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				send_error(KCONSUMERD_POLL_ERROR);
				goto end;
			}
			nb_fd = ret;
		}

		/* Poll on the array of fds */
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, -1);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			send_error(KCONSUMERD_POLL_ERROR);
			goto end;
		}

		/*
		 * If only the poll_pipe triggered poll to return, just return
		 * to the beginning of the loop to update the array
		 */
		if (num_rdy == 1 && pollfd[nb_fd].revents == POLLIN) {
			DBG("poll_pipe wake up");
			tmp2 = read(poll_pipe[0], &tmp, 1);
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			switch (pollfd[i].revents) {
			case POLLERR:
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				num_hup++;
				send_error(KCONSUMERD_POLL_ERROR);
				break;
			case POLLHUP:
				ERR("Polling fd %d tells it has hung up.", pollfd[i].fd);
				num_hup++;
				break;
			case POLLNVAL:
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				send_error(KCONSUMERD_POLL_NVAL);
				num_hup++;
				break;
			case POLLPRI:
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = read_subbuffer(local_kconsumerd_fd[i]);
				/* it's ok to have an unavailable sub-buffer (FIXME : is it ?) */
				if (ret == EAGAIN) {
					ret = 0;
				}
				break;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up\n");
			send_error(KCONSUMERD_POLL_HUP);
			goto end;
		}

		/* Take care of low priority channels. */
		if (!high_prio) {
			for (i = 0; i < nb_fd; i++) {
				switch (pollfd[i].revents) {
				case POLLIN:
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = read_subbuffer(local_kconsumerd_fd[i]);
					/* it's ok to have an unavailable subbuffer (FIXME : is it ?) */
					if (ret == EAGAIN) {
						ret = 0;
					}
					break;
				}
			}
		}
	}
end:
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_kconsumerd_fd != NULL) {
		free(local_kconsumerd_fd);
		local_kconsumerd_fd = NULL;
	}
	cleanup();
	return NULL;
}

/*
 * usage function on stderr
 */
static void usage(void)
{
	fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
	fprintf(stderr, "  -h, --help                         "
			"Display this usage.\n");
	fprintf(stderr, "  -c, --kconsumerd-cmd-sock PATH     "
			"Specify path for the command socket\n");
	fprintf(stderr, "  -e, --kconsumerd-err-sock PATH     "
			"Specify path for the error socket\n");
	fprintf(stderr, "  -d, --daemonize                    "
			"Start as a daemon.\n");
	fprintf(stderr, "  -q, --quiet                        "
			"No output at all.\n");
	fprintf(stderr, "  -v, --verbose                      "
			"Verbose mode. Activate DBG() macro.\n");
	fprintf(stderr, "  -V, --version                      "
			"Show version number.\n");
}


/*
 * daemon argument parsing
 */
static void parse_args(int argc, char **argv)
{
	int c;

	static struct option long_options[] = {
		{ "kconsumerd-cmd-sock", 1, 0, 'c' },
		{ "kconsumerd-err-sock", 1, 0, 'e' },
		{ "daemonize", 0, 0, 'd' },
		{ "help", 0, 0, 'h' },
		{ "quiet", 0, 0, 'q' },
		{ "verbose", 0, 0, 'v' },
		{ "version", 0, 0, 'V' },
		{ NULL, 0, 0, 0 }
	};

	while (1) {
		int option_index = 0;
		c = getopt_long(argc, argv, "dhqvV" "c:e:", long_options, &option_index);
		if (c == -1) {
			break;
		}

		switch (c) {
		case 0:
			fprintf(stderr, "option %s", long_options[option_index].name);
			if (optarg) {
				fprintf(stderr, " with arg %s\n", optarg);
			}
			break;
		case 'c':
			snprintf(command_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'e':
			snprintf(error_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'd':
			opt_daemon = 1;
			break;
		case 'h':
			usage();
			exit(EXIT_FAILURE);
		case 'q':
			opt_quiet = 1;
			break;
		case 'v':
			opt_verbose = 1;
			break;
		case 'V':
			fprintf(stdout, "%s\n", VERSION);
			exit(EXIT_SUCCESS);
		default:
			usage();
			exit(EXIT_FAILURE);
		}
	}
}

/*
 * main
 */
int main(int argc, char **argv)
{
	int i;
	int ret = 0;
	void *status;

	/* Parse arguments */
	progname = argv[0];
	parse_args(argc, argv);

	/* Daemonize */
	if (opt_daemon) {
		ret = daemon(0, 0);
		if (ret < 0) {
			perror("daemon");
			goto error;
		}
	}

	if (strlen(command_sock_path) == 0) {
		snprintf(command_sock_path, PATH_MAX,
				KCONSUMERD_CMD_SOCK_PATH);
	}
	if (strlen(error_sock_path) == 0) {
		snprintf(error_sock_path, PATH_MAX,
				KCONSUMERD_ERR_SOCK_PATH);
	}

	if (set_signal_handler() < 0) {
		goto error;
	}

	/* Create the pipe used to wake the polling thread when needed */
	ret = pipe(poll_pipe);
	if (ret < 0) {
		perror("Error creating poll pipe");
		goto end;
	}

	/* Connect to the socket created by ltt-sessiond to report errors */
	DBG("Connecting to error socket %s", error_sock_path);
	error_socket = lttcomm_connect_unix_sock(error_sock_path);
	/* Not a fatal error, but all communication with ltt-sessiond will fail */
	if (error_socket < 0) {
		WARN("Cannot connect to error socket, is ltt-sessiond started?");
	}

	/* Create the thread to manage the receive of fd */
	ret = pthread_create(&threads[0], NULL, thread_receive_fds, (void *) NULL);
	if (ret != 0) {
		perror("pthread_create");
		goto error;
	}

	/* Create thread to manage the polling/writing of traces */
	ret = pthread_create(&threads[1], NULL, thread_poll_fds, (void *) NULL);
	if (ret != 0) {
		perror("pthread_create");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = pthread_join(threads[i], &status);
		if (ret != 0) {
			perror("pthread_join");
			goto error;
		}
	}

	ret = EXIT_SUCCESS;
	send_error(KCONSUMERD_EXIT_SUCCESS);
	goto end;

error:
	ret = EXIT_FAILURE;
	send_error(KCONSUMERD_EXIT_FAILURE);

end:
	cleanup();

	return ret;
}