/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#define _GNU_SOURCE	/* needed for splice() and sync_file_range() */
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <urcu/list.h>

#include "libkernelctl.h"
#include "liblttkconsumerd.h"
/* Init the list of FDs */
static struct kconsumerd_fd_list kconsumerd_fd_list = {
	.head = CDS_LIST_HEAD_INIT(kconsumerd_fd_list.head),
};
/* Number of elements in the FD list */
static unsigned int kconsumerd_fds_count;
/* If the local array of FDs needs update in the poll function */
static unsigned int kconsumerd_update_fd_array = 1;

/* lock the fd array and structures */
static pthread_mutex_t kconsumerd_lock_fds;

/* communication with splice */
static int kconsumerd_thread_pipe[2];

/* pipe to wake the poll thread when necessary */
static int kconsumerd_poll_pipe[2];

/* timeout parameter, to control the polling thread grace period */
static int kconsumerd_poll_timeout = -1;

/* socket to communicate errors with sessiond */
static int kconsumerd_error_socket;

/* socket to exchange commands with sessiond */
static char *kconsumerd_command_sock_path;
/* flag to inform the polling thread to quit when all FDs have hung up */
static int kconsumerd_quit = 0;
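
/*
 * Note on concurrency (summary of the code below): the FD list above is
 * shared between the command thread (kconsumerd_thread_receive_fds), which
 * adds and updates entries, and the polling thread
 * (kconsumerd_thread_poll_fds), which consumes the data. Every access to the
 * list is serialized by kconsumerd_lock_fds, and a write to
 * kconsumerd_poll_pipe wakes the polling thread whenever the list changes.
 */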
/*
 * kconsumerd_set_error_socket
 *
 * Set the error socket
 */
void kconsumerd_set_error_socket(int sock)
{
	kconsumerd_error_socket = sock;
}
/*
 * kconsumerd_set_command_socket_path
 *
 * Set the command socket path
 */
void kconsumerd_set_command_socket_path(char *sock)
{
	kconsumerd_command_sock_path = sock;
}
/*
 * kconsumerd_del_fd
 *
 * Remove a fd from the global list protected by a mutex
 */
static void kconsumerd_del_fd(struct kconsumerd_fd *lcf)
{
	pthread_mutex_lock(&kconsumerd_lock_fds);
	cds_list_del(&lcf->list);
	if (kconsumerd_fds_count > 0) {
		kconsumerd_fds_count--;
		/* release the tracefile fd, the stream fd and the entry itself */
		close(lcf->out_fd);
		close(lcf->consumerd_fd);
		free(lcf);
	}
	pthread_mutex_unlock(&kconsumerd_lock_fds);
}
/*
 * kconsumerd_add_fd
 *
 * Add a fd to the global list protected by a mutex
 */
static int kconsumerd_add_fd(struct lttcomm_kconsumerd_msg *buf, int consumerd_fd)
{
	int ret;
	struct kconsumerd_fd *tmp_fd;

	tmp_fd = malloc(sizeof(struct kconsumerd_fd));
	tmp_fd->sessiond_fd = buf->fd;
	tmp_fd->consumerd_fd = consumerd_fd;
	tmp_fd->state = buf->state;
	tmp_fd->max_sb_size = buf->max_sb_size;
	strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);

	/* Opening the tracefile in write mode */
	ret = open(tmp_fd->path_name,
			O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU|S_IRWXG|S_IRWXO);
	if (ret < 0) {
		/* entry is not added to the list on failure */
		ERR("Opening %s", tmp_fd->path_name);
		perror("open");
		free(tmp_fd);
		goto end;
	}
	tmp_fd->out_fd = ret;
	tmp_fd->out_fd_offset = 0;

	DBG("Adding %s (%d, %d, %d)", tmp_fd->path_name,
			tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd);

	pthread_mutex_lock(&kconsumerd_lock_fds);
	cds_list_add(&tmp_fd->list, &kconsumerd_fd_list.head);
	kconsumerd_fds_count++;
	pthread_mutex_unlock(&kconsumerd_lock_fds);

end:
	return ret;
}
/*
 * kconsumerd_change_fd_state
 *
 * Update a fd according to what we just received
 */
static void kconsumerd_change_fd_state(int sessiond_fd,
		enum kconsumerd_fd_state state)
{
	struct kconsumerd_fd *iter;

	cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
		if (iter->sessiond_fd == sessiond_fd) {
			iter->state = state;
			break;
		}
	}
}
/*
 * kconsumerd_update_poll_array
 *
 * Fill the pollfd structure and the local view of the out fds
 * to avoid doing a lookup in the linked list and concurrency issues
 * when writing is needed.
 * Returns the number of fds in the structures
 */
static int kconsumerd_update_poll_array(struct pollfd **pollfd,
		struct kconsumerd_fd **local_kconsumerd_fd)
{
	struct kconsumerd_fd *iter;
	int i = 0;

	DBG("Updating poll fd array");
	pthread_mutex_lock(&kconsumerd_lock_fds);

	cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
		DBG("Inside for each");
		if (iter->state == ACTIVE_FD) {
			DBG("Active FD %d", iter->consumerd_fd);
			(*pollfd)[i].fd = iter->consumerd_fd;
			(*pollfd)[i].events = POLLIN | POLLPRI;
			local_kconsumerd_fd[i] = iter;
			i++;
		}
	}

	/*
	 * insert the kconsumerd_poll_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD
	 */
	(*pollfd)[i].fd = kconsumerd_poll_pipe[0];
	(*pollfd)[i].events = POLLIN;

	kconsumerd_update_fd_array = 0;
	pthread_mutex_unlock(&kconsumerd_lock_fds);
	return i;
}
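
/*
 * Note (based on the caller in kconsumerd_thread_poll_fds below): *pollfd and
 * local_kconsumerd_fd are allocated by the caller with room for
 * kconsumerd_fds_count + 1 entries, the extra slot holding
 * kconsumerd_poll_pipe[0] so that the poll() call can also be woken up by the
 * command thread.
 */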
/*
 * kconsumerd_on_read_subbuffer_mmap
 *
 * mmap the ring buffer, read it and write the data to the tracefile.
 * Returns the number of bytes written
 */
static int kconsumerd_on_read_subbuffer_mmap(
		struct kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	unsigned long mmap_len, mmap_offset, padded_len, padding_len;
	char *mmap_base;
	char *padding = NULL;
	int ret = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	/* get the padded subbuffer size to know the padding required */
	ret = kernctl_get_padded_subbuf_size(fd, &padded_len);
	if (ret != 0) {
		perror("kernctl_get_padded_subbuf_size");
		goto end;
	}
	padding_len = padded_len - len;
	padding = malloc(padding_len * sizeof(char));
	memset(padding, '\0', padding_len);

	/* get the len of the mmap region */
	ret = kernctl_get_mmap_len(fd, &mmap_len);
	if (ret != 0) {
		perror("kernctl_get_mmap_len");
		goto end;
	}

	/* get the offset inside the fd to mmap */
	ret = kernctl_get_mmap_read_offset(fd, &mmap_offset);
	if (ret != 0) {
		perror("kernctl_get_mmap_read_offset");
		goto end;
	}

	mmap_base = mmap(NULL, mmap_len, PROT_READ, MAP_PRIVATE, fd, mmap_offset);
	if (mmap_base == MAP_FAILED) {
		perror("Error mmaping");
		ret = -1;
		goto end;
	}

	ret = write(outfd, mmap_base, len);
	if (ret < 0) {
		perror("Error in file write");
		goto end;
	}
	/* This won't block, but will start writeout asynchronously */
	sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
			SYNC_FILE_RANGE_WRITE);
	kconsumerd_fd->out_fd_offset += ret;

	/* once all the data is written, write the padding to disk */
	ret = write(outfd, padding, padding_len);
	if (ret < 0) {
		perror("Error writing padding to file");
		goto end;
	}

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);

		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
		 * we write it.
		 *
		 * We need to call fadvise again after the file grows because the
		 * kernel does not seem to apply fadvise to non-existing parts of the
		 * file.
		 *
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not well
		 * defined. So it can be expected to lead to lower throughput in
		 * streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}

end:
	if (padding != NULL) {
		free(padding);
	}
	return ret;
}
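
/*
 * Note: the mmap path above and the splice path below share the same
 * write-out strategy (sync_file_range() on the bytes just written, then
 * sync_file_range() + posix_fadvise() on the previous sub-buffer); they only
 * differ in how the data is moved from the kernel ring buffer to the
 * tracefile.
 */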
/*
 * kconsumerd_on_read_subbuffer
 *
 * Splice the data from the ring buffer to the tracefile.
 * Returns the number of bytes spliced
 */
static int kconsumerd_on_read_subbuffer(
		struct kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	long ret = 0;
	loff_t offset = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	DBG("splice chan to pipe offset %lu (fd : %d)",
			(unsigned long)offset, fd);
	ret = splice(fd, &offset, kconsumerd_thread_pipe[1], NULL, len,
			SPLICE_F_MOVE | SPLICE_F_MORE);
	DBG("splice chan to pipe ret %ld", ret);
	if (ret < 0) {
		perror("Error in relay splice");
		goto splice_error;
	}

	ret = splice(kconsumerd_thread_pipe[0], NULL, outfd, NULL, ret,
			SPLICE_F_MOVE | SPLICE_F_MORE);
	DBG("splice pipe to file %ld", ret);
	if (ret < 0) {
		perror("Error in file splice");
		goto splice_error;
	}

	/* This won't block, but will start writeout asynchronously */
	sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
			SYNC_FILE_RANGE_WRITE);
	kconsumerd_fd->out_fd_offset += ret;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);

		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
		 * we write it.
		 *
		 * We need to call fadvise again after the file grows because the
		 * kernel does not seem to apply fadvise to non-existing parts of the
		 * file.
		 *
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not well
		 * defined. So it can be expected to lead to lower throughput in
		 * streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
	goto end;

splice_error:
	/* send the appropriate error description to sessiond */
	switch (errno) {
	case EBADF:
		kconsumerd_send_error(KCONSUMERD_SPLICE_EBADF);
		break;
	case EINVAL:
		kconsumerd_send_error(KCONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		kconsumerd_send_error(KCONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		kconsumerd_send_error(KCONSUMERD_SPLICE_ESPIPE);
		break;
	default:
		break;
	}

end:
	return ret;
}
/*
 * kconsumerd_read_subbuffer
 *
 * Consume data on a file descriptor and write it on a trace file
 */
static int kconsumerd_read_subbuffer(struct kconsumerd_fd *kconsumerd_fd)
{
	unsigned long len;
	int err;
	int ret = 0;
	int infd = kconsumerd_fd->consumerd_fd;

	DBG("In kconsumerd_read_subbuffer (infd : %d)", infd);
	/* Get the next subbuffer */
	err = kernctl_get_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		perror("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency)");
		goto end;
	}

	switch (DEFAULT_KERNEL_CHANNEL_OUTPUT) {
	case LTTNG_KERNEL_SPLICE:
		/* read the whole subbuffer */
		err = kernctl_get_padded_subbuf_size(infd, &len);
		if (err != 0) {
			ret = errno;
			perror("Getting sub-buffer len failed.");
			goto end;
		}

		/* splice the subbuffer to the tracefile */
		ret = kconsumerd_on_read_subbuffer(kconsumerd_fd, len);
		if (ret < 0) {
			/*
			 * display the error but continue processing to try
			 * to release the subbuffer
			 */
			ERR("Error splicing to tracefile");
		}
		break;
	case LTTNG_KERNEL_MMAP:
		/* read the used subbuffer size */
		err = kernctl_get_subbuf_size(infd, &len);
		if (err != 0) {
			ret = errno;
			perror("Getting sub-buffer len failed.");
			goto end;
		}

		/* write the subbuffer to the tracefile */
		ret = kconsumerd_on_read_subbuffer_mmap(kconsumerd_fd, len);
		if (ret < 0) {
			/*
			 * display the error but continue processing to try
			 * to release the subbuffer
			 */
			ERR("Error writing to tracefile");
		}
		break;
	default:
		ERR("Unknown output method");
		ret = -1;
		break;
	}

	err = kernctl_put_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		if (errno == EFAULT) {
			perror("Error in unreserving sub buffer");
		} else if (errno == EIO) {
			/* Should never happen with newer LTTng versions */
			perror("Reader has been pushed by the writer, last sub-buffer corrupted.");
		}
		goto end;
	}

end:
	return ret;
}
/*
 * kconsumerd_consumerd_recv_fd
 *
 * Receives an array of file descriptors and the associated
 * structures describing each fd (path name).
 * Returns the size of received data
 */
static int kconsumerd_consumerd_recv_fd(int sfd, int size,
		enum kconsumerd_command cmd_type)
{
	struct msghdr msg;
	struct iovec iov[1];
	int ret = 0, i, tmp2;
	struct cmsghdr *cmsg;
	int nb_fd;
	char recv_fd[CMSG_SPACE(sizeof(int))];
	struct lttcomm_kconsumerd_msg lkm;

	/* the number of fds we are about to receive */
	nb_fd = size / sizeof(struct lttcomm_kconsumerd_msg);

	for (i = 0; i < nb_fd; i++) {
		memset(&msg, 0, sizeof(msg));

		/* Prepare to receive the structures */
		iov[0].iov_base = &lkm;
		iov[0].iov_len = sizeof(lkm);
		msg.msg_iov = iov;
		msg.msg_iovlen = 1;

		msg.msg_control = recv_fd;
		msg.msg_controllen = sizeof(recv_fd);

		DBG("Waiting to receive fd");
		if ((ret = recvmsg(sfd, &msg, 0)) < 0) {
			perror("recvmsg");
			continue;
		}

		if (ret != (size / nb_fd)) {
			ERR("Received only %d, expected %d", ret, size);
			kconsumerd_send_error(KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

		cmsg = CMSG_FIRSTHDR(&msg);
		if (!cmsg) {
			ERR("Invalid control message header");
			ret = -1;
			kconsumerd_send_error(KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

		/* if we received fds */
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
			/*
			 * cmd_type is assumed here to carry the ADD_STREAM and
			 * UPDATE_STREAM commands of enum kconsumerd_command
			 * declared in liblttkconsumerd.h.
			 */
			switch (cmd_type) {
			case ADD_STREAM:
				DBG("kconsumerd_add_fd %s (%d)", lkm.path_name, (CMSG_DATA(cmsg)[0]));
				ret = kconsumerd_add_fd(&lkm, (CMSG_DATA(cmsg)[0]));
				if (ret < 0) {
					kconsumerd_send_error(KCONSUMERD_OUTFD_ERROR);
					goto end;
				}
				break;
			case UPDATE_STREAM:
				kconsumerd_change_fd_state(lkm.fd, lkm.state);
				break;
			default:
				break;
			}

			/* flag to tell the polling thread to update its fd array */
			kconsumerd_update_fd_array = 1;
			/* signal the poll thread */
			tmp2 = write(kconsumerd_poll_pipe[1], "4", 1);
		} else {
			ERR("Didn't receive any fd");
			kconsumerd_send_error(KCONSUMERD_ERROR_RECV_FD);
			ret = -1;
			goto end;
		}
	}

end:
	DBG("kconsumerd_consumerd_recv_fd thread exiting");
	return ret;
}
/*
 * kconsumerd_thread_poll_fds
 *
 * This thread polls the fds in the ltt_fd_list to consume the data
 * and write it to tracefile if necessary.
 */
void *kconsumerd_thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the fds */
	struct kconsumerd_fd **local_kconsumerd_fd = NULL;
	/* local view of kconsumerd_fds_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;

	ret = pipe(kconsumerd_thread_pipe);
	if (ret < 0) {
		perror("Error creating pipe");
		goto end;
	}

	local_kconsumerd_fd = malloc(sizeof(struct kconsumerd_fd));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * the ltt_fd_list has been updated, we need to update our
		 * local array as well
		 */
		if (kconsumerd_update_fd_array == 1) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_kconsumerd_fd != NULL) {
				free(local_kconsumerd_fd);
				local_kconsumerd_fd = NULL;
			}

			/* allocate for all fds + 1 for the kconsumerd_poll_pipe */
			pollfd = malloc((kconsumerd_fds_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				goto end;
			}

			/* allocate for all fds + 1 for the kconsumerd_poll_pipe */
			local_kconsumerd_fd = malloc((kconsumerd_fds_count + 1) *
					sizeof(struct kconsumerd_fd));
			if (local_kconsumerd_fd == NULL) {
				perror("local_kconsumerd_fd malloc");
				goto end;
			}

			ret = kconsumerd_update_poll_array(&pollfd, local_kconsumerd_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				kconsumerd_send_error(KCONSUMERD_POLL_ERROR);
				goto end;
			}
			nb_fd = ret;
		}

		/* poll on the array of fds */
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, kconsumerd_poll_timeout);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			kconsumerd_send_error(KCONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/* no FDs left and kconsumerd_quit was requested: clean up the thread */
		if (nb_fd == 0 && kconsumerd_quit == 1) {
			goto end;
		}

		/*
		 * if only the kconsumerd_poll_pipe triggered poll to return just
		 * return to the beginning of the loop to update the array
		 */
		if (num_rdy == 1 && pollfd[nb_fd].revents == POLLIN) {
			DBG("kconsumerd_poll_pipe wake up");
			tmp2 = read(kconsumerd_poll_pipe[0], &tmp, 1);
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			switch (pollfd[i].revents) {
			case POLLERR:
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				kconsumerd_update_fd_array = 1;
				num_hup++;
				break;
			case POLLHUP:
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				kconsumerd_update_fd_array = 1;
				num_hup++;
				break;
			case POLLNVAL:
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				kconsumerd_update_fd_array = 1;
				num_hup++;
				break;
			case POLLPRI:
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = kconsumerd_read_subbuffer(local_kconsumerd_fd[i]);
				/* it's ok to have an unavailable sub-buffer */
				break;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up\n");
			if (kconsumerd_quit == 1) {
				goto end;
			}
		}

		/* Take care of low priority channels. */
		if (high_prio == 0) {
			for (i = 0; i < nb_fd; i++) {
				if (pollfd[i].revents == POLLIN) {
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = kconsumerd_read_subbuffer(local_kconsumerd_fd[i]);
					/* it's ok to have an unavailable subbuffer */
				}
			}
		}
	}

end:
	DBG("polling thread exiting");
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_kconsumerd_fd != NULL) {
		free(local_kconsumerd_fd);
		local_kconsumerd_fd = NULL;
	}
	kconsumerd_cleanup();
	return NULL;
}
/*
 * kconsumerd_create_poll_pipe
 *
 * create the pipe to wake the polling thread when needed
 */
int kconsumerd_create_poll_pipe()
{
	return pipe(kconsumerd_poll_pipe);
}
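
/*
 * Typical usage (illustrative sketch only; the daemon code that does this is
 * not part of this file, and the variable names below are made up):
 *
 *	pthread_t fd_thread, poll_thread;
 *
 *	kconsumerd_set_error_socket(error_sock);
 *	kconsumerd_set_command_socket_path(command_sock_path);
 *	if (kconsumerd_create_poll_pipe() < 0) {
 *		return -1;
 *	}
 *	pthread_create(&fd_thread, NULL, kconsumerd_thread_receive_fds, NULL);
 *	pthread_create(&poll_thread, NULL, kconsumerd_thread_poll_fds, NULL);
 */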
/*
 * kconsumerd_thread_receive_fds
 *
 * This thread listens on the consumerd socket and
 * receives the file descriptors from ltt-sessiond
 */
void *kconsumerd_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	struct lttcomm_kconsumerd_header tmp;

	DBG("Creating command socket %s", kconsumerd_command_sock_path);
	unlink(kconsumerd_command_sock_path);
	client_socket = lttcomm_create_unix_sock(kconsumerd_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to ltt-sessiond");
	ret = kconsumerd_send_error(KCONSUMERD_COMMAND_SOCK_READY);
	if (ret < 0) {
		ERR("Error sending ready command to ltt-sessiond");
		goto end;
	}

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		goto end;
	}

	while (1) {
		/* We first get the number of fd we are about to receive */
		ret = lttcomm_recv_unix_sock(sock, &tmp,
				sizeof(struct lttcomm_kconsumerd_header));
		if (ret <= 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (tmp.cmd_type == STOP) {
			DBG("Received STOP command");
			goto end;
		}
		/* we received a command to add or update fds */
		ret = kconsumerd_consumerd_recv_fd(sock, tmp.payload_size, tmp.cmd_type);
		if (ret <= 0) {
			ERR("Receiving the FD, exiting");
			goto end;
		}
	}

end:
	DBG("kconsumerd_thread_receive_fds exiting");

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	kconsumerd_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	kconsumerd_poll_timeout = KCONSUMERD_POLL_GRACE_PERIOD;

	/* wake up the polling thread */
	ret = write(kconsumerd_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	return NULL;
}
/*
 * kconsumerd_cleanup
 *
 * Cleanup the daemon's socket on exit
 */
void kconsumerd_cleanup()
{
	struct kconsumerd_fd *iter, *tmp;

	/* remove the socket file */
	unlink(kconsumerd_command_sock_path);

	/*
	 * close all outfd. Use the _safe iteration variant because
	 * kconsumerd_del_fd() removes entries from the list while we iterate.
	 */
	cds_list_for_each_entry_safe(iter, tmp, &kconsumerd_fd_list.head, list) {
		kconsumerd_del_fd(iter);
	}
}
/*
 * kconsumerd_send_error
 *
 * send return code to ltt-sessiond
 */
int kconsumerd_send_error(enum lttcomm_return_code cmd)
{
	if (kconsumerd_error_socket > 0) {
		return lttcomm_send_unix_sock(kconsumerd_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}