/* Copyright (C) 2009 Pierre-Marc Fournier
 *               2010 Alexis Halle
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE

#include <sys/epoll.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/un.h>
#include <unistd.h>
#include <pthread.h>
#include <signal.h>

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>

#include <ust/ustconsumer.h>
#include "lowlevel.h"
#include "usterr_signal_safe.h"
#include "ustcomm.h"

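/*
 * Return codes of the get/put sub-buffer handshake with the traced
 * application.  The *_DIED codes signal that the application exited
 * while it was being traced; the consumer then salvages whatever data
 * remains in the buffer before tearing the connection down.
 */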
#define GET_SUBBUF_OK 1
#define GET_SUBBUF_DONE 0
#define GET_SUBBUF_DIED 2

#define PUT_SUBBUF_OK 1
#define PUT_SUBBUF_DIED 0
#define PUT_SUBBUF_PUSHED 2
#define PUT_SUBBUF_DONE 3

#define UNIX_PATH_MAX 108

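/*
 * Request exclusive access to the next filled sub-buffer of @buf from
 * the traced application.  On success, buf->consumed_old is updated to
 * the consumed count returned by the application.
 */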
static int get_subbuffer(struct buffer_info *buf)
{
	struct ustcomm_header _send_hdr, *send_hdr;
	struct ustcomm_header _recv_hdr, *recv_hdr;
	struct ustcomm_buffer_info _send_msg, _recv_msg;
	struct ustcomm_buffer_info *send_msg, *recv_msg;
	int result;

	send_hdr = &_send_hdr;
	recv_hdr = &_recv_hdr;
	send_msg = &_send_msg;
	recv_msg = &_recv_msg;

	result = ustcomm_pack_buffer_info(send_hdr, send_msg, buf->trace,
					  buf->channel, buf->channel_cpu);
	if (result < 0) {
		return result;
	}

	send_hdr->command = GET_SUBBUFFER;

	result = ustcomm_req(buf->app_sock, send_hdr, (char *)send_msg,
			     recv_hdr, (char *)recv_msg);
	if ((result < 0 && (errno == ECONNRESET || errno == EPIPE)) ||
	    result == 0) {
		DBG("app died while being traced");
		return GET_SUBBUF_DIED;
	} else if (result < 0) {
		ERR("get_subbuffer: ustcomm_req failed");
		return result;
	}

	if (!recv_hdr->result) {
		DBG("got subbuffer %s", buf->name);
		buf->consumed_old = recv_msg->consumed_old;
		return GET_SUBBUF_OK;
	} else if (recv_hdr->result == -ENODATA) {
		DBG("For buffer %s, the trace was not found. This likely means"
		    " it was destroyed by the user.", buf->name);
		return GET_SUBBUF_DIED;
	}

	DBG("error getting subbuffer %s", buf->name);
	return recv_hdr->result;
}

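/*
 * Hand the current sub-buffer of @buf back to the traced application,
 * passing along the consumed count saved by get_subbuffer().
 */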
static int put_subbuffer(struct buffer_info *buf)
{
	struct ustcomm_header _send_hdr, *send_hdr;
	struct ustcomm_header _recv_hdr, *recv_hdr;
	struct ustcomm_buffer_info _send_msg, *send_msg;
	int result;

	send_hdr = &_send_hdr;
	recv_hdr = &_recv_hdr;
	send_msg = &_send_msg;

	result = ustcomm_pack_buffer_info(send_hdr, send_msg, buf->trace,
					  buf->channel, buf->channel_cpu);
	if (result < 0) {
		return result;
	}

	send_hdr->command = PUT_SUBBUFFER;
	send_msg->consumed_old = buf->consumed_old;

	result = ustcomm_req(buf->app_sock, send_hdr, (char *)send_msg,
			     recv_hdr, NULL);
	if ((result < 0 && (errno == ECONNRESET || errno == EPIPE)) ||
	    result == 0) {
		DBG("app died while being traced");
		return PUT_SUBBUF_DIED;
	} else if (result < 0) {
		ERR("put_subbuffer: ustcomm_req failed");
		return result;
	}

	if (!recv_hdr->result) {
		DBG("put subbuffer %s", buf->name);
		return PUT_SUBBUF_OK;
	} else if (recv_hdr->result == -ENODATA) {
		DBG("For buffer %s, the trace was not found. This likely means"
		    " it was destroyed by the user.", buf->name);
		return PUT_SUBBUF_DIED;
	}

	DBG("error putting subbuffer %s", buf->name);
	return recv_hdr->result;
}

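/*
 * pthread cleanup handler: runs when a consumer thread exits or is
 * cancelled, so the active buffer count stays accurate.
 */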
void decrement_active_buffers(void *arg)
{
	struct ustconsumer_instance *instance = arg;

	pthread_mutex_lock(&instance->mutex);
	instance->active_buffers--;
	pthread_mutex_unlock(&instance->mutex);
}

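/*
 * Fetch the application's pidunique, a per-process unique identifier
 * that tells apart successive processes reusing the same pid.
 */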
static int get_pidunique(int sock, int64_t *pidunique)
{
	struct ustcomm_header _send_hdr, *send_hdr;
	struct ustcomm_header _recv_hdr, *recv_hdr;
	struct ustcomm_pidunique _recv_msg, *recv_msg;
	int result;

	send_hdr = &_send_hdr;
	recv_hdr = &_recv_hdr;
	recv_msg = &_recv_msg;

	memset(send_hdr, 0, sizeof(*send_hdr));

	send_hdr->command = GET_PIDUNIQUE;
	result = ustcomm_req(sock, send_hdr, NULL, recv_hdr, (char *)recv_msg);
	if (result < 1) {
		return -ENOTCONN;
	}
	if (recv_hdr->result < 0) {
		/* result holds a negative errno value */
		ERR("App responded with error: %s", strerror(-recv_hdr->result));
		return recv_hdr->result;
	}

	*pidunique = recv_msg->pidunique;

	return 0;
}

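/*
 * Retrieve the SysV shm ids of the buffer data and buffer-structure
 * segments, plus the wakeup pipe fd, which the application passes over
 * the unix socket as ancillary data.
 */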
static int get_buf_shmid_pipe_fd(int sock, struct buffer_info *buf,
				 int *buf_shmid, int *buf_struct_shmid,
				 int *buf_pipe_fd)
{
	struct ustcomm_header _send_hdr, *send_hdr;
	struct ustcomm_header _recv_hdr, *recv_hdr;
	struct ustcomm_buffer_info _send_msg, *send_msg;
	struct ustcomm_buffer_info _recv_msg, *recv_msg;
	int result, recv_pipe_fd;

	send_hdr = &_send_hdr;
	recv_hdr = &_recv_hdr;
	send_msg = &_send_msg;
	recv_msg = &_recv_msg;

	result = ustcomm_pack_buffer_info(send_hdr, send_msg, buf->trace,
					  buf->channel, buf->channel_cpu);
	if (result < 0) {
		ERR("Failed to pack buffer info");
		return result;
	}

	send_hdr->command = GET_BUF_SHMID_PIPE_FD;

	result = ustcomm_send(sock, send_hdr, (char *)send_msg);
	if (result < 1) {
		ERR("Failed to send request");
		return -ENOTCONN;
	}
	result = ustcomm_recv_fd(sock, recv_hdr, (char *)recv_msg, &recv_pipe_fd);
	if (result < 1) {
		ERR("Failed to receive message and fd");
		return -ENOTCONN;
	}
	if (recv_hdr->result < 0) {
		/* result holds a negative errno value */
		ERR("App responded with error %s", strerror(-recv_hdr->result));
		return recv_hdr->result;
	}

	*buf_shmid = recv_msg->buf_shmid;
	*buf_struct_shmid = recv_msg->buf_struct_shmid;
	*buf_pipe_fd = recv_pipe_fd;

	return 0;
}

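/*
 * Ask the application for the sub-buffer count and size of the channel
 * that @buf belongs to.
 */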
static int get_subbuf_num_size(int sock, struct buffer_info *buf,
			       int *subbuf_num, int *subbuf_size)
{
	struct ustcomm_header _send_hdr, *send_hdr;
	struct ustcomm_header _recv_hdr, *recv_hdr;
	struct ustcomm_channel_info _send_msg, *send_msg;
	struct ustcomm_channel_info _recv_msg, *recv_msg;
	int result;

	send_hdr = &_send_hdr;
	recv_hdr = &_recv_hdr;
	send_msg = &_send_msg;
	recv_msg = &_recv_msg;

	result = ustcomm_pack_channel_info(send_hdr, send_msg, buf->trace,
					   buf->channel);
	if (result < 0) {
		return result;
	}

	send_hdr->command = GET_SUBBUF_NUM_SIZE;

	result = ustcomm_req(sock, send_hdr, (char *)send_msg,
			     recv_hdr, (char *)recv_msg);
	if (result < 1) {
		return -ENOTCONN;
	}

	*subbuf_num = recv_msg->subbuf_num;
	*subbuf_size = recv_msg->subbuf_size;

	return recv_hdr->result;
}

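/*
 * Tell the application that the consumer has finished mapping the
 * buffer's shared memory segments.
 */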
static int notify_buffer_mapped(int sock, struct buffer_info *buf)
{
	struct ustcomm_header _send_hdr, *send_hdr;
	struct ustcomm_header _recv_hdr, *recv_hdr;
	struct ustcomm_buffer_info _send_msg, *send_msg;
	int result;

	send_hdr = &_send_hdr;
	recv_hdr = &_recv_hdr;
	send_msg = &_send_msg;

	result = ustcomm_pack_buffer_info(send_hdr, send_msg, buf->trace,
					  buf->channel, buf->channel_cpu);
	if (result < 0) {
		return result;
	}

	send_hdr->command = NOTIFY_BUF_MAPPED;

	result = ustcomm_req(sock, send_hdr, (char *)send_msg,
			     recv_hdr, NULL);
	if (result < 1) {
		return -ENOTCONN;
	}

	return recv_hdr->result;
}

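/*
 * Connect to the application owning (trace, channel, channel_cpu) and
 * set up everything needed to consume the buffer: application socket,
 * shared memory mappings, wakeup pipe and sub-buffer geometry.
 * Returns NULL on failure.
 */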
struct buffer_info *connect_buffer(struct ustconsumer_instance *instance, pid_t pid,
				   const char *trace, const char *channel,
				   int channel_cpu)
{
	struct buffer_info *buf;
	int result;
	struct shmid_ds shmds;

	buf = (struct buffer_info *) zmalloc(sizeof(struct buffer_info));
	if(buf == NULL) {
		ERR("add_buffer: insufficient memory");
		return NULL;
	}

	buf->trace = strdup(trace);
	if (!buf->trace) {
		goto free_buf;
	}

	buf->channel = strdup(channel);
	if (!buf->channel) {
		goto free_buf_trace;
	}

	result = asprintf(&buf->name, "%s_%d", channel, channel_cpu);
	if (result < 0 || buf->name == NULL) {
		goto free_buf_channel;
	}

	buf->channel_cpu = channel_cpu;
	buf->pid = pid;

	result = ustcomm_connect_app(buf->pid, &buf->app_sock);
	if(result) {
		WARN("unable to connect to process, it probably died before we were able to connect");
		goto free_buf_name;
	}

	/* get pidunique */
	result = get_pidunique(buf->app_sock, &buf->pidunique);
	if (result < 0) {
		ERR("Failed to get pidunique");
		goto close_app_sock;
	}

	/* get shmid and pipe fd */
	result = get_buf_shmid_pipe_fd(buf->app_sock, buf, &buf->shmid,
				       &buf->bufstruct_shmid, &buf->pipe_fd);
	if (result < 0) {
		ERR("Failed to get buf_shmid and pipe_fd");
		goto close_app_sock;
	} else {
		struct stat temp;

		result = fstat(buf->pipe_fd, &temp);
		if (result == -1) {
			PERROR("fstat");
			goto close_fifo;
		}
		if (!S_ISFIFO(temp.st_mode)) {
			ERR("Didn't receive a fifo from the app");
			goto close_fifo;
		}
	}

	/* get number of subbufs and subbuf size */
	result = get_subbuf_num_size(buf->app_sock, buf, &buf->n_subbufs,
				     &buf->subbuf_size);
	if (result < 0) {
		ERR("Failed to get subbuf number and size");
		goto close_fifo;
	}

	/* Set subbuffer's information */
	buf->subbuf_size_order = get_count_order(buf->subbuf_size);
	buf->alloc_size = buf->subbuf_size * buf->n_subbufs;

	/* attach memory; shmat returns (void *) -1 on failure */
	buf->mem = shmat(buf->shmid, NULL, 0);
	if(buf->mem == (void *) -1) {
		PERROR("shmat");
		goto close_fifo;
	}
	DBG("successfully attached buffer memory");

	buf->bufstruct_mem = shmat(buf->bufstruct_shmid, NULL, 0);
	if(buf->bufstruct_mem == (void *) -1) {
		PERROR("shmat");
		goto shmdt_mem;
	}
	DBG("successfully attached buffer bufstruct memory");

	/* obtain info on the memory segment */
	result = shmctl(buf->shmid, IPC_STAT, &shmds);
	if(result == -1) {
		PERROR("shmctl");
		goto shmdt_bufstruct_mem;
	}
	buf->memlen = shmds.shm_segsz;

	/* Notify the application that we have mapped the buffer */
	result = notify_buffer_mapped(buf->app_sock, buf);
	if (result < 0) {
		goto shmdt_bufstruct_mem;
	}

	if(instance->callbacks->on_open_buffer)
		instance->callbacks->on_open_buffer(instance->callbacks, buf);

	pthread_mutex_lock(&instance->mutex);
	instance->active_buffers++;
	pthread_mutex_unlock(&instance->mutex);

	return buf;

shmdt_bufstruct_mem:
	shmdt(buf->bufstruct_mem);

shmdt_mem:
	shmdt(buf->mem);

close_fifo:
	close(buf->pipe_fd);

close_app_sock:
	close(buf->app_sock);

free_buf_name:
	free(buf->name);

free_buf_channel:
	free(buf->channel);

free_buf_trace:
	free(buf->trace);

free_buf:
	free(buf);
	return NULL;
}

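/*
 * Release everything acquired by connect_buffer(): the wakeup pipe,
 * the application socket and both shared memory mappings.
 */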
static void destroy_buffer(struct ustconsumer_callbacks *callbacks,
			   struct buffer_info *buf)
{
	int result;

	result = close(buf->pipe_fd);
	if(result == -1) {
		WARN("problem closing the pipe fd");
	}

	result = close(buf->app_sock);
	if(result == -1) {
		WARN("problem closing the app socket");
	}

	result = shmdt(buf->mem);
	if(result == -1) {
		PERROR("shmdt");
	}

	result = shmdt(buf->bufstruct_mem);
	if(result == -1) {
		PERROR("shmdt");
	}

	if(callbacks->on_close_buffer)
		callbacks->on_close_buffer(callbacks, buf);

	/* free the strings allocated in connect_buffer() */
	free(buf->name);
	free(buf->channel);
	free(buf->trace);
	free(buf);
}

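/*
 * Per-buffer consumption loop: block until the application writes a
 * byte to the wakeup pipe, then get the filled sub-buffer, hand it to
 * the on_read_subbuffer callback and put it back.  Exits when the
 * buffer is finished or the application dies.
 */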
int consumer_loop(struct ustconsumer_instance *instance, struct buffer_info *buf)
{
	int result = 0;
	int read_result;
	char read_buf;

	pthread_cleanup_push(decrement_active_buffers, instance);

	for(;;) {
		read_result = read(buf->pipe_fd, &read_buf, 1);
		/* get the subbuffer */
		if (read_result == 1) {
			result = get_subbuffer(buf);
			if (result < 0) {
				ERR("error getting subbuffer");
				continue;
			} else if (result == GET_SUBBUF_DIED) {
				finish_consuming_dead_subbuffer(instance->callbacks, buf);
				break;
			}
		} else if ((read_result == -1 && (errno == ECONNRESET || errno == EPIPE)) ||
			   read_result == 0) {
			DBG("App died while being traced");
			finish_consuming_dead_subbuffer(instance->callbacks, buf);
			break;
		} else if (read_result == -1 && errno == EINTR) {
			continue;
		}

		if(instance->callbacks->on_read_subbuffer)
			instance->callbacks->on_read_subbuffer(instance->callbacks, buf);

		/* put the subbuffer */
		result = put_subbuffer(buf);
		if(result < 0) {
			ERR("unknown error putting subbuffer (channel=%s)", buf->name);
			break;
		}
		else if(result == PUT_SUBBUF_PUSHED) {
			ERR("Buffer overflow (channel=%s), reader pushed. This channel will not be usable past this point.", buf->name);
			break;
		}
		else if(result == PUT_SUBBUF_DIED) {
			DBG("application died while putting subbuffer");
			/* Skip the first subbuffer. We are not sure it is trustable
			 * because the put_subbuffer() did not complete.
			 */
			/* TODO: check on_put_error return value */
			if(instance->callbacks->on_put_error)
				instance->callbacks->on_put_error(instance->callbacks, buf);

			finish_consuming_dead_subbuffer(instance->callbacks, buf);
			break;
		}
		else if(result == PUT_SUBBUF_DONE) {
			/* Done with this subbuffer */
			/* FIXME: add a case where this branch is used? Upon
			 * normal trace termination, at put_subbuf time, a
			 * special last-subbuffer code could be returned by
			 * the listener.
			 */
			break;
		}
		/* PUT_SUBBUF_OK: keep consuming */
	}

	DBG("thread for buffer %s is stopping", buf->name);

	/* FIXME: destroy, unalloc... */

	pthread_cleanup_pop(1);

	return 0;
}

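/*
 * Arguments handed to each per-buffer consumer thread.  The thread
 * owns the strings and frees them before exiting.
 */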
struct consumer_thread_args {
	pid_t pid;
	const char *trace;
	const char *channel;
	int channel_cpu;
	struct ustconsumer_instance *instance;
};

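/*
 * Thread entry point: one consumer thread is spawned per buffer.  It
 * blocks SIGTERM/SIGINT (handled by the main thread), connects to the
 * buffer, consumes it until completion and then tears it down.
 */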
void *consumer_thread(void *arg)
{
	struct buffer_info *buf;
	struct consumer_thread_args *args = (struct consumer_thread_args *) arg;
	int result;
	sigset_t sigset;

	pthread_mutex_lock(&args->instance->mutex);
	args->instance->active_threads++;
	pthread_mutex_unlock(&args->instance->mutex);

	if(args->instance->callbacks->on_new_thread)
		args->instance->callbacks->on_new_thread(args->instance->callbacks);

	/* Block signals that should be handled by the main thread. */
	result = sigemptyset(&sigset);
	if(result == -1) {
		PERROR("sigemptyset");
		goto end;
	}
	result = sigaddset(&sigset, SIGTERM);
	if(result == -1) {
		PERROR("sigaddset");
		goto end;
	}
	result = sigaddset(&sigset, SIGINT);
	if(result == -1) {
		PERROR("sigaddset");
		goto end;
	}
	/* pthread_sigmask is the per-thread variant of sigprocmask;
	 * it returns an error number instead of setting errno. */
	result = pthread_sigmask(SIG_BLOCK, &sigset, NULL);
	if(result) {
		errno = result;
		PERROR("pthread_sigmask");
		goto end;
	}

	buf = connect_buffer(args->instance, args->pid, args->trace,
			     args->channel, args->channel_cpu);
	if(buf == NULL) {
		ERR("failed to connect to buffer");
		goto end;
	}

	consumer_loop(args->instance, buf);

	destroy_buffer(args->instance->callbacks, buf);

end:

	if(args->instance->callbacks->on_close_thread)
		args->instance->callbacks->on_close_thread(args->instance->callbacks);

	pthread_mutex_lock(&args->instance->mutex);
	args->instance->active_threads--;
	pthread_mutex_unlock(&args->instance->mutex);

	free((void *)args->trace);
	free((void *)args->channel);
	free(args);
	return NULL;
}

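/*
 * Spawn a detached consumer thread for one buffer.  The thread args
 * carry private copies of the trace and channel names, freed by the
 * thread itself.
 */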
int start_consuming_buffer(struct ustconsumer_instance *instance, pid_t pid,
			   const char *trace, const char *channel,
			   int channel_cpu)
{
	pthread_t thr;
	struct consumer_thread_args *args;
	int result;

	DBG("beginning of start_consuming_buffer: args: pid %d bufname %s_%d", pid, channel,
	    channel_cpu);

	args = (struct consumer_thread_args *) zmalloc(sizeof(struct consumer_thread_args));
	if (!args) {
		return -ENOMEM;
	}

	args->pid = pid;
	args->trace = strdup(trace);
	args->channel = strdup(channel);
	args->channel_cpu = channel_cpu;
	args->instance = instance;
	if (!args->trace || !args->channel) {
		ERR("start_consuming_buffer: insufficient memory");
		goto free_args;
	}
	DBG("beginning2 of start_consuming_buffer: args: pid %d trace %s"
	    " bufname %s_%d", args->pid, args->trace, args->channel, args->channel_cpu);

	/* pthread_create/pthread_detach return an error number, not -1 */
	result = pthread_create(&thr, NULL, consumer_thread, args);
	if(result) {
		ERR("pthread_create failed");
		goto free_args;
	}
	result = pthread_detach(thr);
	if(result) {
		ERR("pthread_detach failed");
		return -1;
	}
	/* args now belongs to the thread; log the local copies instead */
	DBG("end of start_consuming_buffer: args: pid %d trace %s "
	    "bufname %s_%d", pid, trace, channel, channel_cpu);

	return 0;

free_args:
	free((void *)args->channel);
	free((void *)args->trace);
	free(args);
	return -1;
}
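
/*
 * Handle one command received on the consumer's listening socket.
 * CONSUME_BUFFER spawns a consumer thread; EXIT only exists to make
 * the epoll loop wake up and notice quit_program.
 */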
static void process_client_cmd(int sock, struct ustcomm_header *req_header,
			       char *recvbuf, struct ustconsumer_instance *instance)
{
	int result;
	struct ustcomm_header _res_header = {0};
	struct ustcomm_header *res_header = &_res_header;
	struct ustcomm_buffer_info *buf_inf;

	DBG("Processing client command");

	switch (req_header->command) {
	case CONSUME_BUFFER:

		buf_inf = (struct ustcomm_buffer_info *)recvbuf;
		result = ustcomm_unpack_buffer_info(buf_inf);
		if (result < 0) {
			ERR("Couldn't unpack buffer info");
			return;
		}

		DBG("Going to consume trace %s buffer %s_%d in process %d",
		    buf_inf->trace, buf_inf->channel, buf_inf->ch_cpu,
		    buf_inf->pid);
		result = start_consuming_buffer(instance, buf_inf->pid,
						buf_inf->trace,
						buf_inf->channel,
						buf_inf->ch_cpu);
		if (result < 0) {
			ERR("error in start_consuming_buffer");
			return;
		}

		res_header->result = 0;
		break;
	case EXIT:
		res_header->result = 0;
		/* Only there to force poll to return */
		break;
	default:
		res_header->result = -EINVAL;
		WARN("unknown command: %d", req_header->command);
	}

	if (ustcomm_send(sock, res_header, NULL) <= 0) {
		ERR("couldn't send command response");
	}
}

#define MAX_EVENTS 10

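/*
 * Main event loop of the consumer: accept connections on the named
 * socket, dispatch incoming commands, and keep running until
 * quit_program is set and all buffers and threads have drained.
 */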
int ustconsumer_start_instance(struct ustconsumer_instance *instance)
{
	struct ustcomm_header recv_hdr;
	char recv_buf[USTCOMM_BUFFER_SIZE];
	struct ustcomm_sock *epoll_sock;
	struct epoll_event events[MAX_EVENTS];
	struct sockaddr addr;
	int result, epoll_fd, accept_fd, nfds, i, addr_size, timeout;

	if(!instance->is_init) {
		ERR("libustconsumer instance not initialized");
		return 1;
	}
	epoll_fd = instance->epoll_fd;

	timeout = -1;

	/* app loop */
	for(;;) {
		nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, timeout);
		if (nfds == -1 && errno == EINTR) {
			/* Caught signal */
		} else if (nfds == -1) {
			PERROR("ustconsumer_start_instance: epoll_wait failed");
			continue;
		}

		for (i = 0; i < nfds; ++i) {
			epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
			if (epoll_sock == instance->listen_sock) {
				addr_size = sizeof(struct sockaddr);
				accept_fd = accept(epoll_sock->fd,
						   &addr,
						   (socklen_t *)&addr_size);
				if (accept_fd == -1) {
					PERROR("ustconsumer_start_instance: "
					       "accept failed");
					continue;
				}
				ustcomm_init_sock(accept_fd, epoll_fd,
						  &instance->connections);
			} else {
				result = ustcomm_recv(epoll_sock->fd, &recv_hdr,
						      recv_buf);
				if (result < 1) {
					ustcomm_del_sock(epoll_sock, 0);
				} else {
					process_client_cmd(epoll_sock->fd,
							   &recv_hdr, recv_buf,
							   instance);
				}
			}
		}

		if (instance->quit_program) {
			pthread_mutex_lock(&instance->mutex);
			if (instance->active_buffers == 0 && instance->active_threads == 0) {
				pthread_mutex_unlock(&instance->mutex);
				break;
			}
			pthread_mutex_unlock(&instance->mutex);
			timeout = 100;
		}
	}

	if(instance->callbacks->on_trace_end)
		instance->callbacks->on_trace_end(instance);

	ustconsumer_delete_instance(instance);

	return 0;
}

/* FIXME: threads and connections !? */
void ustconsumer_delete_instance(struct ustconsumer_instance *instance)
{
	if (instance->is_init) {
		ustcomm_del_named_sock(instance->listen_sock, 0);
		close(instance->epoll_fd);
	}

	pthread_mutex_destroy(&instance->mutex);
	free(instance->sock_path);
	free(instance);
}

/* FIXME: Do something about the fixed path length, maybe get rid
 * of the whole concept and use a pipe?
 */
int ustconsumer_stop_instance(struct ustconsumer_instance *instance, int send_msg)
{
	int result;
	int fd;
	int bytes = 0;
	struct sockaddr_un addr;

	char msg[] = "exit";

	instance->quit_program = 1;

	if(!send_msg)
		return 0;

	/* Send a message through the socket to force poll to return */

socket_again:
	result = fd = socket(PF_UNIX, SOCK_STREAM, 0);
	if(result == -1) {
		if (errno == EINTR)
			goto socket_again;
		PERROR("socket");
		return 1;
	}

	addr.sun_family = AF_UNIX;

	strncpy(addr.sun_path, instance->sock_path, UNIX_PATH_MAX);
	addr.sun_path[UNIX_PATH_MAX-1] = '\0';

connect_again:
	result = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
	if(result == -1) {
		if (errno == EINTR)
			goto connect_again;
		PERROR("connect");
		close(fd);
		return 1;
	}

	/* send may transfer fewer bytes than requested; restart from the
	 * current offset until the whole message is out */
	while(bytes != sizeof(msg)) {
		int inc = send(fd, msg + bytes, sizeof(msg) - bytes, 0);
		if (inc < 0) {
			if (errno == EINTR)
				continue;
			PERROR("send");
			break;
		}
		bytes += inc;
	}

	close(fd);

	return 0;
}

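/*
 * Allocate and minimally initialize an instance.  The instance is not
 * usable until ustconsumer_init_instance() has set up its sockets.
 */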
struct ustconsumer_instance
*ustconsumer_new_instance(struct ustconsumer_callbacks *callbacks,
			  char *sock_path)
{
	struct ustconsumer_instance *instance =
		zmalloc(sizeof(struct ustconsumer_instance));
	if(!instance) {
		return NULL;
	}

	instance->callbacks = callbacks;
	instance->quit_program = 0;
	instance->is_init = 0;
	instance->active_buffers = 0;
	pthread_mutex_init(&instance->mutex, NULL);

	if (sock_path) {
		instance->sock_path = strdup(sock_path);
		if (!instance->sock_path) {
			free(instance);
			return NULL;
		}
	} else {
		instance->sock_path = NULL;
	}

	return instance;
}

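/*
 * Build the listening socket path (either the user-supplied one or
 * SOCK_DIR/ustconsumer), then create the epoll set and the named
 * socket.
 */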
static int init_ustconsumer_socket(struct ustconsumer_instance *instance)
{
	char *name;

	if (instance->sock_path) {
		if (asprintf(&name, "%s", instance->sock_path) < 0) {
			ERR("ustcomm_init_ustconsumer : asprintf failed (sock_path %s)",
			    instance->sock_path);
			return -1;
		}
	} else {
		int result;

		/* Only check if socket dir exists if we are using the default directory */
		result = ensure_dir_exists(SOCK_DIR, S_IRWXU | S_IRWXG | S_IRWXO);
		if (result == -1) {
			ERR("Unable to create socket directory %s", SOCK_DIR);
			return -1;
		}

		if (asprintf(&name, "%s/%s", SOCK_DIR, "ustconsumer") < 0) {
			ERR("ustcomm_init_ustconsumer : asprintf failed (%s/ustconsumer)",
			    SOCK_DIR);
			return -1;
		}
	}

	/* Set up epoll */
	instance->epoll_fd = epoll_create(MAX_EVENTS);
	if (instance->epoll_fd == -1) {
		ERR("epoll_create failed, start instance bailing");
		goto free_name;
	}

	/* Create the named socket */
	instance->listen_sock = ustcomm_init_named_socket(name,
							  instance->epoll_fd);
	if(!instance->listen_sock) {
		ERR("error initializing named socket at %s", name);
		goto close_epoll;
	}

	CDS_INIT_LIST_HEAD(&instance->connections);

	free(name);

	return 0;

close_epoll:
	close(instance->epoll_fd);
free_name:
	free(name);

	return -1;
}

int ustconsumer_init_instance(struct ustconsumer_instance *instance)
{
	int result;

	result = init_ustconsumer_socket(instance);
	if(result == -1) {
		ERR("failed to initialize socket");
		return 1;
	}
	instance->is_init = 1;
	return 0;
}

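/*
 * Minimal usage sketch (not part of the library): a consumer daemon is
 * expected to allocate an instance, initialize it, then run the event
 * loop.  The callbacks structure shown here is a hypothetical
 * placeholder filled in by the daemon.
 *
 *	struct ustconsumer_callbacks *cb = ...;
 *	struct ustconsumer_instance *inst;
 *
 *	inst = ustconsumer_new_instance(cb, NULL); // NULL: default socket dir
 *	if (!inst || ustconsumer_init_instance(inst))
 *		return 1;
 *	ustconsumer_start_instance(inst); // blocks; deletes inst on return
 */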