Only ignore the top config.h.in
[lttng-ust.git] / libust / tracectl.c
1 /* Copyright (C) 2009 Pierre-Marc Fournier
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18 /* This file contains the implementation of the UST listener thread, which
19 * receives trace control commands. It also coordinates the initialization of
20 * libust.
21 */
22
23 #define _GNU_SOURCE
24 #define _LGPL_SOURCE
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <stdint.h>
28 #include <pthread.h>
29 #include <signal.h>
30 #include <sys/epoll.h>
31 #include <sys/time.h>
32 #include <sys/types.h>
33 #include <sys/socket.h>
34 #include <fcntl.h>
35 #include <poll.h>
36 #include <regex.h>
37 #include <urcu/uatomic_arch.h>
38 #include <urcu/list.h>
39
40 #include <ust/marker.h>
41 #include <ust/tracepoint.h>
42 #include <ust/tracectl.h>
43 #include <ust/clock.h>
44 #include "tracer.h"
45 #include "usterr_signal_safe.h"
46 #include "ustcomm.h"
47 #include "buffers.h"
48 #include "marker-control.h"
49
50 /* This should only be accessed by the constructor, before the creation
51 * of the listener, and then only by the listener.
52 */
53 s64 pidunique = -1LL;
54
55 /* The process pid is used to detect a non-traceable fork
56 * and allow the non-traceable fork to be ignored
57 * by destructor sequences in libust
58 */
59 static pid_t processpid = 0;
60
61 static struct ustcomm_header _receive_header;
62 static struct ustcomm_header *receive_header = &_receive_header;
63 static char receive_buffer[USTCOMM_BUFFER_SIZE];
64 static char send_buffer[USTCOMM_BUFFER_SIZE];
65
66 static int epoll_fd;
67
68 /*
69 * Listener thread data vs fork() protection mechanism. Ensures that no listener
70 * thread mutexes and data structures are being concurrently modified or held by
71 * other threads when fork() is executed.
72 */
73 static pthread_mutex_t listener_thread_data_mutex = PTHREAD_MUTEX_INITIALIZER;
74
75 /* Mutex protecting listen_sock. Nests inside listener_thread_data_mutex. */
76 static pthread_mutex_t listen_sock_mutex = PTHREAD_MUTEX_INITIALIZER;
77 static struct ustcomm_sock *listen_sock;
78
79 extern struct chan_info_struct chan_infos[];
80
81 static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);
82
/* Shared between the listener and the main thread; not declared volatile —
 * all accesses go through CMM_LOAD_SHARED()/CMM_STORE_SHARED(). */
84 int buffers_to_export = 0;
85
86 int ust_clock_source;
87
88 static long long make_pidunique(void)
89 {
90 s64 retval;
91 struct timeval tv;
92
93 gettimeofday(&tv, NULL);
94
95 retval = tv.tv_sec;
96 retval <<= 32;
97 retval |= tv.tv_usec;
98
99 return retval;
100 }
101
102 static void print_ust_marker(FILE *fp)
103 {
104 struct ust_marker_iter iter;
105
106 lock_ust_marker();
107 ust_marker_iter_reset(&iter);
108 ust_marker_iter_start(&iter);
109
110 while (iter.ust_marker) {
111 fprintf(fp, "ust_marker: %s/%s %d \"%s\" %p\n",
112 (*iter.ust_marker)->channel,
113 (*iter.ust_marker)->name,
114 (int)(*iter.ust_marker)->state,
115 (*iter.ust_marker)->format,
116 NULL); /*
117 * location is null for now, will be added
118 * to a different table.
119 */
120 ust_marker_iter_next(&iter);
121 }
122 unlock_ust_marker();
123 }
124
125 static void print_trace_events(FILE *fp)
126 {
127 struct trace_event_iter iter;
128
129 lock_trace_events();
130 trace_event_iter_reset(&iter);
131 trace_event_iter_start(&iter);
132
133 while (iter.trace_event) {
134 fprintf(fp, "trace_event: %s\n", (*iter.trace_event)->name);
135 trace_event_iter_next(&iter);
136 }
137 unlock_trace_events();
138 }
139
140 static int connect_ustconsumer(void)
141 {
142 int result, fd;
143 char default_daemon_path[] = SOCK_DIR "/ustconsumer";
144 char *explicit_daemon_path, *daemon_path;
145
146 explicit_daemon_path = getenv("UST_DAEMON_SOCKET");
147 if (explicit_daemon_path) {
148 daemon_path = explicit_daemon_path;
149 } else {
150 daemon_path = default_daemon_path;
151 }
152
153 DBG("Connecting to daemon_path %s", daemon_path);
154
155 result = ustcomm_connect_path(daemon_path, &fd);
156 if (result < 0) {
157 WARN("connect_ustconsumer failed, daemon_path: %s",
158 daemon_path);
159 return result;
160 }
161
162 return fd;
163 }
164
165
166 static void request_buffer_consumer(int sock,
167 const char *trace,
168 const char *channel,
169 int cpu)
170 {
171 struct ustcomm_header send_header, recv_header;
172 struct ustcomm_buffer_info buf_inf;
173 int result = 0;
174
175 result = ustcomm_pack_buffer_info(&send_header,
176 &buf_inf,
177 trace,
178 channel,
179 cpu);
180
181 if (result < 0) {
182 ERR("failed to pack buffer info message %s_%d",
183 channel, cpu);
184 return;
185 }
186
187 buf_inf.pid = getpid();
188 send_header.command = CONSUME_BUFFER;
189
190 result = ustcomm_req(sock, &send_header, (char *) &buf_inf,
191 &recv_header, NULL);
192 if (result <= 0) {
193 PERROR("request for buffer consumer failed, is the daemon online?");
194 }
195
196 return;
197 }
198
199 /* Ask the daemon to collect a trace called trace_name and being
200 * produced by this pid.
201 *
202 * The trace must be at least allocated. (It can also be started.)
203 * This is because _ltt_trace_find is used.
204 */
205
206 static void inform_consumer_daemon(const char *trace_name)
207 {
208 int sock, i,j;
209 struct ust_trace *trace;
210 const char *ch_name;
211
212 sock = connect_ustconsumer();
213 if (sock < 0) {
214 return;
215 }
216
217 DBG("Connected to ustconsumer");
218
219 ltt_lock_traces();
220
221 trace = _ltt_trace_find(trace_name);
222 if (trace == NULL) {
223 WARN("inform_consumer_daemon: could not find trace \"%s\"; it is probably already destroyed", trace_name);
224 goto unlock_traces;
225 }
226
227 for (i=0; i < trace->nr_channels; i++) {
228 if (trace->channels[i].request_collection) {
229 /* iterate on all cpus */
230 for (j=0; j<trace->channels[i].n_cpus; j++) {
231 ch_name = trace->channels[i].channel_name;
232 request_buffer_consumer(sock, trace_name,
233 ch_name, j);
234 CMM_STORE_SHARED(buffers_to_export,
235 CMM_LOAD_SHARED(buffers_to_export)+1);
236 }
237 }
238 }
239
240 unlock_traces:
241 ltt_unlock_traces();
242
243 close(sock);
244 }
245
246 static struct ust_channel *find_channel(const char *ch_name,
247 struct ust_trace *trace)
248 {
249 int i;
250
251 for (i=0; i<trace->nr_channels; i++) {
252 if (!strcmp(trace->channels[i].channel_name, ch_name)) {
253 return &trace->channels[i];
254 }
255 }
256
257 return NULL;
258 }
259
260 static int get_buffer_shmid_pipe_fd(const char *trace_name, const char *ch_name,
261 int ch_cpu,
262 int *buf_shmid,
263 int *buf_struct_shmid,
264 int *buf_pipe_fd)
265 {
266 struct ust_trace *trace;
267 struct ust_channel *channel;
268 struct ust_buffer *buf;
269
270 DBG("get_buffer_shmid_pipe_fd");
271
272 ltt_lock_traces();
273 trace = _ltt_trace_find(trace_name);
274 ltt_unlock_traces();
275
276 if (trace == NULL) {
277 ERR("cannot find trace!");
278 return -ENODATA;
279 }
280
281 channel = find_channel(ch_name, trace);
282 if (!channel) {
283 ERR("cannot find channel %s!", ch_name);
284 return -ENODATA;
285 }
286
287 buf = channel->buf[ch_cpu];
288
289 *buf_shmid = buf->shmid;
290 *buf_struct_shmid = channel->buf_struct_shmids[ch_cpu];
291 *buf_pipe_fd = buf->data_ready_fd_read;
292
293 return 0;
294 }
295
296 static int get_subbuf_num_size(const char *trace_name, const char *ch_name,
297 int *num, int *size)
298 {
299 struct ust_trace *trace;
300 struct ust_channel *channel;
301
302 DBG("get_subbuf_size");
303
304 ltt_lock_traces();
305 trace = _ltt_trace_find(trace_name);
306 ltt_unlock_traces();
307
308 if (!trace) {
309 ERR("cannot find trace!");
310 return -ENODATA;
311 }
312
313 channel = find_channel(ch_name, trace);
314 if (!channel) {
315 ERR("unable to find channel");
316 return -ENODATA;
317 }
318
319 *num = channel->subbuf_cnt;
320 *size = channel->subbuf_size;
321
322 return 0;
323 }
324
325 /* Return the power of two which is equal or higher to v */
326
static unsigned int pow2_higher_or_eq(unsigned int v)
{
	/*
	 * Return the smallest power of two >= v.
	 *
	 * Rewritten with the portable "bit smear" idiom instead of
	 * fls(): the old fls-based version invoked undefined behavior
	 * (1 << -1) for v == 0, and fls() is not part of standard C.
	 * For v == 0 we now define the result as 1, the smallest power
	 * of two.  Results are unchanged for all other inputs.
	 */
	if (v == 0)
		return 1;

	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;

	return v + 1;
}
337
338 static int set_subbuf_size(const char *trace_name, const char *ch_name,
339 unsigned int size)
340 {
341 unsigned int power;
342 int retval = 0;
343 struct ust_trace *trace;
344 struct ust_channel *channel;
345
346 DBG("set_subbuf_size");
347
348 power = pow2_higher_or_eq(size);
349 power = max_t(unsigned int, 2u, power);
350 if (power != size) {
351 WARN("using the next power of two for buffer size = %u\n", power);
352 }
353
354 ltt_lock_traces();
355 trace = _ltt_trace_find_setup(trace_name);
356 if (trace == NULL) {
357 ERR("cannot find trace!");
358 retval = -ENODATA;
359 goto unlock_traces;
360 }
361
362 channel = find_channel(ch_name, trace);
363 if (!channel) {
364 ERR("unable to find channel");
365 retval = -ENODATA;
366 goto unlock_traces;
367 }
368
369 channel->subbuf_size = power;
370 DBG("the set_subbuf_size for the requested channel is %zu", channel->subbuf_size);
371
372 unlock_traces:
373 ltt_unlock_traces();
374
375 return retval;
376 }
377
378 static int set_subbuf_num(const char *trace_name, const char *ch_name,
379 unsigned int num)
380 {
381 struct ust_trace *trace;
382 struct ust_channel *channel;
383 int retval = 0;
384
385 DBG("set_subbuf_num");
386
387 if (num < 2) {
388 ERR("subbuffer count should be greater than 2");
389 return -EINVAL;
390 }
391
392 ltt_lock_traces();
393 trace = _ltt_trace_find_setup(trace_name);
394 if (trace == NULL) {
395 ERR("cannot find trace!");
396 retval = -ENODATA;
397 goto unlock_traces;
398 }
399
400 channel = find_channel(ch_name, trace);
401 if (!channel) {
402 ERR("unable to find channel");
403 retval = -ENODATA;
404 goto unlock_traces;
405 }
406
407 channel->subbuf_cnt = num;
408 DBG("the set_subbuf_cnt for the requested channel is %u", channel->subbuf_cnt);
409
410 unlock_traces:
411 ltt_unlock_traces();
412 return retval;
413 }
414
415 static int get_subbuffer(const char *trace_name, const char *ch_name,
416 int ch_cpu, long *consumed_old)
417 {
418 int retval = 0;
419 struct ust_trace *trace;
420 struct ust_channel *channel;
421 struct ust_buffer *buf;
422
423 DBG("get_subbuf");
424
425 *consumed_old = 0;
426
427 ltt_lock_traces();
428 trace = _ltt_trace_find(trace_name);
429
430 if (!trace) {
431 DBG("Cannot find trace. It was likely destroyed by the user.");
432 retval = -ENODATA;
433 goto unlock_traces;
434 }
435
436 channel = find_channel(ch_name, trace);
437 if (!channel) {
438 ERR("unable to find channel");
439 retval = -ENODATA;
440 goto unlock_traces;
441 }
442
443 buf = channel->buf[ch_cpu];
444
445 retval = ust_buffers_get_subbuf(buf, consumed_old);
446 if (retval < 0) {
447 WARN("missed buffer?");
448 }
449
450 unlock_traces:
451 ltt_unlock_traces();
452
453 return retval;
454 }
455
456
/*
 * Handle the consumer daemon's notification that it has mapped one
 * per-cpu buffer into its address space.  Returns 0 on success,
 * -ENODATA when the trace or channel cannot be found (e.g. destroyed
 * by the user in the meantime).
 */
static int notify_buffer_mapped(const char *trace_name,
				const char *ch_name,
				int ch_cpu)
{
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("get_buffer_fd");

	/* Lock held for the whole operation: trace/channel/buffer must
	 * not be freed under us. */
	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);

	if (!trace) {
		retval = -ENODATA;
		DBG("Cannot find trace. It was likely destroyed by the user.");
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		retval = -ENODATA;
		ERR("unable to find channel");
		goto unlock_traces;
	}

	buf = channel->buf[ch_cpu];

	/* Being here is the proof the daemon has mapped the buffer in its
	 * memory. We may now decrement buffers_to_export.
	 */
	/* Only decrement on what looks like the first notification for
	 * this buffer (nothing consumed yet); presumably a re-map after
	 * consumption must not be counted twice — TODO confirm against
	 * the consumer protocol. */
	if (uatomic_read(&buf->consumed) == 0) {
		DBG("decrementing buffers_to_export");
		CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
	}

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
499
500 static int put_subbuffer(const char *trace_name, const char *ch_name,
501 int ch_cpu, long consumed_old)
502 {
503 int retval = 0;
504 struct ust_trace *trace;
505 struct ust_channel *channel;
506 struct ust_buffer *buf;
507
508 DBG("put_subbuf");
509
510 ltt_lock_traces();
511 trace = _ltt_trace_find(trace_name);
512
513 if (!trace) {
514 retval = -ENODATA;
515 DBG("Cannot find trace. It was likely destroyed by the user.");
516 goto unlock_traces;
517 }
518
519 channel = find_channel(ch_name, trace);
520 if (!channel) {
521 retval = -ENODATA;
522 ERR("unable to find channel");
523 goto unlock_traces;
524 }
525
526 buf = channel->buf[ch_cpu];
527
528 retval = ust_buffers_put_subbuf(buf, consumed_old);
529 if (retval < 0) {
530 WARN("ust_buffers_put_subbuf: error (subbuf=%s_%d)",
531 ch_name, ch_cpu);
532 } else {
533 DBG("ust_buffers_put_subbuf: success (subbuf=%s_%d)",
534 ch_name, ch_cpu);
535 }
536
537 unlock_traces:
538 ltt_unlock_traces();
539
540 return retval;
541 }
542
543 static void release_listener_mutex(void *ptr)
544 {
545 pthread_mutex_unlock(&listener_thread_data_mutex);
546 }
547
548 static void listener_cleanup(void *ptr)
549 {
550 pthread_mutex_lock(&listen_sock_mutex);
551 if (listen_sock) {
552 ustcomm_del_named_sock(listen_sock, 0);
553 listen_sock = NULL;
554 }
555 pthread_mutex_unlock(&listen_sock_mutex);
556 }
557
558 static int force_subbuf_switch(const char *trace_name)
559 {
560 struct ust_trace *trace;
561 int i, j, retval = 0;
562
563 ltt_lock_traces();
564 trace = _ltt_trace_find(trace_name);
565 if (!trace) {
566 retval = -ENODATA;
567 DBG("Cannot find trace. It was likely destroyed by the user.");
568 goto unlock_traces;
569 }
570
571 for (i = 0; i < trace->nr_channels; i++) {
572 for (j = 0; j < trace->channels[i].n_cpus; j++) {
573 ltt_force_switch(trace->channels[i].buf[j],
574 FORCE_FLUSH);
575 }
576 }
577
578 unlock_traces:
579 ltt_unlock_traces();
580
581 return retval;
582 }
583
/*
 * Execute one trace-level control command on the named trace.
 * Returns 0 on success or the negative error from the failing
 * ltt_trace_* step.  Unknown commands fall through the switch and
 * return 0 (treated as success).
 */
static int process_trace_cmd(int command, char *trace_name)
{
	int result;
	char trace_type[] = "ustrelay";

	switch(command) {
	case START:
		/* start is an operation that setups the trace, allocates it and starts it */
		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}

		/* Tell the consumer daemon about the freshly allocated
		 * buffers before the trace starts producing. */
		inform_consumer_daemon(trace_name);

		result = ltt_trace_start(trace_name);
		if (result < 0) {
			ERR("ltt_trace_start failed");
			return result;
		}

		return 0;
	case SETUP_TRACE:
		/* Setup only: create the trace and set its transport type. */
		DBG("trace setup");

		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		return 0;
	case ALLOC_TRACE:
		/* Allocate buffers for an already set-up trace. */
		DBG("trace alloc");

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}
		inform_consumer_daemon(trace_name);

		return 0;

	case CREATE_TRACE:
		/* NOTE(review): identical to SETUP_TRACE above — setup +
		 * set type; kept as a separate command for the protocol. */
		DBG("trace create");

		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		return 0;
	case START_TRACE:
		DBG("trace start");

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}
		/* Only inform the consumer when the buffers were freshly
		 * allocated here (result == 0); presumably a non-zero
		 * non-negative result means already allocated — TODO
		 * confirm against ltt_trace_alloc's contract. */
		if (!result) {
			inform_consumer_daemon(trace_name);
		}

		result = ltt_trace_start(trace_name);
		if (result < 0) {
			ERR("ltt_trace_start failed");
			return result;
		}

		return 0;
	case STOP_TRACE:
		DBG("trace stop");

		result = ltt_trace_stop(trace_name);
		if (result < 0) {
			ERR("ltt_trace_stop failed");
			return result;
		}

		return 0;
	case DESTROY_TRACE:
		DBG("trace destroy");

		result = ltt_trace_destroy(trace_name, 0);
		if (result < 0) {
			ERR("ltt_trace_destroy failed");
			return result;
		}
		return 0;
	case FORCE_SUBBUF_SWITCH:
		/* Flush all buffers of the trace. */
		DBG("force switch");

		result = force_subbuf_switch(trace_name);
		if (result < 0) {
			ERR("force_subbuf_switch failed");
			return result;
		}
		return 0;
	}

	return 0;
}
714
715
/*
 * Handle a channel-level client command (get/set sub-buffer count and
 * size) and send the reply on sock.  Errors from the helpers are
 * propagated in reply_header->result; an unknown command sends back a
 * zeroed (success, empty) header.
 */
static void process_channel_cmd(int sock, int command,
				struct ustcomm_channel_info *ch_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	/* Reply payload is built in the shared file-level send_buffer. */
	struct ustcomm_channel_info *reply_msg =
		(struct ustcomm_channel_info *)send_buffer;
	int result, offset = 0, num, size;

	memset(reply_header, 0, sizeof(*reply_header));

	switch (command) {
	case GET_SUBBUF_NUM_SIZE:
		result = get_subbuf_num_size(ch_inf->trace,
					     ch_inf->channel,
					     &num, &size);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		/* Poison the pointer field so the receiver never
		 * dereferences an address from our address space. */
		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->subbuf_num = num;
		reply_msg->subbuf_size = size;


		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);

		break;
	case SET_SUBBUF_NUM:
		reply_header->result = set_subbuf_num(ch_inf->trace,
						      ch_inf->channel,
						      ch_inf->subbuf_num);

		break;
	case SET_SUBBUF_SIZE:
		reply_header->result = set_subbuf_size(ch_inf->trace,
						       ch_inf->channel,
						       ch_inf->subbuf_size);


		break;
	}
	if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
		ERR("ustcomm_send failed");
	}
}
763
/*
 * Handle a buffer-level client command and send the reply on sock.
 * GET_BUF_SHMID_PIPE_FD replies with ustcomm_send_fd() (the pipe fd
 * rides along as ancillary data) and returns early; all other commands
 * share the common ustcomm_send() at the bottom.
 */
static void process_buffer_cmd(int sock, int command,
			       struct ustcomm_buffer_info *buf_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	/* Reply payload is built in the shared file-level send_buffer. */
	struct ustcomm_buffer_info *reply_msg =
		(struct ustcomm_buffer_info *)send_buffer;
	int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
	long consumed_old;

	memset(reply_header, 0, sizeof(*reply_header));

	switch (command) {
	case GET_BUF_SHMID_PIPE_FD:
		result = get_buffer_shmid_pipe_fd(buf_inf->trace,
						  buf_inf->channel,
						  buf_inf->ch_cpu,
						  &buf_shmid,
						  &buf_struct_shmid,
						  &buf_pipe_fd);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		/* Poison the pointer field so the receiver never
		 * dereferences an address from our address space. */
		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->buf_shmid = buf_shmid;
		reply_msg->buf_struct_shmid = buf_struct_shmid;

		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
		reply_header->fd_included = 1;

		/* fd must travel as SCM_RIGHTS ancillary data, hence the
		 * dedicated send and early return. */
		if (ustcomm_send_fd(sock, reply_header, (char *)reply_msg,
				    &buf_pipe_fd) < 0) {
			ERR("ustcomm_send failed");
		}
		return;

	case NOTIFY_BUF_MAPPED:
		reply_header->result =
			notify_buffer_mapped(buf_inf->trace,
					     buf_inf->channel,
					     buf_inf->ch_cpu);
		break;
	case GET_SUBBUFFER:
		result = get_subbuffer(buf_inf->trace, buf_inf->channel,
				       buf_inf->ch_cpu, &consumed_old);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->consumed_old = consumed_old;

		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);

		break;
	case PUT_SUBBUFFER:
		result = put_subbuffer(buf_inf->trace, buf_inf->channel,
				       buf_inf->ch_cpu,
				       buf_inf->consumed_old);
		reply_header->result = result;

		break;
	}

	if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
		ERR("ustcomm_send failed");
	}

}
836
837 static void process_ust_marker_cmd(int sock, int command,
838 struct ustcomm_ust_marker_info *ust_marker_inf)
839 {
840 struct ustcomm_header _reply_header;
841 struct ustcomm_header *reply_header = &_reply_header;
842 int result = 0;
843
844 memset(reply_header, 0, sizeof(*reply_header));
845
846 switch(command) {
847 case ENABLE_MARKER:
848
849 result = ltt_ust_marker_connect(ust_marker_inf->channel,
850 ust_marker_inf->ust_marker,
851 "default");
852 if (result < 0) {
853 WARN("could not enable ust_marker; channel=%s,"
854 " name=%s",
855 ust_marker_inf->channel,
856 ust_marker_inf->ust_marker);
857
858 }
859 break;
860 case DISABLE_MARKER:
861 result = ltt_ust_marker_disconnect(ust_marker_inf->channel,
862 ust_marker_inf->ust_marker,
863 "default");
864 if (result < 0) {
865 WARN("could not disable ust_marker; channel=%s,"
866 " name=%s",
867 ust_marker_inf->channel,
868 ust_marker_inf->ust_marker);
869 }
870 break;
871 }
872
873 reply_header->result = result;
874
875 if (ustcomm_send(sock, reply_header, NULL) < 0) {
876 ERR("ustcomm_send failed");
877 }
878
879 }
880 static void process_client_cmd(struct ustcomm_header *recv_header,
881 char *recv_buf, int sock)
882 {
883 int result;
884 struct ustcomm_header _reply_header;
885 struct ustcomm_header *reply_header = &_reply_header;
886 char *send_buf = send_buffer;
887
888 memset(reply_header, 0, sizeof(*reply_header));
889 memset(send_buf, 0, sizeof(send_buffer));
890
891 switch(recv_header->command) {
892 case GET_SUBBUF_NUM_SIZE:
893 case SET_SUBBUF_NUM:
894 case SET_SUBBUF_SIZE:
895 {
896 struct ustcomm_channel_info *ch_inf;
897 ch_inf = (struct ustcomm_channel_info *)recv_buf;
898 result = ustcomm_unpack_channel_info(ch_inf);
899 if (result < 0) {
900 ERR("couldn't unpack channel info");
901 reply_header->result = -EINVAL;
902 goto send_response;
903 }
904 process_channel_cmd(sock, recv_header->command, ch_inf);
905 return;
906 }
907 case GET_BUF_SHMID_PIPE_FD:
908 case NOTIFY_BUF_MAPPED:
909 case GET_SUBBUFFER:
910 case PUT_SUBBUFFER:
911 {
912 struct ustcomm_buffer_info *buf_inf;
913 buf_inf = (struct ustcomm_buffer_info *)recv_buf;
914 result = ustcomm_unpack_buffer_info(buf_inf);
915 if (result < 0) {
916 ERR("couldn't unpack buffer info");
917 reply_header->result = -EINVAL;
918 goto send_response;
919 }
920 process_buffer_cmd(sock, recv_header->command, buf_inf);
921 return;
922 }
923 case ENABLE_MARKER:
924 case DISABLE_MARKER:
925 {
926 struct ustcomm_ust_marker_info *ust_marker_inf;
927 ust_marker_inf = (struct ustcomm_ust_marker_info *)recv_buf;
928 result = ustcomm_unpack_ust_marker_info(ust_marker_inf);
929 if (result < 0) {
930 ERR("couldn't unpack ust_marker info");
931 reply_header->result = -EINVAL;
932 goto send_response;
933 }
934 process_ust_marker_cmd(sock, recv_header->command, ust_marker_inf);
935 return;
936 }
937 case LIST_MARKERS:
938 {
939 char *ptr;
940 size_t size;
941 FILE *fp;
942
943 fp = open_memstream(&ptr, &size);
944 if (fp == NULL) {
945 ERR("opening memstream failed");
946 return;
947 }
948 print_ust_marker(fp);
949 fclose(fp);
950
951 reply_header->size = size + 1; /* Include final \0 */
952
953 result = ustcomm_send(sock, reply_header, ptr);
954
955 free(ptr);
956
957 if (result < 0) {
958 PERROR("failed to send ust_marker list");
959 }
960
961 break;
962 }
963 case LIST_TRACE_EVENTS:
964 {
965 char *ptr;
966 size_t size;
967 FILE *fp;
968
969 fp = open_memstream(&ptr, &size);
970 if (fp == NULL) {
971 ERR("opening memstream failed");
972 return;
973 }
974 print_trace_events(fp);
975 fclose(fp);
976
977 reply_header->size = size + 1; /* Include final \0 */
978
979 result = ustcomm_send(sock, reply_header, ptr);
980
981 free(ptr);
982
983 if (result < 0) {
984 ERR("list_trace_events failed");
985 return;
986 }
987
988 break;
989 }
990 case LOAD_PROBE_LIB:
991 {
992 char *libfile;
993
994 /* FIXME: No functionality at all... */
995 libfile = recv_buf;
996
997 DBG("load_probe_lib loading %s", libfile);
998
999 break;
1000 }
1001 case GET_PIDUNIQUE:
1002 {
1003 struct ustcomm_pidunique *pid_msg;
1004 pid_msg = (struct ustcomm_pidunique *)send_buf;
1005
1006 pid_msg->pidunique = pidunique;
1007 reply_header->size = sizeof(pid_msg);
1008
1009 goto send_response;
1010
1011 }
1012 case GET_SOCK_PATH:
1013 {
1014 struct ustcomm_single_field *sock_msg;
1015 char *sock_path_env;
1016
1017 sock_msg = (struct ustcomm_single_field *)send_buf;
1018
1019 sock_path_env = getenv("UST_DAEMON_SOCKET");
1020
1021 if (!sock_path_env) {
1022 result = ustcomm_pack_single_field(reply_header,
1023 sock_msg,
1024 SOCK_DIR "/ustconsumer");
1025
1026 } else {
1027 result = ustcomm_pack_single_field(reply_header,
1028 sock_msg,
1029 sock_path_env);
1030 }
1031 reply_header->result = result;
1032
1033 goto send_response;
1034 }
1035 case SET_SOCK_PATH:
1036 {
1037 struct ustcomm_single_field *sock_msg;
1038 sock_msg = (struct ustcomm_single_field *)recv_buf;
1039 result = ustcomm_unpack_single_field(sock_msg);
1040 if (result < 0) {
1041 reply_header->result = -EINVAL;
1042 goto send_response;
1043 }
1044
1045 reply_header->result = setenv("UST_DAEMON_SOCKET",
1046 sock_msg->field, 1);
1047
1048 goto send_response;
1049 }
1050 case START:
1051 case SETUP_TRACE:
1052 case ALLOC_TRACE:
1053 case CREATE_TRACE:
1054 case START_TRACE:
1055 case STOP_TRACE:
1056 case DESTROY_TRACE:
1057 case FORCE_SUBBUF_SWITCH:
1058 {
1059 struct ustcomm_single_field *trace_inf =
1060 (struct ustcomm_single_field *)recv_buf;
1061
1062 result = ustcomm_unpack_single_field(trace_inf);
1063 if (result < 0) {
1064 ERR("couldn't unpack trace info");
1065 reply_header->result = -EINVAL;
1066 goto send_response;
1067 }
1068
1069 reply_header->result =
1070 process_trace_cmd(recv_header->command,
1071 trace_inf->field);
1072 goto send_response;
1073
1074 }
1075 default:
1076 reply_header->result = -EINVAL;
1077
1078 goto send_response;
1079 }
1080
1081 return;
1082
1083 send_response:
1084 ustcomm_send(sock, reply_header, send_buf);
1085 }
1086
1087 #define MAX_EVENTS 10
1088
1089 void *listener_main(void *p)
1090 {
1091 struct ustcomm_sock *epoll_sock;
1092 struct epoll_event events[MAX_EVENTS];
1093 struct sockaddr addr;
1094 int accept_fd, nfds, result, i, addr_size;
1095
1096 DBG("LISTENER");
1097
1098 pthread_cleanup_push(listener_cleanup, NULL);
1099
1100 for(;;) {
1101 nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
1102 if (nfds == -1) {
1103 PERROR("listener_main: epoll_wait failed");
1104 continue;
1105 }
1106
1107 for (i = 0; i < nfds; i++) {
1108 pthread_mutex_lock(&listener_thread_data_mutex);
1109 pthread_cleanup_push(release_listener_mutex, NULL);
1110 epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
1111 if (epoll_sock == listen_sock) {
1112 addr_size = sizeof(struct sockaddr);
1113 accept_fd = accept(epoll_sock->fd,
1114 &addr,
1115 (socklen_t *)&addr_size);
1116 if (accept_fd == -1) {
1117 PERROR("listener_main: accept failed");
1118 continue;
1119 }
1120 ustcomm_init_sock(accept_fd, epoll_fd,
1121 &ust_socks);
1122 } else {
1123 memset(receive_header, 0,
1124 sizeof(*receive_header));
1125 memset(receive_buffer, 0,
1126 sizeof(receive_buffer));
1127 result = ustcomm_recv(epoll_sock->fd,
1128 receive_header,
1129 receive_buffer);
1130 if (result == 0) {
1131 ustcomm_del_sock(epoll_sock, 0);
1132 } else {
1133 process_client_cmd(receive_header,
1134 receive_buffer,
1135 epoll_sock->fd);
1136 }
1137 }
1138 pthread_cleanup_pop(1); /* release listener mutex */
1139 }
1140 }
1141
1142 pthread_cleanup_pop(1);
1143 }
1144
1145 /* These should only be accessed in the parent thread,
1146 * not the listener.
1147 */
1148 static volatile sig_atomic_t have_listener = 0;
1149 static pthread_t listener_thread;
1150
1151 void create_listener(void)
1152 {
1153 int result;
1154 sigset_t sig_all_blocked;
1155 sigset_t orig_parent_mask;
1156
1157 if (have_listener) {
1158 WARN("not creating listener because we already had one");
1159 return;
1160 }
1161
1162 /* A new thread created by pthread_create inherits the signal mask
1163 * from the parent. To avoid any signal being received by the
1164 * listener thread, we block all signals temporarily in the parent,
1165 * while we create the listener thread.
1166 */
1167
1168 sigfillset(&sig_all_blocked);
1169
1170 result = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1171 if (result) {
1172 PERROR("pthread_sigmask: %s", strerror(result));
1173 }
1174
1175 result = pthread_create(&listener_thread, NULL, listener_main, NULL);
1176 if (result == -1) {
1177 PERROR("pthread_create");
1178 }
1179
1180 /* Restore original signal mask in parent */
1181 result = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1182 if (result) {
1183 PERROR("pthread_sigmask: %s", strerror(result));
1184 } else {
1185 have_listener = 1;
1186 }
1187 }
1188
1189 #define AUTOPROBE_DISABLED 0
1190 #define AUTOPROBE_ENABLE_ALL 1
1191 #define AUTOPROBE_ENABLE_REGEX 2
1192 static int autoprobe_method = AUTOPROBE_DISABLED;
1193 static regex_t autoprobe_regex;
1194
1195 static void auto_probe_connect(struct ust_marker *m)
1196 {
1197 int result;
1198
1199 char* concat_name = NULL;
1200 const char *probe_name = "default";
1201
1202 if (autoprobe_method == AUTOPROBE_DISABLED) {
1203 return;
1204 } else if (autoprobe_method == AUTOPROBE_ENABLE_REGEX) {
1205 result = asprintf(&concat_name, "%s/%s", m->channel, m->name);
1206 if (result == -1) {
1207 ERR("auto_probe_connect: asprintf failed (ust_marker %s/%s)",
1208 m->channel, m->name);
1209 return;
1210 }
1211 if (regexec(&autoprobe_regex, concat_name, 0, NULL, 0)) {
1212 free(concat_name);
1213 return;
1214 }
1215 free(concat_name);
1216 }
1217
1218 result = ltt_ust_marker_connect(m->channel, m->name, probe_name);
1219 if (result && result != -EEXIST)
1220 ERR("ltt_ust_marker_connect (ust_marker = %s/%s, errno = %d)", m->channel, m->name, -result);
1221
1222 DBG("auto connected ust_marker %s (addr: %p) %s to probe default", m->channel, m, m->name);
1223
1224 }
1225
static struct ustcomm_sock * init_app_socket(int epoll_fd)
{
	/*
	 * Create the per-process named application socket
	 * (<user sock dir>/<pid>), register it with epoll_fd, and
	 * return it.  Returns NULL on any failure.
	 */
	struct ustcomm_sock *sock = NULL;
	char *dir_name, *sock_name;
	int result;

	dir_name = ustcomm_user_sock_dir();
	if (!dir_name)
		return NULL;

	result = asprintf(&sock_name, "%s/%d", dir_name, (int)getpid());
	if (result < 0) {
		ERR("string overflow allocating socket name, "
		    "UST thread bailing");
		goto free_dir_name;
	}

	result = ensure_dir_exists(dir_name, S_IRWXU);
	if (result == -1) {
		ERR("Unable to create socket directory %s, UST thread bailing",
		    dir_name);
		goto free_sock_name;
	}

	sock = ustcomm_init_named_socket(sock_name, epoll_fd);
	if (!sock) {
		/* sock stays NULL; fall through the common cleanup. */
		ERR("Error initializing named socket (%s). Check that directory"
		    "exists and that it is writable. UST thread bailing", sock_name);
	}

free_sock_name:
	free(sock_name);
free_dir_name:
	free(dir_name);

	return sock;
}
1264
1265 static void __attribute__((constructor)) init()
1266 {
1267 struct timespec ts;
1268 int result;
1269 char* autoprobe_val = NULL;
1270 char* subbuffer_size_val = NULL;
1271 char* subbuffer_count_val = NULL;
1272 unsigned int subbuffer_size;
1273 unsigned int subbuffer_count;
1274 unsigned int power;
1275
1276 /* Assign the pidunique, to be able to differentiate the processes with same
1277 * pid, (before and after an exec).
1278 */
1279 pidunique = make_pidunique();
1280 processpid = getpid();
1281
1282 DBG("Tracectl constructor");
1283
1284 /* Set up epoll */
1285 epoll_fd = epoll_create(MAX_EVENTS);
1286 if (epoll_fd == -1) {
1287 ERR("epoll_create failed, tracing shutting down");
1288 return;
1289 }
1290
1291 /* Create the socket */
1292 listen_sock = init_app_socket(epoll_fd);
1293 if (!listen_sock) {
1294 ERR("failed to create application socket,"
1295 " tracing shutting down");
1296 return;
1297 }
1298
1299 create_listener();
1300
1301 /* Get clock the clock source type */
1302
1303 /* Default clock source */
1304 ust_clock_source = CLOCK_TRACE;
1305 if (clock_gettime(ust_clock_source, &ts) != 0) {
1306 ust_clock_source = CLOCK_MONOTONIC;
1307 DBG("UST traces will not be synchronized with LTTng traces");
1308 }
1309
1310 autoprobe_val = getenv("UST_AUTOPROBE");
1311 if (autoprobe_val) {
1312 struct ust_marker_iter iter;
1313
1314 DBG("Autoprobe enabled.");
1315
1316 /* Ensure ust_marker are initialized */
1317 //init_ust_marker();
1318
1319 /* Ensure ust_marker control is initialized, for the probe */
1320 init_ust_marker_control();
1321
1322 /* first, set the callback that will connect the
1323 * probe on new ust_marker
1324 */
1325 if (autoprobe_val[0] == '/') {
1326 result = regcomp(&autoprobe_regex, autoprobe_val+1, 0);
1327 if (result) {
1328 char regexerr[150];
1329
1330 regerror(result, &autoprobe_regex, regexerr, sizeof(regexerr));
1331 ERR("cannot parse regex %s (%s), will ignore UST_AUTOPROBE", autoprobe_val, regexerr);
1332 /* don't crash the application just for this */
1333 } else {
1334 autoprobe_method = AUTOPROBE_ENABLE_REGEX;
1335 }
1336 } else {
1337 /* just enable all instrumentation */
1338 autoprobe_method = AUTOPROBE_ENABLE_ALL;
1339 }
1340
1341 ust_marker_set_new_ust_marker_cb(auto_probe_connect);
1342
1343 /* Now, connect the probes that were already registered. */
1344 ust_marker_iter_reset(&iter);
1345 ust_marker_iter_start(&iter);
1346
1347 DBG("now iterating on ust_marker already registered");
1348 while (iter.ust_marker) {
1349 DBG("now iterating on ust_marker %s", (*iter.ust_marker)->name);
1350 auto_probe_connect(*iter.ust_marker);
1351 ust_marker_iter_next(&iter);
1352 }
1353 }
1354
1355 if (getenv("UST_OVERWRITE")) {
1356 int val = atoi(getenv("UST_OVERWRITE"));
1357 if (val == 0 || val == 1) {
1358 CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
1359 } else {
1360 WARN("invalid value for UST_OVERWRITE");
1361 }
1362 }
1363
1364 if (getenv("UST_AUTOCOLLECT")) {
1365 int val = atoi(getenv("UST_AUTOCOLLECT"));
1366 if (val == 0 || val == 1) {
1367 CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
1368 } else {
1369 WARN("invalid value for UST_AUTOCOLLECT");
1370 }
1371 }
1372
1373 subbuffer_size_val = getenv("UST_SUBBUF_SIZE");
1374 if (subbuffer_size_val) {
1375 sscanf(subbuffer_size_val, "%u", &subbuffer_size);
1376 power = pow2_higher_or_eq(subbuffer_size);
1377 if (power != subbuffer_size)
1378 WARN("using the next power of two for buffer size = %u\n", power);
1379 chan_infos[LTT_CHANNEL_UST].def_subbufsize = power;
1380 }
1381
1382 subbuffer_count_val = getenv("UST_SUBBUF_NUM");
1383 if (subbuffer_count_val) {
1384 sscanf(subbuffer_count_val, "%u", &subbuffer_count);
1385 if (subbuffer_count < 2)
1386 subbuffer_count = 2;
1387 chan_infos[LTT_CHANNEL_UST].def_subbufcount = subbuffer_count;
1388 }
1389
1390 if (getenv("UST_TRACE")) {
1391 char trace_name[] = "auto";
1392 char trace_type[] = "ustrelay";
1393
1394 DBG("starting early tracing");
1395
1396 /* Ensure ust_marker control is initialized */
1397 init_ust_marker_control();
1398
1399 /* Ensure ust_marker are initialized */
1400 init_ust_marker();
1401
1402 /* Ensure buffers are initialized, for the transport to be available.
1403 * We are about to set a trace type and it will fail without this.
1404 */
1405 init_ustrelay_transport();
1406
1407 /* FIXME: When starting early tracing (here), depending on the
1408 * order of constructors, it is very well possible some ust_marker
1409 * sections are not yet registered. Because of this, some
1410 * channels may not be registered. Yet, we are about to ask the
1411 * daemon to collect the channels. Channels which are not yet
1412 * registered will not be collected.
1413 *
1414 * Currently, in LTTng, there is no way to add a channel after
1415 * trace start. The reason for this is that it induces complex
1416 * concurrency issues on the trace structures, which can only
1417 * be resolved using RCU. This has not been done yet. As a
1418 * workaround, we are forcing the registration of the "ust"
1419 * channel here. This is the only channel (apart from metadata)
1420 * that can be reliably used in early tracing.
1421 *
1422 * Non-early tracing does not have this problem and can use
1423 * arbitrary channel names.
1424 */
1425 ltt_channels_register("ust");
1426
1427 result = ltt_trace_setup(trace_name);
1428 if (result < 0) {
1429 ERR("ltt_trace_setup failed");
1430 return;
1431 }
1432
1433 result = ltt_trace_set_type(trace_name, trace_type);
1434 if (result < 0) {
1435 ERR("ltt_trace_set_type failed");
1436 return;
1437 }
1438
1439 result = ltt_trace_alloc(trace_name);
1440 if (result < 0) {
1441 ERR("ltt_trace_alloc failed");
1442 return;
1443 }
1444
1445 result = ltt_trace_start(trace_name);
1446 if (result < 0) {
1447 ERR("ltt_trace_start failed");
1448 return;
1449 }
1450
1451 /* Do this after the trace is started in order to avoid creating confusion
1452 * if the trace fails to start. */
1453 inform_consumer_daemon(trace_name);
1454 }
1455
1456 return;
1457
1458 /* should decrementally destroy stuff if error */
1459
1460 }
1461
1462 /* This is only called if we terminate normally, not with an unhandled signal,
1463 * so we cannot rely on it. However, for now, LTTV requires that the header of
1464 * the last sub-buffer contain a valid end time for the trace. This is done
1465 * automatically only when the trace is properly stopped.
1466 *
1467 * If the traced program crashed, it is always possible to manually add the
1468 * right value in the header, or to open the trace in text mode.
1469 *
1470 * FIXME: Fix LTTV so it doesn't need this.
1471 */
1472
/*
 * Stop and tear down the automatic trace ("auto"), if one is running.
 * Called from the destructor so that the last sub-buffer header gets a
 * valid end time (see the FIXME comment above).
 */
static void destroy_traces(void)
{
	int ret;

	DBG("destructor stopping traces");

	/* if trace running, finish it */
	ret = ltt_trace_stop("auto");
	if (ret == -1) {
		ERR("ltt_trace_stop error");
	}

	ret = ltt_trace_destroy("auto", 0);
	if (ret == -1) {
		ERR("ltt_trace_destroy error");
	}
}
1491
1492 static int trace_recording(void)
1493 {
1494 int retval = 0;
1495 struct ust_trace *trace;
1496
1497 ltt_lock_traces();
1498
1499 cds_list_for_each_entry(trace, &ltt_traces.head, list) {
1500 if (trace->active) {
1501 retval = 1;
1502 break;
1503 }
1504 }
1505
1506 ltt_unlock_traces();
1507
1508 return retval;
1509 }
1510
1511 int restarting_usleep(useconds_t usecs)
1512 {
1513 struct timespec tv;
1514 int result;
1515
1516 tv.tv_sec = 0;
1517 tv.tv_nsec = usecs * 1000;
1518
1519 do {
1520 result = nanosleep(&tv, &tv);
1521 } while (result == -1 && errno == EINTR);
1522
1523 return result;
1524 }
1525
1526 static void stop_listener(void)
1527 {
1528 int result;
1529
1530 if (!have_listener)
1531 return;
1532
1533 result = pthread_cancel(listener_thread);
1534 if (result != 0) {
1535 ERR("pthread_cancel: %s", strerror(result));
1536 }
1537 result = pthread_join(listener_thread, NULL);
1538 if (result != 0) {
1539 ERR("pthread_join: %s", strerror(result));
1540 }
1541 }
1542
1543 /* This destructor keeps the process alive for a few seconds in order
1544 * to leave time for ustconsumer to connect to its buffers. This is necessary
1545 * for programs whose execution is very short. It is also useful in all
1546 * programs when tracing is started close to the end of the program
1547 * execution.
1548 *
1549 * FIXME: For now, this only works for the first trace created in a
1550 * process.
1551 */
1552
1553 static void __attribute__((destructor)) keepalive()
1554 {
1555 if (processpid != getpid()) {
1556 return;
1557 }
1558
1559 if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
1560 int total = 0;
1561 DBG("Keeping process alive for consumer daemon...");
1562 while (CMM_LOAD_SHARED(buffers_to_export)) {
1563 const int interv = 200000;
1564 restarting_usleep(interv);
1565 total += interv;
1566
1567 if (total >= 3000000) {
1568 WARN("non-consumed buffers remaining after wait limit; not waiting anymore");
1569 break;
1570 }
1571 }
1572 DBG("Finally dying...");
1573 }
1574
1575 destroy_traces();
1576
1577 /* Ask the listener to stop and clean up. */
1578 stop_listener();
1579 }
1580
/* Called by a traced application before a potential exec(): records a
 * marker event and flushes/stops tracing state via keepalive() while
 * the current address space is still valid. */
void ust_potential_exec(void)
{
	ust_marker(potential_exec, UST_MARKER_NOARGS);

	/* NOTE(review): looks like a leftover debugging message —
	 * consider removing or making it descriptive. */
	DBG("test");

	/* Behave as if the process were exiting: wait for the consumer,
	 * stop traces, stop the listener. */
	keepalive();
}
1589
1590 /* Notify ust that there was a fork. This needs to be called inside
1591 * the new process, anytime a process whose memory is not shared with
1592 * the parent is created. If this function is not called, the events
1593 * of the new process will not be collected.
1594 *
1595 * Signals should be disabled before the fork and reenabled only after
1596 * this call in order to guarantee tracing is not started before ust_fork()
1597 * sanitizes the new process.
1598 */
1599
1600 static void ust_fork(void)
1601 {
1602 struct ustcomm_sock *sock, *sock_tmp;
1603 struct ust_trace *trace, *trace_tmp;
1604 int result;
1605
1606 /* FIXME: technically, the locks could have been taken before the fork */
1607 DBG("ust: forking");
1608
1609 /* Get the pid of the new process */
1610 processpid = getpid();
1611
1612 /*
1613 * FIXME: This could be prettier, we loop over the list twice and
1614 * following good locking practice should lock around the loop
1615 */
1616 cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
1617 ltt_trace_stop(trace->trace_name);
1618 }
1619
1620 /* Delete all active connections, but leave them in the epoll set */
1621 cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
1622 ustcomm_del_sock(sock, 1);
1623 }
1624
1625 /*
1626 * FIXME: This could be prettier, we loop over the list twice and
1627 * following good locking practice should lock around the loop
1628 */
1629 cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
1630 ltt_trace_destroy(trace->trace_name, 1);
1631 }
1632
1633 /* Clean up the listener socket and epoll, keeping the socket file */
1634 if (listen_sock) {
1635 ustcomm_del_named_sock(listen_sock, 1);
1636 listen_sock = NULL;
1637 }
1638 close(epoll_fd);
1639
1640 /* Re-start the launch sequence */
1641 CMM_STORE_SHARED(buffers_to_export, 0);
1642 have_listener = 0;
1643
1644 /* Set up epoll */
1645 epoll_fd = epoll_create(MAX_EVENTS);
1646 if (epoll_fd == -1) {
1647 ERR("epoll_create failed, tracing shutting down");
1648 return;
1649 }
1650
1651 /* Create the socket */
1652 listen_sock = init_app_socket(epoll_fd);
1653 if (!listen_sock) {
1654 ERR("failed to create application socket,"
1655 " tracing shutting down");
1656 return;
1657 }
1658 create_listener();
1659 ltt_trace_setup("auto");
1660 result = ltt_trace_set_type("auto", "ustrelay");
1661 if (result < 0) {
1662 ERR("ltt_trace_set_type failed");
1663 return;
1664 }
1665
1666 ltt_trace_alloc("auto");
1667 ltt_trace_start("auto");
1668 inform_consumer_daemon("auto");
1669 }
1670
/* Called in the parent just before fork(). Blocks all signals (the
 * original mask is saved in fork_info->orig_sigs for the after-fork
 * handlers to restore), takes the listener mutexes so the child does
 * not inherit them mid-critical-section, and prepares liburcu-bp for
 * the fork. */
void ust_before_fork(ust_fork_info_t *fork_info)
{
	/* Disable signals. This is to avoid that the child
	 * intervenes before it is properly setup for tracing. It is
	 * safer to disable all signals, because then we know we are not
	 * breaking anything by restoring the original mask.
	 */
	sigset_t all_sigs;
	int result;

	/* FIXME:
	   - only do this if tracing is active
	*/

	/* Disable signals */
	sigfillset(&all_sigs);
	result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
	if (result == -1) {
		PERROR("sigprocmask");
		/* NOTE(review): returning here leaves the mutexes untaken
		 * while ust_after_fork_*() will still unlock them — verify
		 * this failure path is acceptable. */
		return;
	}

	/*
	 * Take the fork lock to make sure we are not in the middle of
	 * something in the listener thread.
	 */
	pthread_mutex_lock(&listener_thread_data_mutex);
	/*
	 * Hold listen_sock_mutex to protect from listen_sock teardown.
	 */
	pthread_mutex_lock(&listen_sock_mutex);
	rcu_bp_before_fork();
}
1704
1705 /* Don't call this function directly in a traced program */
1706 static void ust_after_fork_common(ust_fork_info_t *fork_info)
1707 {
1708 int result;
1709
1710 pthread_mutex_unlock(&listen_sock_mutex);
1711 pthread_mutex_unlock(&listener_thread_data_mutex);
1712
1713 /* Restore signals */
1714 result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
1715 if (result == -1) {
1716 PERROR("sigprocmask");
1717 return;
1718 }
1719 }
1720
/* Post-fork handler for the parent process: resume liburcu-bp, then
 * release the mutexes and restore the signal mask saved by
 * ust_before_fork(). */
void ust_after_fork_parent(ust_fork_info_t *fork_info)
{
	rcu_bp_after_fork_parent();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(fork_info);
}
1727
/* Post-fork handler for the child process: fix up liburcu-bp state,
 * sanitize libust state for the new process (ust_fork()), then release
 * the mutexes and restore the signal mask saved by ust_before_fork().
 * The order matters: ust_fork() must run while signals are still
 * blocked and the listener mutexes are still held. */
void ust_after_fork_child(ust_fork_info_t *fork_info)
{
	/* Release urcu mutexes */
	rcu_bp_after_fork_child();

	/* Sanitize the child */
	ust_fork();

	/* Then release mutexes and reenable signals */
	ust_after_fork_common(fork_info);
}
1739
This page took 0.068437 seconds and 4 git commands to generate.