/*
 * lttng-ust-comm.c
 *
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/prctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <time.h>
#include <assert.h>
#include <signal.h>
#include <urcu/uatomic.h>
#include <urcu/futex.h>

#include <lttng-ust-comm.h>
#include <ust/usterr-signal-safe.h>
#include <ust/lttng-ust-abi.h>
#include <ust/tracepoint.h>
#include <ust/tracepoint-internal.h>
#include <ust/ust.h>
#include "ltt-tracer-core.h"

/*
 * Has the lttng-ust comm constructor been called?
 */
static int initialized;

/*
 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
 * Held when handling a command, also held by fork() to deal with
 * removal of threads, and by the exit path.
 */

/* Should the ust comm thread quit? */
static int lttng_ust_comm_should_quit;

/*
 * Wait for either of these before continuing to the main
 * program:
 * - the register_done message from the sessiond daemon
 *   (this lets the sessiond daemon enable sessions before main
 *   starts),
 * - the sessiond daemon is not reachable,
 * - timeout (ensuring applications are resilient to session
 *   daemon problems).
 */
static sem_t constructor_wait;
/*
 * One count for the global sessiond and one for the local (per-user)
 * sessiond.
 */
static int sem_count = { 2 };

/*
 * Info about socket and associated listener thread.
 */
struct sock_info {
	const char *name;
	pthread_t ust_listener;	/* listener thread */
	int root_handle;
	int constructor_sem_posted;
	int allowed;
	int global;

	char sock_path[PATH_MAX];
	int socket;

	char wait_shm_path[PATH_MAX];
	char *wait_shm_mmap;
};

/* Socket from app (connect) to session daemon (listen) for communication */
struct sock_info global_apps = {
	.name = "global",
	.global = 1,

	.root_handle = -1,
	.allowed = 1,

	.sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
	.socket = -1,

	.wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
};

/* TODO: allow global_apps_sock_path override */

struct sock_info local_apps = {
	.name = "local",
	.global = 0,
	.root_handle = -1,
	.allowed = 0,	/* Check setuid bit first */

	.socket = -1,
};

static int wait_poll_fallback;

extern void ltt_ring_buffer_client_overwrite_init(void);
extern void ltt_ring_buffer_client_discard_init(void);
extern void ltt_ring_buffer_metadata_client_init(void);
extern void ltt_ring_buffer_client_overwrite_exit(void);
extern void ltt_ring_buffer_client_discard_exit(void);
extern void ltt_ring_buffer_metadata_client_exit(void);

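/*
 * Set up the per-user ("local") tracing paths: the UNIX socket path is
 * derived from $HOME and the wait shm path from the uid. Per-user
 * tracing is disallowed for setuid binaries (real and effective uids
 * differ); returns -ENOENT if $HOME is not set.
 */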
static
int setup_local_apps(void)
{
	const char *home_dir;
	uid_t uid;

	uid = getuid();
	/*
	 * Disallow per-user tracing for setuid binaries.
	 */
	if (uid != geteuid()) {
		local_apps.allowed = 0;
		return 0;
	} else {
		local_apps.allowed = 1;
	}
	home_dir = (const char *) getenv("HOME");
	if (!home_dir)
		return -ENOENT;
	snprintf(local_apps.sock_path, PATH_MAX,
		 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
	snprintf(local_apps.wait_shm_path, PATH_MAX,
		 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
	return 0;
}

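/*
 * Send the registration message to the session daemon: protocol
 * version, pid, ppid, uid, gid and the process name obtained with
 * prctl(PR_GET_NAME). Returns -errno if prctl fails, -EIO on a short
 * write, or the return value of lttcomm_send_unix_sock.
 */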
static
int register_app_to_sessiond(int socket)
{
	ssize_t ret;
	int prctl_ret;
	struct {
		uint32_t major;
		uint32_t minor;
		pid_t pid;
		pid_t ppid;
		uid_t uid;
		gid_t gid;
		char name[16];	/* process name */
	} reg_msg;

	reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
	reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
	reg_msg.pid = getpid();
	reg_msg.ppid = getppid();
	reg_msg.uid = getuid();
	reg_msg.gid = getgid();
	prctl_ret = prctl(PR_GET_NAME, (unsigned long) reg_msg.name, 0, 0, 0);
	if (prctl_ret) {
		ERR("Error executing prctl");
		return -errno;
	}

	ret = lttcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
	if (ret >= 0 && ret != sizeof(reg_msg))
		return -EIO;
	return ret;
}

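/*
 * Send a command reply back to the session daemon over the UNIX
 * socket. A peer that closed the connection (ECONNRESET) is not
 * treated as an error; any other failed or short send returns -1.
 */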
static
int send_reply(int sock, struct lttcomm_ust_reply *lur)
{
	ssize_t len;

	len = lttcomm_send_unix_sock(sock, lur, sizeof(*lur));
	switch (len) {
	case sizeof(*lur):
		DBG("message successfully sent");
		return 0;
	case -1:
		if (errno == ECONNRESET) {
			printf("remote end closed connection\n");
			return 0;
		}
		return -1;
	default:
		printf("incorrect message size: %zd\n", len);
		return -1;
	}
}

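/*
 * Mark registration with this sessiond as done and decrement the
 * constructor semaphore count (at most once per socket). When both the
 * global and local registrations are accounted for, post
 * constructor_wait so the application constructor can proceed.
 */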
static
int handle_register_done(struct sock_info *sock_info)
{
	int ret;

	if (sock_info->constructor_sem_posted)
		return 0;
	sock_info->constructor_sem_posted = 1;
	if (uatomic_read(&sem_count) <= 0) {
		return 0;
	}
	ret = uatomic_add_return(&sem_count, -1);
	if (ret == 0) {
		ret = sem_post(&constructor_wait);
		assert(!ret);
	}
	return 0;
}

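/*
 * Dispatch one command received from the session daemon, under
 * ust_lock(): REGISTER_DONE and RELEASE are handled here, every other
 * command goes through the objd_ops of the target handle. A reply is
 * always sent back; for LTTNG_UST_STREAM the shm and wait file
 * descriptors are sent over the socket after the reply.
 */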
static
int handle_message(struct sock_info *sock_info,
		int sock, struct lttcomm_ust_msg *lum)
{
	int ret = 0;
	const struct objd_ops *ops;
	struct lttcomm_ust_reply lur;

	ust_lock();

	memset(&lur, 0, sizeof(lur));

	if (lttng_ust_comm_should_quit) {
		ret = -EPERM;
		goto end;
	}

	ops = objd_ops(lum->handle);
	if (!ops) {
		ret = -ENOENT;
		goto end;
	}

	switch (lum->cmd) {
	case LTTNG_UST_REGISTER_DONE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = handle_register_done(sock_info);
		else
			ret = -EINVAL;
		break;
	case LTTNG_UST_RELEASE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = -EPERM;
		else
			ret = objd_unref(lum->handle);
		break;
	default:
		if (ops->cmd)
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) &lum->u);
		else
			ret = -ENOSYS;
		break;
	}

end:
	lur.handle = lum->handle;
	lur.cmd = lum->cmd;
	lur.ret_val = ret;
	if (ret >= 0) {
		lur.ret_code = LTTCOMM_OK;
	} else {
		lur.ret_code = LTTCOMM_SESSION_FAIL;
	}
	if (lum->cmd == LTTNG_UST_STREAM) {
		/*
		 * Special-case reply to send stream info.
		 * Use lum.u output.
		 */
		lur.u.stream.memory_map_size = lum->u.stream.memory_map_size;
	}
	ret = send_reply(sock, &lur);

	if (lum->cmd == LTTNG_UST_STREAM && ret >= 0) {
		/* we also need to send the file descriptors. */
		ret = lttcomm_send_fds_unix_sock(sock,
			&lum->u.stream.shm_fd, &lum->u.stream.shm_fd,
			1, sizeof(int));
		if (ret < 0) {
			perror("send shm_fd");
			goto error;
		}
		ret = lttcomm_send_fds_unix_sock(sock,
			&lum->u.stream.wait_fd, &lum->u.stream.wait_fd,
			1, sizeof(int));
		if (ret < 0) {
			perror("send wait_fd");
			goto error;
		}
	}
error:
	ust_unlock();
	return ret;
}

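/*
 * Release the per-sessiond resources: close the socket, unreference
 * the root handle, reset the constructor semaphore flag and unmap the
 * wait shm page, so the structure can be reused (e.g. after fork).
 */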
static
void cleanup_sock_info(struct sock_info *sock_info)
{
	int ret;

	if (sock_info->socket != -1) {
		ret = close(sock_info->socket);
		if (ret) {
			ERR("Error closing apps socket");
		}
		sock_info->socket = -1;
	}
	if (sock_info->root_handle != -1) {
		ret = objd_unref(sock_info->root_handle);
		if (ret) {
			ERR("Error unref root handle");
		}
		sock_info->root_handle = -1;
	}
	sock_info->constructor_sem_posted = 0;
	if (sock_info->wait_shm_mmap) {
		ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
		if (ret) {
			ERR("Error unmapping wait shm");
		}
		sock_info->wait_shm_mmap = NULL;
	}
}

/*
 * Using fork to set umask in the child process (not multi-thread safe).
 * We deal with the shm_open vs ftruncate race (happening when the
 * sessiond owns the shm and does not let everybody modify it, to ensure
 * safety against shm_unlink) by simply letting the mmap fail and
 * retrying after a few seconds.
 * For global shm, everybody has rw access to it until the sessiond
 * starts.
 */
static
int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
{
	int wait_shm_fd, ret;
	pid_t pid;

	/*
	 * Try to open read-only.
	 */
	wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
	if (wait_shm_fd >= 0) {
		goto end;
	} else if (wait_shm_fd < 0 && errno != ENOENT) {
		/*
		 * Read-only open did not work, and it's not because the
		 * entry was not present. It's a failure that prohibits
		 * using shm.
		 */
		ERR("Error opening shm %s", sock_info->wait_shm_path);
		goto end;
	}
	/*
	 * If the open failed because the file did not exist, try
	 * creating it ourselves.
	 */
	pid = fork();
	if (pid > 0) {
		int status;

		/*
		 * Parent: wait for child to return, in which case the
		 * shared memory map will have been created.
		 */
		pid = wait(&status);
		if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
			wait_shm_fd = -1;
			goto end;
		}
		/*
		 * Try to open read-only again after creation.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
		if (wait_shm_fd < 0) {
			/*
			 * Read-only open did not work. It's a failure
			 * that prohibits using shm.
			 */
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			goto end;
		}
		goto end;
	} else if (pid == 0) {
		int create_mode;

		/* Child */
		create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
		if (sock_info->global)
			create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
		/*
		 * We're alone in a child process, so we can modify the
		 * process-wide umask.
		 */
		umask(~create_mode);
		/*
		 * Try creating shm (or get rw access).
		 * We don't do an exclusive open, because we allow other
		 * processes to create+ftruncate it concurrently.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path,
				O_RDWR | O_CREAT, create_mode);
		if (wait_shm_fd >= 0) {
			ret = ftruncate(wait_shm_fd, mmap_size);
			if (ret) {
				PERROR("ftruncate");
				exit(EXIT_FAILURE);
			}
			exit(EXIT_SUCCESS);
		}
		/*
		 * For local shm, we need to have rw access to accept
		 * opening it: this means the local sessiond will be
		 * able to wake us up. For global shm, we open it even
		 * if rw access is not granted, because the root.root
		 * sessiond will be able to override all rights and wake
		 * us up.
		 */
		if (!sock_info->global && errno != EACCES) {
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			exit(EXIT_FAILURE);
		}
		/*
		 * The shm exists, but we cannot open it RW. Report
		 * success.
		 */
		exit(EXIT_SUCCESS);
	} else {
		return -1;
	}
end:
	if (wait_shm_fd >= 0 && !sock_info->global) {
		struct stat statbuf;

		/*
		 * Ensure that our user is the owner of the shm file for
		 * local shm. If we do not own the file, it means our
		 * sessiond will not have access to wake us up (there is
		 * probably a rogue process trying to fake our
		 * sessiond). Fall back to the polling method in this case.
		 */
		ret = fstat(wait_shm_fd, &statbuf);
		if (ret) {
			PERROR("fstat");
			goto error_close;
		}
		if (statbuf.st_uid != getuid())
			goto error_close;
	}
	return wait_shm_fd;

error_close:
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	return -1;
}

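/*
 * Map one read-only page of the "wait" shared memory obtained from
 * get_wait_shm(). The file descriptor is closed right after taking the
 * mmap reference; NULL is returned on failure, for example when racing
 * with the sessiond setting up the shm.
 */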
static
char *get_map_shm(struct sock_info *sock_info)
{
	size_t mmap_size = sysconf(_SC_PAGE_SIZE);
	int wait_shm_fd, ret;
	char *wait_shm_mmap;

	wait_shm_fd = get_wait_shm(sock_info, mmap_size);
	if (wait_shm_fd < 0) {
		goto error;
	}
	wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
			MAP_SHARED, wait_shm_fd, 0);
	/* close shm fd immediately after taking the mmap reference */
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	if (wait_shm_mmap == MAP_FAILED) {
		DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
		goto error;
	}
	return wait_shm_mmap;

error:
	return NULL;
}

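/*
 * Block until a session daemon signals its presence: map the wait shm
 * if needed, then FUTEX_WAIT on its first 32-bit word as long as it
 * reads 0. The sessiond is expected to set the word and issue a
 * FUTEX_WAKE. On kernels where FUTEX_WAKE does not work on read-only
 * mappings (EFAULT), switch to the polling fallback.
 */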
static
void wait_for_sessiond(struct sock_info *sock_info)
{
	int ret;

	ust_lock();
	if (lttng_ust_comm_should_quit) {
		goto quit;
	}
	if (wait_poll_fallback) {
		goto error;
	}
	if (!sock_info->wait_shm_mmap) {
		sock_info->wait_shm_mmap = get_map_shm(sock_info);
		if (!sock_info->wait_shm_mmap)
			goto error;
	}
	ust_unlock();

	DBG("Waiting for %s apps sessiond", sock_info->name);
	/* Wait for futex wakeup */
	if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
		ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
				FUTEX_WAIT, 0, NULL, NULL, 0);
		if (ret < 0) {
			if (errno == EFAULT) {
				wait_poll_fallback = 1;
				ERR(
"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
"do not support FUTEX_WAKE on read-only memory mappings correctly. "
"Please upgrade your kernel "
"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
"mainline). LTTng-UST will use polling mode fallback.");
			}
			PERROR("futex");
		}
	}
	return;

quit:
	ust_unlock();
	return;

error:
	ust_unlock();
	return;
}

/*
 * This thread does not allocate any resources, except within
 * handle_message, within mutex protection. This mutex protects against
 * fork and exit.
 * The other moment it allocates resources is at socket connection, which
 * is also protected by the mutex.
 */
static
void *ust_listener_thread(void *arg)
{
	struct sock_info *sock_info = arg;
	int sock, ret, prev_connect_failed = 0, has_waited = 0;

	/* Restart trying to connect to the session daemon */
restart:
	if (prev_connect_failed) {
		/* Wait for sessiond availability using the wait shm */
		wait_for_sessiond(sock_info);
		if (has_waited) {
			has_waited = 0;
			/*
			 * Sleep for 5 seconds before retrying after a
			 * sequence of failure / wait / failure. This
			 * deals with a killed or broken session daemon.
			 */
			sleep(5);
		}
		has_waited = 1;
		prev_connect_failed = 0;
	}
	ust_lock();

	if (lttng_ust_comm_should_quit) {
		ust_unlock();
		goto quit;
	}

	if (sock_info->socket != -1) {
		ret = close(sock_info->socket);
		if (ret) {
			ERR("Error closing %s apps socket", sock_info->name);
		}
		sock_info->socket = -1;
	}

	/* Register */
	ret = lttcomm_connect_unix_sock(sock_info->sock_path);
	if (ret < 0) {
		ERR("Error connecting to %s apps socket", sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot find the sessiond daemon, don't delay
		 * constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}

	sock_info->socket = sock = ret;

	/*
	 * Create only one root handle per listener thread for the whole
	 * process lifetime.
	 */
	if (sock_info->root_handle == -1) {
		ret = lttng_abi_create_root_handle();
		if (ret) {
			ERR("Error creating root handle");
			ust_unlock();
			goto quit;
		}
		sock_info->root_handle = ret;
	}

	ret = register_app_to_sessiond(sock);
	if (ret < 0) {
		ERR("Error registering to %s apps socket", sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot register to the sessiond daemon, don't
		 * delay constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}
	ust_unlock();

	for (;;) {
		ssize_t len;
		struct lttcomm_ust_msg lum;

		len = lttcomm_recv_unix_sock(sock, &lum, sizeof(lum));
		switch (len) {
		case 0:	/* orderly shutdown */
			DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
			goto end;
		case sizeof(lum):
			DBG("message received\n");
			ret = handle_message(sock_info, sock, &lum);
			if (ret < 0) {
				ERR("Error handling message for %s socket", sock_info->name);
			}
			continue;
		case -1:
			if (errno == ECONNRESET) {
				ERR("%s remote end closed connection\n", sock_info->name);
				goto end;
			}
			goto end;
		default:
			ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
			continue;
		}

	}
end:
	goto restart;	/* try to reconnect */
quit:
	return NULL;
}

/*
 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
 */
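/*
 * The delay is expressed in milliseconds and can be overridden at run
 * time through the UST_REGISTER_TIMEOUT environment variable, for
 * example:
 *
 *   UST_REGISTER_TIMEOUT=3000 ./my_app    (wait up to 3 seconds)
 *   UST_REGISTER_TIMEOUT=-1   ./my_app    (wait forever)
 *   UST_REGISTER_TIMEOUT=0    ./my_app    (do not wait)
 *
 * "./my_app" stands for any instrumented application.
 */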
static
int get_timeout(struct timespec *constructor_timeout)
{
	long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
	char *str_delay;
	int ret;

	str_delay = getenv("UST_REGISTER_TIMEOUT");
	if (str_delay) {
		constructor_delay_ms = strtol(str_delay, NULL, 10);
	}

	switch (constructor_delay_ms) {
	case -1:/* fall-through */
	case 0:
		return constructor_delay_ms;
	default:
		break;
	}

	/*
	 * If we are unable to find the current time, don't wait.
	 */
	ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
	if (ret) {
		/* Don't wait. */
		return 0;
	}
	constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
	constructor_timeout->tv_nsec +=
			(constructor_delay_ms % 1000UL) * 1000000UL;
	if (constructor_timeout->tv_nsec >= 1000000000UL) {
		constructor_timeout->tv_sec++;
		constructor_timeout->tv_nsec -= 1000000000UL;
	}
	return 1;
}

/*
 * sessiond monitoring thread: monitor presence of global and per-user
 * sessiond by polling the application common named pipe.
 */
/* TODO */

void __attribute__((constructor)) lttng_ust_init(void)
{
	struct timespec constructor_timeout;
	int timeout_mode;
	int ret;

	if (uatomic_xchg(&initialized, 1) == 1)
		return;

	/*
	 * We want precise control over the order in which we construct
	 * our sub-libraries vs starting to receive commands from
	 * sessiond (otherwise commands arriving before the init
	 * functions have completed would lead to errors).
	 */
	init_usterr();
	init_tracepoint();
	ltt_ring_buffer_metadata_client_init();
	ltt_ring_buffer_client_overwrite_init();
	ltt_ring_buffer_client_discard_init();

	timeout_mode = get_timeout(&constructor_timeout);

	ret = sem_init(&constructor_wait, 0, 0);
	assert(!ret);

	ret = setup_local_apps();
	if (ret) {
		ERR("Error setting up local apps");
	}
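	/*
	 * Spawn one listener thread per session daemon: the global
	 * listener is always created; the per-user listener is only
	 * created when per-user tracing is allowed, otherwise its
	 * registration is immediately marked as done so the constructor
	 * does not wait for it.
	 */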
	ret = pthread_create(&global_apps.ust_listener, NULL,
			ust_listener_thread, &global_apps);

	if (local_apps.allowed) {
		ret = pthread_create(&local_apps.ust_listener, NULL,
				ust_listener_thread, &local_apps);
	} else {
		handle_register_done(&local_apps);
	}

	switch (timeout_mode) {
	case 1:	/* timeout wait */
		do {
			ret = sem_timedwait(&constructor_wait,
					&constructor_timeout);
		} while (ret < 0 && errno == EINTR);
		if (ret < 0 && errno == ETIMEDOUT) {
			ERR("Timed out waiting for ltt-sessiond");
		} else {
			assert(!ret);
		}
		break;
	case -1:/* wait forever */
		do {
			ret = sem_wait(&constructor_wait);
		} while (ret < 0 && errno == EINTR);
		assert(!ret);
		break;
	case 0:	/* no timeout */
		break;
	}
}

static
void lttng_ust_cleanup(int exiting)
{
	cleanup_sock_info(&global_apps);
	if (local_apps.allowed) {
		cleanup_sock_info(&local_apps);
	}
	lttng_ust_abi_exit();
	ltt_events_exit();
	ltt_ring_buffer_client_discard_exit();
	ltt_ring_buffer_client_overwrite_exit();
	ltt_ring_buffer_metadata_client_exit();
	exit_tracepoint();
	if (!exiting) {
		/* Reinitialize values for fork */
		sem_count = 2;
		lttng_ust_comm_should_quit = 0;
		initialized = 0;
	}
}

void __attribute__((destructor)) lttng_ust_exit(void)
{
	int ret;

	/*
	 * Using pthread_cancel here because:
	 * A) we don't want to hang application teardown.
	 * B) the thread is not allocating any resource.
	 */

	/*
	 * Require the communication thread to quit. Synchronize with
	 * mutexes to ensure it is not in a mutex critical section when
	 * pthread_cancel is later called.
	 */
	ust_lock();
	lttng_ust_comm_should_quit = 1;
	ust_unlock();

	ret = pthread_cancel(global_apps.ust_listener);
	if (ret) {
		ERR("Error cancelling global ust listener thread");
	}
	if (local_apps.allowed) {
		ret = pthread_cancel(local_apps.ust_listener);
		if (ret) {
			ERR("Error cancelling local ust listener thread");
		}
	}
	lttng_ust_cleanup(1);
}

/*
 * We exclude the worker threads across fork and clone (except
 * CLONE_VM), because these system calls only keep the forking thread
 * running in the child. Therefore, we don't want to call fork or clone
 * in the middle of a tracepoint or ust tracing state modification.
 * Holding this mutex protects these structures across fork and clone.
 */
void ust_before_fork(ust_fork_info_t *fork_info)
{
	/*
	 * Disable signals. This is to avoid that the child intervenes
	 * before it is properly set up for tracing. It is safer to
	 * disable all signals, because then we know we are not breaking
	 * anything by restoring the original mask.
	 */
	sigset_t all_sigs;
	int ret;

	/* Disable signals */
	sigfillset(&all_sigs);
	ret = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
	ust_lock();
	rcu_bp_before_fork();
}

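/*
 * Common post-fork teardown for both parent and child: release the ust
 * lock taken in ust_before_fork() and restore the signal mask that was
 * saved there.
 */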
static void ust_after_fork_common(ust_fork_info_t *fork_info)
{
	int ret;

	DBG("process %d", getpid());
	ust_unlock();
	/* Restore signals */
	ret = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
}

void ust_after_fork_parent(ust_fork_info_t *fork_info)
{
	DBG("process %d", getpid());
	rcu_bp_after_fork_parent();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(fork_info);
}

/*
 * After fork, in the child, we need to clean up all the leftover state,
 * except the worker thread which already magically disappeared thanks
 * to the weird Linux fork semantics. After tidying up, we call
 * lttng_ust_init() again to start over as a new PID.
 *
 * This is meant for fork() calls that trace in the child between the
 * fork and the following exec call (if there is any).
 */
void ust_after_fork_child(ust_fork_info_t *fork_info)
{
	DBG("process %d", getpid());
	/* Release urcu mutexes */
	rcu_bp_after_fork_child();
	lttng_ust_cleanup(0);
	/* Release mutexes and reenable signals */
	ust_after_fork_common(fork_info);
	lttng_ust_init();
}