Fix: hash table cleanup call_rcu deadlock
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
43 /* Next available channel key. */
44 static unsigned long next_channel_key;
45 static unsigned long next_session_id;
46
47 /*
48 * Return the atomically incremented value of next_channel_key.
49 */
50 static inline unsigned long get_next_channel_key(void)
51 {
52 return uatomic_add_return(&next_channel_key, 1);
53 }
54
55 /*
56 * Return the atomically incremented value of next_session_id.
57 */
58 static inline unsigned long get_next_session_id(void)
59 {
60 return uatomic_add_return(&next_session_id, 1);
61 }
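Both key generators above lean on liburcu's uatomic_add_return() so that concurrent threads never hand out the same value. A minimal, self-contained sketch of that pattern follows; it is illustration only, not part of ust-app.c, and the worker() thread and next_key counter are invented names.

#include <pthread.h>
#include <stdio.h>
#include <urcu/uatomic.h>

static unsigned long next_key;

static void *worker(void *data)
{
	/* Atomic read-modify-write: no two threads can obtain the same key. */
	unsigned long key = uatomic_add_return(&next_key, 1);

	printf("got key %lu\n", key);
	return NULL;
}

int main(void)
{
	pthread_t tid[4];
	int i;

	for (i = 0; i < 4; i++) {
		pthread_create(&tid[i], NULL, worker, NULL);
	}
	for (i = 0; i < 4; i++) {
		pthread_join(tid[i], NULL);
	}
	return 0;
}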
62
63 static void copy_channel_attr_to_ustctl(
64 struct ustctl_consumer_channel_attr *attr,
65 struct lttng_ust_channel_attr *uattr)
66 {
67 /* Copy channel attributes since the layout is different. */
68 attr->subbuf_size = uattr->subbuf_size;
69 attr->num_subbuf = uattr->num_subbuf;
70 attr->overwrite = uattr->overwrite;
71 attr->switch_timer_interval = uattr->switch_timer_interval;
72 attr->read_timer_interval = uattr->read_timer_interval;
73 attr->output = uattr->output;
74 }
75
76 /*
77 * Match function for the hash table lookup.
78 *
79 * It matches an ust app event based on three attributes which are the event
80 * name, the filter bytecode and the loglevel.
81 */
82 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
83 {
84 struct ust_app_event *event;
85 const struct ust_app_ht_key *key;
86
87 assert(node);
88 assert(_key);
89
90 event = caa_container_of(node, struct ust_app_event, node.node);
91 key = _key;
92
93 /* Match the 3 elements of the key: name, filter and loglevel. */
94
95 /* Event name */
96 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
97 goto no_match;
98 }
99
100 /* Event loglevel. */
101 if (event->attr.loglevel != key->loglevel) {
102 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
103 && key->loglevel == 0 && event->attr.loglevel == -1) {
104 /*
105 * Match is accepted. This is because on event creation, the
106 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
107 * -1 are accepted for this loglevel type since 0 is the one set by
108 * the API when receiving an enable event.
109 */
110 } else {
111 goto no_match;
112 }
113 }
114
115 /* One of the filters is NULL, fail. */
116 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
117 goto no_match;
118 }
119
120 if (key->filter && event->filter) {
121 /* Both filters exist, check length followed by the bytecode. */
122 if (event->filter->len != key->filter->len ||
123 memcmp(event->filter->data, key->filter->data,
124 event->filter->len) != 0) {
125 goto no_match;
126 }
127 }
128
129 /* Match. */
130 return 1;
131
132 no_match:
133 return 0;
134 }
135
136 /*
137 * Unique add of an ust app event in the given ht. This uses the custom
138 * ht_match_ust_app_event match function and the event name as hash.
139 */
140 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
141 struct ust_app_event *event)
142 {
143 struct cds_lfht_node *node_ptr;
144 struct ust_app_ht_key key;
145 struct lttng_ht *ht;
146
147 assert(ua_chan);
148 assert(ua_chan->events);
149 assert(event);
150
151 ht = ua_chan->events;
152 key.name = event->attr.name;
153 key.filter = event->filter;
154 key.loglevel = event->attr.loglevel;
155
156 node_ptr = cds_lfht_add_unique(ht->ht,
157 ht->hash_fct(event->node.key, lttng_ht_seed),
158 ht_match_ust_app_event, &key, &event->node.node);
159 assert(node_ptr == &event->node.node);
160 }
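The add-unique-with-custom-match pattern used above comes straight from userspace RCU's lock-free hash table API: hash on part of the key, and let the match callback compare the full compound key. Below is a self-contained sketch of the same idea, as an illustration only; struct item, item_key, match_item() and hash_id() are invented here and are not lttng-tools types.

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* default RCU flavor */
#include <urcu/compiler.h>	/* caa_container_of() */
#include <urcu/rculfhash.h>	/* cds_lfht lock-free hash table */

struct item_key {
	int id;
	int type;
};

struct item {
	int id;
	int type;
	struct cds_lfht_node node;
};

/* Return non-zero on match, same contract as ht_match_ust_app_event(). */
static int match_item(struct cds_lfht_node *node, const void *_key)
{
	struct item *it = caa_container_of(node, struct item, node);
	const struct item_key *key = _key;

	return it->id == key->id && it->type == key->type;
}

static unsigned long hash_id(int id)
{
	return (unsigned long) id * 2654435761UL;	/* Knuth multiplicative hash */
}

int main(void)
{
	struct cds_lfht *ht;
	struct cds_lfht_node *ret_node;
	struct item *it = calloc(1, sizeof(*it));
	struct item_key key;

	rcu_register_thread();
	ht = cds_lfht_new(64, 64, 0, CDS_LFHT_AUTO_RESIZE, NULL);

	it->id = 42;
	it->type = 1;
	cds_lfht_node_init(&it->node);
	key.id = it->id;
	key.type = it->type;

	/* Hash on id only; match_item() checks the full compound key. */
	rcu_read_lock();
	ret_node = cds_lfht_add_unique(ht, hash_id(it->id), match_item, &key,
			&it->node);
	rcu_read_unlock();

	/* Equal pointers mean our node was inserted (no duplicate existed). */
	printf("%s\n", ret_node == &it->node ? "inserted" : "duplicate");

	rcu_read_lock();
	cds_lfht_del(ht, &it->node);
	rcu_read_unlock();
	synchronize_rcu();	/* no reader can still see the node */
	free(it);

	cds_lfht_destroy(ht, NULL);
	rcu_unregister_thread();
	return 0;
}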
161
162 /*
163 * Close the notify socket from the given RCU head object. This MUST be called
164 * through a call_rcu().
165 */
166 static void close_notify_sock_rcu(struct rcu_head *head)
167 {
168 int ret;
169 struct ust_app_notify_sock_obj *obj =
170 caa_container_of(head, struct ust_app_notify_sock_obj, head);
171
172 /* Must have a valid fd here. */
173 assert(obj->fd >= 0);
174
175 ret = close(obj->fd);
176 if (ret) {
177 ERR("close notify sock %d RCU", obj->fd);
178 }
179 lttng_fd_put(LTTNG_FD_APPS, 1);
180
181 free(obj);
182 }
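close_notify_sock_rcu() is an instance of the classic call_rcu() reclamation pattern: embed a struct rcu_head in the object and let the callback recover the object with caa_container_of() once a grace period has elapsed. A stripped-down sketch of that pattern is shown below, for illustration only; sock_obj and free_sock_obj_rcu() are invented names.

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* call_rcu(), rcu_barrier() */
#include <urcu/compiler.h>	/* caa_container_of() */

struct sock_obj {
	int fd;
	struct rcu_head head;	/* embedded so the callback can find us back */
};

/* Runs from the call_rcu worker thread, after a grace period. */
static void free_sock_obj_rcu(struct rcu_head *head)
{
	struct sock_obj *obj = caa_container_of(head, struct sock_obj, head);

	/* No RCU reader can still hold a reference to obj at this point. */
	printf("reclaiming fd %d\n", obj->fd);
	free(obj);
}

int main(void)
{
	struct sock_obj *obj;

	rcu_register_thread();

	obj = malloc(sizeof(*obj));
	obj->fd = 7;

	/*
	 * Once the object has been unpublished (removed from whatever RCU
	 * data structure made it visible), defer its reclamation instead
	 * of freeing it right away.
	 */
	call_rcu(&obj->head, free_sock_obj_rcu);

	rcu_barrier();		/* for the demo: wait for pending callbacks */
	rcu_unregister_thread();
	return 0;
}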
183
184 /*
185 * Return the session registry according to the buffer type of the given
186 * session.
187 *
188 * A registry per UID object MUST exist before calling this function or else
189 * NULL is returned. The RCU read side lock must be acquired.
190 */
191 static struct ust_registry_session *get_session_registry(
192 struct ust_app_session *ua_sess)
193 {
194 struct ust_registry_session *registry = NULL;
195
196 assert(ua_sess);
197
198 switch (ua_sess->buffer_type) {
199 case LTTNG_BUFFER_PER_PID:
200 {
201 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
202 if (!reg_pid) {
203 goto error;
204 }
205 registry = reg_pid->registry->reg.ust;
206 break;
207 }
208 case LTTNG_BUFFER_PER_UID:
209 {
210 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
211 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
212 if (!reg_uid) {
213 goto error;
214 }
215 registry = reg_uid->registry->reg.ust;
216 break;
217 }
218 default:
219 assert(0);
220 };
221
222 error:
223 return registry;
224 }
225
226 /*
227 * Delete ust context safely. RCU read lock must be held before calling
228 * this function.
229 */
230 static
231 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
232 {
233 int ret;
234
235 assert(ua_ctx);
236
237 if (ua_ctx->obj) {
238 ret = ustctl_release_object(sock, ua_ctx->obj);
239 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
240 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
241 sock, ua_ctx->obj->handle, ret);
242 }
243 free(ua_ctx->obj);
244 }
245 free(ua_ctx);
246 }
247
248 /*
249 * Delete ust app event safely. RCU read lock must be held before calling
250 * this function.
251 */
252 static
253 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
254 {
255 int ret;
256
257 assert(ua_event);
258
259 free(ua_event->filter);
260
261 if (ua_event->obj != NULL) {
262 ret = ustctl_release_object(sock, ua_event->obj);
263 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
264 ERR("UST app sock %d release event obj failed with ret %d",
265 sock, ret);
266 }
267 free(ua_event->obj);
268 }
269 free(ua_event);
270 }
271
272 /*
273 * Release ust data object of the given stream.
274 *
275 * Return 0 on success or else a negative value.
276 */
277 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
278 {
279 int ret = 0;
280
281 assert(stream);
282
283 if (stream->obj) {
284 ret = ustctl_release_object(sock, stream->obj);
285 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
286 ERR("UST app sock %d release stream obj failed with ret %d",
287 sock, ret);
288 }
289 lttng_fd_put(LTTNG_FD_APPS, 2);
290 free(stream->obj);
291 }
292
293 return ret;
294 }
295
296 /*
297 * Delete ust app stream safely. RCU read lock must be held before calling
298 * this function.
299 */
300 static
301 void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
302 {
303 assert(stream);
304
305 (void) release_ust_app_stream(sock, stream);
306 free(stream);
307 }
308
309 /*
310 * We need to execute ht_destroy outside of RCU read-side critical
311 * section and outside of call_rcu thread, so we postpone its execution
312 * using ht_cleanup_push. It is simpler than changing the semantics of
313 * the many callers of delete_ust_app_session().
314 */
315 static
316 void delete_ust_app_channel_rcu(struct rcu_head *head)
317 {
318 struct ust_app_channel *ua_chan =
319 caa_container_of(head, struct ust_app_channel, rcu_head);
320
321 ht_cleanup_push(ua_chan->ctx);
322 ht_cleanup_push(ua_chan->events);
323 free(ua_chan);
324 }
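The comment above is the heart of this fix: destroying a cds_lfht may itself wait for RCU grace periods, so running the destroy from the call_rcu worker thread (or inside a read-side critical section) can deadlock. lttng-tools avoids this by handing the hash tables to a dedicated cleanup thread via ht_cleanup_push(). The sketch below only illustrates that deferral idea with invented names (cleanup_pipe, ht_cleanup_thread(), cleanup_push()); it is not the sessiond's actual implementation.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <urcu.h>
#include <urcu/rculfhash.h>

static int cleanup_pipe[2];

/*
 * Dedicated cleanup thread: the only place where cds_lfht_destroy() runs.
 * It is neither a call_rcu worker nor inside a read-side critical section,
 * so the destroy can safely wait for grace periods.
 */
static void *ht_cleanup_thread(void *data)
{
	struct cds_lfht *ht;

	rcu_register_thread();
	while (read(cleanup_pipe[0], &ht, sizeof(ht)) == sizeof(ht)) {
		if (!ht) {
			break;	/* NULL is our quit message */
		}
		cds_lfht_destroy(ht, NULL);
	}
	rcu_unregister_thread();
	return NULL;
}

/* Hand the hash table to the cleanup thread instead of destroying it here. */
static void cleanup_push(struct cds_lfht *ht)
{
	ssize_t ret = write(cleanup_pipe[1], &ht, sizeof(ht));

	(void) ret;
}

int main(void)
{
	pthread_t tid;
	struct cds_lfht *ht;

	if (pipe(cleanup_pipe)) {
		return 1;
	}
	pthread_create(&tid, NULL, ht_cleanup_thread, NULL);

	rcu_register_thread();
	ht = cds_lfht_new(16, 16, 0, CDS_LFHT_AUTO_RESIZE, NULL);

	/* E.g. from an RCU callback, where destroying in place could deadlock: */
	cleanup_push(ht);

	cleanup_push(NULL);	/* ask the cleanup thread to exit */
	pthread_join(tid, NULL);
	rcu_unregister_thread();
	return 0;
}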
325
326 /*
327 * Delete ust app channel safely. RCU read lock must be held before calling
328 * this function.
329 */
330 static
331 void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
332 struct ust_app *app)
333 {
334 int ret;
335 struct lttng_ht_iter iter;
336 struct ust_app_event *ua_event;
337 struct ust_app_ctx *ua_ctx;
338 struct ust_app_stream *stream, *stmp;
339 struct ust_registry_session *registry;
340
341 assert(ua_chan);
342
343 DBG3("UST app deleting channel %s", ua_chan->name);
344
345 /* Wipe stream */
346 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
347 cds_list_del(&stream->list);
348 delete_ust_app_stream(sock, stream);
349 }
350
351 /* Wipe context */
352 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
353 ret = lttng_ht_del(ua_chan->ctx, &iter);
354 assert(!ret);
355 delete_ust_app_ctx(sock, ua_ctx);
356 }
357
358 /* Wipe events */
359 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
360 node.node) {
361 ret = lttng_ht_del(ua_chan->events, &iter);
362 assert(!ret);
363 delete_ust_app_event(sock, ua_event);
364 }
365
366 /* Wipe and free registry from session registry. */
367 registry = get_session_registry(ua_chan->session);
368 if (registry) {
369 ust_registry_channel_del_free(registry, ua_chan->key);
370 }
371
372 if (ua_chan->obj != NULL) {
373 /* Remove channel from application UST object descriptor. */
374 iter.iter.node = &ua_chan->ust_objd_node.node;
375 lttng_ht_del(app->ust_objd, &iter);
376 ret = ustctl_release_object(sock, ua_chan->obj);
377 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
378 ERR("UST app sock %d release channel obj failed with ret %d",
379 sock, ret);
380 }
381 lttng_fd_put(LTTNG_FD_APPS, 1);
382 free(ua_chan->obj);
383 }
384 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
385 }
386
387 /*
388 * Push metadata to consumer socket.
389 *
390 * The socket lock MUST be acquired.
391 * The ust app session lock MUST be acquired.
392 *
393 * On success, return the len of metadata pushed or else a negative value.
394 */
395 ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
396 struct consumer_socket *socket, int send_zero_data)
397 {
398 int ret;
399 char *metadata_str = NULL;
400 size_t len, offset;
401 ssize_t ret_val;
402
403 assert(registry);
404 assert(socket);
405
406 /*
407 * On a push metadata error either the consumer is dead or the metadata
408 * channel has been destroyed because its endpoint might have died (e.g:
409 * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
410 * metadata again which is not valid anymore on the consumer side.
411 *
412 * Holding the ust app session mutex allows us to make this check
413 * without taking the registry lock.
414 */
415 if (registry->metadata_closed) {
416 return -EPIPE;
417 }
418
419 pthread_mutex_lock(&registry->lock);
420
421 offset = registry->metadata_len_sent;
422 len = registry->metadata_len - registry->metadata_len_sent;
423 if (len == 0) {
424 DBG3("No metadata to push for metadata key %" PRIu64,
425 registry->metadata_key);
426 ret_val = len;
427 if (send_zero_data) {
428 DBG("No metadata to push");
429 goto push_data;
430 }
431 goto end;
432 }
433
434 /* Allocate only what we have to send. */
435 metadata_str = zmalloc(len);
436 if (!metadata_str) {
437 PERROR("zmalloc ust app metadata string");
438 ret_val = -ENOMEM;
439 goto error;
440 }
441 /* Copy what we haven't sent out. */
442 memcpy(metadata_str, registry->metadata + offset, len);
443 registry->metadata_len_sent += len;
444
445 push_data:
446 pthread_mutex_unlock(&registry->lock);
447 ret = consumer_push_metadata(socket, registry->metadata_key,
448 metadata_str, len, offset);
449 if (ret < 0) {
450 ret_val = ret;
451 goto error_push;
452 }
453
454 free(metadata_str);
455 return len;
456
457 end:
458 error:
459 pthread_mutex_unlock(&registry->lock);
460 error_push:
461 free(metadata_str);
462 return ret_val;
463 }
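The function above boils down to: under the registry lock, snapshot the unsent tail and advance metadata_len_sent; do the actual send outside the lock. A toy version of that bookkeeping is sketched below, with invented names (metadata_cache, push_unsent()) and none of the error-path or metadata-closed subtleties.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Toy stand-in for the registry's metadata cache. */
struct metadata_cache {
	pthread_mutex_t lock;
	const char *data;
	size_t len;		/* total bytes produced so far */
	size_t len_sent;	/* bytes already handed to the consumer */
};

static ssize_t push_unsent(struct metadata_cache *cache, int out_fd)
{
	size_t offset, len;
	char *chunk;
	ssize_t ret;

	/* Snapshot the unsent region and claim it while holding the lock. */
	pthread_mutex_lock(&cache->lock);
	offset = cache->len_sent;
	len = cache->len - cache->len_sent;
	if (len == 0) {
		pthread_mutex_unlock(&cache->lock);
		return 0;	/* nothing new to push */
	}
	chunk = malloc(len);
	if (!chunk) {
		pthread_mutex_unlock(&cache->lock);
		return -1;
	}
	memcpy(chunk, cache->data + offset, len);
	cache->len_sent += len;
	pthread_mutex_unlock(&cache->lock);

	/* The potentially slow send happens outside of the lock. */
	ret = write(out_fd, chunk, len);
	free(chunk);
	return ret;
}

int main(void)
{
	struct metadata_cache cache = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.data = "stream { id = 0; };\n",
		.len = 20,
	};

	push_unsent(&cache, STDOUT_FILENO);	/* sends everything */
	push_unsent(&cache, STDOUT_FILENO);	/* no-op: already sent */
	return 0;
}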
464
465 /*
466 * For a given application and session, push metadata to consumer. The session
467 * lock MUST be acquired here before calling this.
468 * Either sock or consumer is required : if sock is NULL, the default
469 * socket to send the metadata is retrieved from consumer, if sock
470 * is not NULL we use it to send the metadata.
471 *
472 * Return 0 on success else a negative error.
473 */
474 static int push_metadata(struct ust_registry_session *registry,
475 struct consumer_output *consumer)
476 {
477 int ret_val;
478 ssize_t ret;
479 struct consumer_socket *socket;
480
481 assert(registry);
482 assert(consumer);
483
484 rcu_read_lock();
485
486 /*
487 * Means that no metadata was assigned to the session. This can happen if
488 * no start has been done previously.
489 */
490 if (!registry->metadata_key) {
491 ret_val = 0;
492 goto end_rcu_unlock;
493 }
494
495 /* Get consumer socket to use to push the metadata. */
496 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
497 consumer);
498 if (!socket) {
499 ret_val = -1;
500 goto error_rcu_unlock;
501 }
502
503 /*
504 * TODO: Currently, we hold the socket lock around sampling of the next
505 * metadata segment to ensure we send metadata over the consumer socket in
506 * the correct order. This makes the registry lock nest inside the socket
507 * lock.
508 *
509 * Please note that this is a temporary measure: we should move this lock
510 * back into ust_consumer_push_metadata() when the consumer gets the
511 * ability to reorder the metadata it receives.
512 */
513 pthread_mutex_lock(socket->lock);
514 ret = ust_app_push_metadata(registry, socket, 0);
515 pthread_mutex_unlock(socket->lock);
516 if (ret < 0) {
517 ret_val = ret;
518 goto error_rcu_unlock;
519 }
520
521 rcu_read_unlock();
522 return 0;
523
524 error_rcu_unlock:
525 /*
526 * On error, flag the registry that the metadata is closed. We were unable
527 * to push anything and this means that either the consumer is not
528 * responding or the metadata cache has been destroyed on the consumer.
529 */
530 registry->metadata_closed = 1;
531 end_rcu_unlock:
532 rcu_read_unlock();
533 return ret_val;
534 }
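The TODO above describes a deliberate lock hierarchy: the consumer socket lock is the outer lock and the registry lock nests inside it, which is what keeps metadata segments ordered on the socket. The toy below only illustrates that ordering discipline; socket_lock, registry_lock and push_metadata_ordered() are invented for the example and simplify what the real code does.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t socket_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * The socket lock is always taken first, the registry lock second.
 * As long as every code path respects this order, the two locks
 * cannot deadlock against each other.
 */
static void push_metadata_ordered(void)
{
	pthread_mutex_lock(&socket_lock);	/* outer lock */
	pthread_mutex_lock(&registry_lock);	/* inner lock: sample the cache */
	printf("sample metadata segment under both locks\n");
	pthread_mutex_unlock(&registry_lock);
	/* Keep the socket lock while sending so segments stay in order. */
	printf("send segment over the consumer socket\n");
	pthread_mutex_unlock(&socket_lock);
}

int main(void)
{
	push_metadata_ordered();
	return 0;
}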
535
536 /*
537 * Send to the consumer a close metadata command for the given session. Once
538 * done, the metadata channel is deleted and the session metadata pointer is
539 * nullified. The session lock MUST be acquired here unless the application is
540 * in the destroy path.
541 *
542 * Return 0 on success else a negative value.
543 */
544 static int close_metadata(struct ust_registry_session *registry,
545 struct consumer_output *consumer)
546 {
547 int ret;
548 struct consumer_socket *socket;
549
550 assert(registry);
551 assert(consumer);
552
553 rcu_read_lock();
554
555 if (!registry->metadata_key || registry->metadata_closed) {
556 ret = 0;
557 goto end;
558 }
559
560 /* Get consumer socket to use to close the metadata. */
561 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
562 consumer);
563 if (!socket) {
564 ret = -1;
565 goto error;
566 }
567
568 ret = consumer_close_metadata(socket, registry->metadata_key);
569 if (ret < 0) {
570 goto error;
571 }
572
573 error:
574 /*
575 * Metadata closed. Even on error this means that the consumer is not
576 * responding or not found, so either way a second close should NOT be emitted
577 * for this registry.
578 */
579 registry->metadata_closed = 1;
580 end:
581 rcu_read_unlock();
582 return ret;
583 }
584
585 /*
586 * We need to execute ht_destroy outside of RCU read-side critical
587 * section and outside of call_rcu thread, so we postpone its execution
588 * using ht_cleanup_push. It is simpler than changing the semantics of
589 * the many callers of delete_ust_app_session().
590 */
591 static
592 void delete_ust_app_session_rcu(struct rcu_head *head)
593 {
594 struct ust_app_session *ua_sess =
595 caa_container_of(head, struct ust_app_session, rcu_head);
596
597 ht_cleanup_push(ua_sess->channels);
598 free(ua_sess);
599 }
600
601 /*
602 * Delete ust app session safely. RCU read lock must be held before calling
603 * this function.
604 */
605 static
606 void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
607 struct ust_app *app)
608 {
609 int ret;
610 struct lttng_ht_iter iter;
611 struct ust_app_channel *ua_chan;
612 struct ust_registry_session *registry;
613
614 assert(ua_sess);
615
616 pthread_mutex_lock(&ua_sess->lock);
617
618 registry = get_session_registry(ua_sess);
619 if (registry && !registry->metadata_closed) {
620 /* Push metadata for application before freeing the application. */
621 (void) push_metadata(registry, ua_sess->consumer);
622
623 /*
624 * Don't ask to close metadata for global per UID buffers. In that
625 * case, metadata is only closed when the trace session is destroyed.
626 * Also, the previous push metadata could have flagged the registry as
627 * closed, so don't send a close command if it is already closed.
628 */
629 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
630 !registry->metadata_closed) {
631 /* And ask to close it for this session registry. */
632 (void) close_metadata(registry, ua_sess->consumer);
633 }
634 }
635
636 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
637 node.node) {
638 ret = lttng_ht_del(ua_sess->channels, &iter);
639 assert(!ret);
640 delete_ust_app_channel(sock, ua_chan, app);
641 }
642
643 /* In case of per PID, the registry is kept in the session. */
644 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
645 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
646 if (reg_pid) {
647 buffer_reg_pid_remove(reg_pid);
648 buffer_reg_pid_destroy(reg_pid);
649 }
650 }
651
652 if (ua_sess->handle != -1) {
653 ret = ustctl_release_handle(sock, ua_sess->handle);
654 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
655 ERR("UST app sock %d release session handle failed with ret %d",
656 sock, ret);
657 }
658 }
659 pthread_mutex_unlock(&ua_sess->lock);
660
661 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
662 }
663
664 /*
665 * Delete a traceable application structure from the global list. Never call
666 * this function outside of a call_rcu call.
667 *
668 * RCU read side lock should _NOT_ be held when calling this function.
669 */
670 static
671 void delete_ust_app(struct ust_app *app)
672 {
673 int ret, sock;
674 struct ust_app_session *ua_sess, *tmp_ua_sess;
675
676 /* Delete ust app sessions info */
677 sock = app->sock;
678 app->sock = -1;
679
680 /* Wipe sessions */
681 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
682 teardown_node) {
683 /* Free every object in the session and the session. */
684 rcu_read_lock();
685 delete_ust_app_session(sock, ua_sess, app);
686 rcu_read_unlock();
687 }
688
689 ht_cleanup_push(app->sessions);
690 ht_cleanup_push(app->ust_objd);
691
692 /*
693 * Wait until we have deleted the application from the sock hash table
694 * before closing this socket, otherwise an application could re-use the
695 * socket ID and race with the teardown, using the same hash table entry.
696 *
697 * It's OK to leave the close in call_rcu. We want it to stay unique for
698 * all RCU readers that could run concurrently with unregister app,
699 * therefore we _need_ to only close that socket after a grace period. So
700 * it should stay in this RCU callback.
701 *
702 * This close() is a very important step of the synchronization model so
703 * every modification to this function must be carefully reviewed.
704 */
705 ret = close(sock);
706 if (ret) {
707 PERROR("close");
708 }
709 lttng_fd_put(LTTNG_FD_APPS, 1);
710
711 DBG2("UST app pid %d deleted", app->pid);
712 free(app);
713 }
714
715 /*
716 * URCU intermediate call to delete an UST app.
717 */
718 static
719 void delete_ust_app_rcu(struct rcu_head *head)
720 {
721 struct lttng_ht_node_ulong *node =
722 caa_container_of(head, struct lttng_ht_node_ulong, head);
723 struct ust_app *app =
724 caa_container_of(node, struct ust_app, pid_n);
725
726 DBG3("Call RCU deleting app PID %d", app->pid);
727 delete_ust_app(app);
728 }
729
730 /*
731 * Delete the session from the application ht and delete the data structure by
732 * freeing every object inside and releasing them.
733 */
734 static void destroy_app_session(struct ust_app *app,
735 struct ust_app_session *ua_sess)
736 {
737 int ret;
738 struct lttng_ht_iter iter;
739
740 assert(app);
741 assert(ua_sess);
742
743 iter.iter.node = &ua_sess->node.node;
744 ret = lttng_ht_del(app->sessions, &iter);
745 if (ret) {
746 /* Already scheduled for teardown. */
747 goto end;
748 }
749
750 /* Once deleted, free the data structure. */
751 delete_ust_app_session(app->sock, ua_sess, app);
752
753 end:
754 return;
755 }
756
757 /*
758 * Alloc new UST app session.
759 */
760 static
761 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
762 {
763 struct ust_app_session *ua_sess;
764
765 /* Init most of the default values by allocating and zeroing */
766 ua_sess = zmalloc(sizeof(struct ust_app_session));
767 if (ua_sess == NULL) {
768 PERROR("malloc");
769 goto error_free;
770 }
771
772 ua_sess->handle = -1;
773 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
774 pthread_mutex_init(&ua_sess->lock, NULL);
775
776 return ua_sess;
777
778 error_free:
779 return NULL;
780 }
781
782 /*
783 * Alloc new UST app channel.
784 */
785 static
786 struct ust_app_channel *alloc_ust_app_channel(char *name,
787 struct ust_app_session *ua_sess,
788 struct lttng_ust_channel_attr *attr)
789 {
790 struct ust_app_channel *ua_chan;
791
792 /* Init most of the default values by allocating and zeroing */
793 ua_chan = zmalloc(sizeof(struct ust_app_channel));
794 if (ua_chan == NULL) {
795 PERROR("malloc");
796 goto error;
797 }
798
799 /* Setup channel name */
800 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
801 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
802
803 ua_chan->enabled = 1;
804 ua_chan->handle = -1;
805 ua_chan->session = ua_sess;
806 ua_chan->key = get_next_channel_key();
807 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
808 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
809 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
810
811 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
812
813 /* Copy attributes */
814 if (attr) {
815 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
816 ua_chan->attr.subbuf_size = attr->subbuf_size;
817 ua_chan->attr.num_subbuf = attr->num_subbuf;
818 ua_chan->attr.overwrite = attr->overwrite;
819 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
820 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
821 ua_chan->attr.output = attr->output;
822 }
823 /* By default, the channel is a per cpu channel. */
824 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
825
826 DBG3("UST app channel %s allocated", ua_chan->name);
827
828 return ua_chan;
829
830 error:
831 return NULL;
832 }
833
834 /*
835 * Allocate and initialize a UST app stream.
836 *
837 * Return newly allocated stream pointer or NULL on error.
838 */
839 struct ust_app_stream *ust_app_alloc_stream(void)
840 {
841 struct ust_app_stream *stream = NULL;
842
843 stream = zmalloc(sizeof(*stream));
844 if (stream == NULL) {
845 PERROR("zmalloc ust app stream");
846 goto error;
847 }
848
849 /* Zero could be a valid value for a handle so initialize it to -1. */
850 stream->handle = -1;
851
852 error:
853 return stream;
854 }
855
856 /*
857 * Alloc new UST app event.
858 */
859 static
860 struct ust_app_event *alloc_ust_app_event(char *name,
861 struct lttng_ust_event *attr)
862 {
863 struct ust_app_event *ua_event;
864
865 /* Init most of the default values by allocating and zeroing */
866 ua_event = zmalloc(sizeof(struct ust_app_event));
867 if (ua_event == NULL) {
868 PERROR("malloc");
869 goto error;
870 }
871
872 ua_event->enabled = 1;
873 strncpy(ua_event->name, name, sizeof(ua_event->name));
874 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
875 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
876
877 /* Copy attributes */
878 if (attr) {
879 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
880 }
881
882 DBG3("UST app event %s allocated", ua_event->name);
883
884 return ua_event;
885
886 error:
887 return NULL;
888 }
889
890 /*
891 * Alloc new UST app context.
892 */
893 static
894 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
895 {
896 struct ust_app_ctx *ua_ctx;
897
898 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
899 if (ua_ctx == NULL) {
900 goto error;
901 }
902
903 if (uctx) {
904 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
905 }
906
907 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
908
909 error:
910 return ua_ctx;
911 }
912
913 /*
914 * Allocate a filter and copy the given original filter.
915 *
916 * Return allocated filter or NULL on error.
917 */
918 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
919 struct lttng_ust_filter_bytecode *orig_f)
920 {
921 struct lttng_ust_filter_bytecode *filter = NULL;
922
923 /* Copy filter bytecode */
924 filter = zmalloc(sizeof(*filter) + orig_f->len);
925 if (!filter) {
926 PERROR("zmalloc alloc ust app filter");
927 goto error;
928 }
929
930 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
931
932 error:
933 return filter;
934 }
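alloc_copy_ust_app_filter() relies on the "header plus trailing payload in one allocation" layout of the filter bytecode: allocating sizeof(*filter) + len and copying the same amount clones header and bytecode in one shot. A generic sketch of that pattern follows; struct bytecode and bytecode_copy() are illustrative names, not lttng types.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy bytecode object: fixed header followed by `len` bytes of payload. */
struct bytecode {
	uint32_t len;
	char data[];		/* flexible array member */
};

/* Deep-copy header + payload in one allocation. */
static struct bytecode *bytecode_copy(const struct bytecode *orig)
{
	struct bytecode *copy;

	copy = calloc(1, sizeof(*copy) + orig->len);
	if (!copy) {
		return NULL;
	}
	memcpy(copy, orig, sizeof(*copy) + orig->len);
	return copy;
}

int main(void)
{
	struct bytecode *orig, *copy;

	orig = calloc(1, sizeof(*orig) + 4);
	orig->len = 4;
	memcpy(orig->data, "\x01\x02\x03\x04", 4);

	copy = bytecode_copy(orig);
	printf("copied %u byte(s), first byte %d\n", copy->len, copy->data[0]);

	free(copy);
	free(orig);
	return 0;
}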
935
936 /*
937 * Find an ust_app using the sock and return it. RCU read side lock must be
938 * held before calling this helper function.
939 */
940 static
941 struct ust_app *find_app_by_sock(int sock)
942 {
943 struct lttng_ht_node_ulong *node;
944 struct lttng_ht_iter iter;
945
946 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
947 node = lttng_ht_iter_get_node_ulong(&iter);
948 if (node == NULL) {
949 DBG2("UST app find by sock %d not found", sock);
950 goto error;
951 }
952
953 return caa_container_of(node, struct ust_app, sock_n);
954
955 error:
956 return NULL;
957 }
958
959 /*
960 * Find an ust_app using the notify sock and return it. RCU read side lock must
961 * be held before calling this helper function.
962 */
963 static struct ust_app *find_app_by_notify_sock(int sock)
964 {
965 struct lttng_ht_node_ulong *node;
966 struct lttng_ht_iter iter;
967
968 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
969 &iter);
970 node = lttng_ht_iter_get_node_ulong(&iter);
971 if (node == NULL) {
972 DBG2("UST app find by notify sock %d not found", sock);
973 goto error;
974 }
975
976 return caa_container_of(node, struct ust_app, notify_sock_n);
977
978 error:
979 return NULL;
980 }
981
982 /*
983 * Look up an ust app event based on event name, filter bytecode and the
984 * event loglevel.
985 *
986 * Return an ust_app_event object or NULL on error.
987 */
988 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
989 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel)
990 {
991 struct lttng_ht_iter iter;
992 struct lttng_ht_node_str *node;
993 struct ust_app_event *event = NULL;
994 struct ust_app_ht_key key;
995
996 assert(name);
997 assert(ht);
998
999 /* Setup key for event lookup. */
1000 key.name = name;
1001 key.filter = filter;
1002 key.loglevel = loglevel;
1003
1004 /* Lookup using the event name as hash and a custom match fct. */
1005 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1006 ht_match_ust_app_event, &key, &iter.iter);
1007 node = lttng_ht_iter_get_node_str(&iter);
1008 if (node == NULL) {
1009 goto end;
1010 }
1011
1012 event = caa_container_of(node, struct ust_app_event, node);
1013
1014 end:
1015 return event;
1016 }
1017
1018 /*
1019 * Create the channel context on the tracer.
1020 *
1021 * Called with UST app session lock held.
1022 */
1023 static
1024 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1025 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1026 {
1027 int ret;
1028
1029 health_code_update();
1030
1031 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1032 ua_chan->obj, &ua_ctx->obj);
1033 if (ret < 0) {
1034 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1035 ERR("UST app create channel context failed for app (pid: %d) "
1036 "with ret %d", app->pid, ret);
1037 } else {
1038 DBG3("UST app disable event failed. Application is dead.");
1039 }
1040 goto error;
1041 }
1042
1043 ua_ctx->handle = ua_ctx->obj->handle;
1044
1045 DBG2("UST app context handle %d created successfully for channel %s",
1046 ua_ctx->handle, ua_chan->name);
1047
1048 error:
1049 health_code_update();
1050 return ret;
1051 }
1052
1053 /*
1054 * Set the filter on the tracer.
1055 */
1056 static
1057 int set_ust_event_filter(struct ust_app_event *ua_event,
1058 struct ust_app *app)
1059 {
1060 int ret;
1061
1062 health_code_update();
1063
1064 if (!ua_event->filter) {
1065 ret = 0;
1066 goto error;
1067 }
1068
1069 ret = ustctl_set_filter(app->sock, ua_event->filter,
1070 ua_event->obj);
1071 if (ret < 0) {
1072 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1073 ERR("UST app event %s filter failed for app (pid: %d) "
1074 "with ret %d", ua_event->attr.name, app->pid, ret);
1075 } else {
1076 DBG3("UST app filter event failed. Application is dead.");
1077 }
1078 goto error;
1079 }
1080
1081 DBG2("UST filter set successfully for event %s", ua_event->name);
1082
1083 error:
1084 health_code_update();
1085 return ret;
1086 }
1087
1088 /*
1089 * Disable the specified event on the UST tracer for the UST session.
1090 */
1091 static int disable_ust_event(struct ust_app *app,
1092 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1093 {
1094 int ret;
1095
1096 health_code_update();
1097
1098 ret = ustctl_disable(app->sock, ua_event->obj);
1099 if (ret < 0) {
1100 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1101 ERR("UST app event %s disable failed for app (pid: %d) "
1102 "and session handle %d with ret %d",
1103 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1104 } else {
1105 DBG3("UST app disable event failed. Application is dead.");
1106 }
1107 goto error;
1108 }
1109
1110 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1111 ua_event->attr.name, app->pid);
1112
1113 error:
1114 health_code_update();
1115 return ret;
1116 }
1117
1118 /*
1119 * Disable the specified channel on the UST tracer for the UST session.
1120 */
1121 static int disable_ust_channel(struct ust_app *app,
1122 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1123 {
1124 int ret;
1125
1126 health_code_update();
1127
1128 ret = ustctl_disable(app->sock, ua_chan->obj);
1129 if (ret < 0) {
1130 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1131 ERR("UST app channel %s disable failed for app (pid: %d) "
1132 "and session handle %d with ret %d",
1133 ua_chan->name, app->pid, ua_sess->handle, ret);
1134 } else {
1135 DBG3("UST app disable channel failed. Application is dead.");
1136 }
1137 goto error;
1138 }
1139
1140 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1141 ua_chan->name, app->pid);
1142
1143 error:
1144 health_code_update();
1145 return ret;
1146 }
1147
1148 /*
1149 * Enable the specified channel on the UST tracer for the UST session.
1150 */
1151 static int enable_ust_channel(struct ust_app *app,
1152 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1153 {
1154 int ret;
1155
1156 health_code_update();
1157
1158 ret = ustctl_enable(app->sock, ua_chan->obj);
1159 if (ret < 0) {
1160 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1161 ERR("UST app channel %s enable failed for app (pid: %d) "
1162 "and session handle %d with ret %d",
1163 ua_chan->name, app->pid, ua_sess->handle, ret);
1164 } else {
1165 DBG3("UST app enable channel failed. Application is dead.");
1166 }
1167 goto error;
1168 }
1169
1170 ua_chan->enabled = 1;
1171
1172 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1173 ua_chan->name, app->pid);
1174
1175 error:
1176 health_code_update();
1177 return ret;
1178 }
1179
1180 /*
1181 * Enable the specified event on the UST tracer for the UST session.
1182 */
1183 static int enable_ust_event(struct ust_app *app,
1184 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1185 {
1186 int ret;
1187
1188 health_code_update();
1189
1190 ret = ustctl_enable(app->sock, ua_event->obj);
1191 if (ret < 0) {
1192 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1193 ERR("UST app event %s enable failed for app (pid: %d) "
1194 "and session handle %d with ret %d",
1195 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1196 } else {
1197 DBG3("UST app enable event failed. Application is dead.");
1198 }
1199 goto error;
1200 }
1201
1202 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1203 ua_event->attr.name, app->pid);
1204
1205 error:
1206 health_code_update();
1207 return ret;
1208 }
1209
1210 /*
1211 * Send channel and stream buffer to application.
1212 *
1213 * Return 0 on success. On error, a negative value is returned.
1214 */
1215 static int send_channel_pid_to_ust(struct ust_app *app,
1216 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1217 {
1218 int ret;
1219 struct ust_app_stream *stream, *stmp;
1220
1221 assert(app);
1222 assert(ua_sess);
1223 assert(ua_chan);
1224
1225 health_code_update();
1226
1227 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1228 app->sock);
1229
1230 /* Send channel to the application. */
1231 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1232 if (ret < 0) {
1233 goto error;
1234 }
1235
1236 health_code_update();
1237
1238 /* Send all streams to application. */
1239 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1240 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1241 if (ret < 0) {
1242 goto error;
1243 }
1244 /* We don't need the stream anymore once sent to the tracer. */
1245 cds_list_del(&stream->list);
1246 delete_ust_app_stream(-1, stream);
1247 }
1248 /* Flag the channel that it is sent to the application. */
1249 ua_chan->is_sent = 1;
1250
1251 error:
1252 health_code_update();
1253 return ret;
1254 }
1255
1256 /*
1257 * Create the specified event onto the UST tracer for a UST session.
1258 *
1259 * Should be called with session mutex held.
1260 */
1261 static
1262 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1263 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1264 {
1265 int ret = 0;
1266
1267 health_code_update();
1268
1269 /* Create UST event on tracer */
1270 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1271 &ua_event->obj);
1272 if (ret < 0) {
1273 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1274 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1275 ua_event->attr.name, app->pid, ret);
1276 } else {
1277 DBG3("UST app create event failed. Application is dead.");
1278 }
1279 goto error;
1280 }
1281
1282 ua_event->handle = ua_event->obj->handle;
1283
1284 DBG2("UST app event %s created successfully for pid:%d",
1285 ua_event->attr.name, app->pid);
1286
1287 health_code_update();
1288
1289 /* Set filter if one is present. */
1290 if (ua_event->filter) {
1291 ret = set_ust_event_filter(ua_event, app);
1292 if (ret < 0) {
1293 goto error;
1294 }
1295 }
1296
1297 /* If event not enabled, disable it on the tracer */
1298 if (ua_event->enabled == 0) {
1299 ret = disable_ust_event(app, ua_sess, ua_event);
1300 if (ret < 0) {
1301 /*
1302 * If we hit an EPERM, something is wrong with our disable call. If
1303 * we get an EEXIST, there is a problem on the tracer side since we
1304 * just created it.
1305 */
1306 switch (ret) {
1307 case -LTTNG_UST_ERR_PERM:
1308 /* Code flow problem */
1309 assert(0);
1310 case -LTTNG_UST_ERR_EXIST:
1311 /* It's OK for our use case. */
1312 ret = 0;
1313 break;
1314 default:
1315 break;
1316 }
1317 goto error;
1318 }
1319 }
1320
1321 error:
1322 health_code_update();
1323 return ret;
1324 }
1325
1326 /*
1327 * Copy data between an UST app event and a LTT event.
1328 */
1329 static void shadow_copy_event(struct ust_app_event *ua_event,
1330 struct ltt_ust_event *uevent)
1331 {
1332 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1333 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1334
1335 ua_event->enabled = uevent->enabled;
1336
1337 /* Copy event attributes */
1338 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1339
1340 /* Copy filter bytecode */
1341 if (uevent->filter) {
1342 ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
1343 /* Filter might be NULL here in case of ENOMEM. */
1344 }
1345 }
1346
1347 /*
1348 * Copy data between an UST app channel and a LTT channel.
1349 */
1350 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
1351 struct ltt_ust_channel *uchan)
1352 {
1353 struct lttng_ht_iter iter;
1354 struct ltt_ust_event *uevent;
1355 struct ltt_ust_context *uctx;
1356 struct ust_app_event *ua_event;
1357 struct ust_app_ctx *ua_ctx;
1358
1359 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
1360
1361 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
1362 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1363
1364 ua_chan->tracefile_size = uchan->tracefile_size;
1365 ua_chan->tracefile_count = uchan->tracefile_count;
1366
1367 /* Copy channel attributes since the layout is different. */
1368 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
1369 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
1370 ua_chan->attr.overwrite = uchan->attr.overwrite;
1371 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
1372 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
1373 ua_chan->attr.output = uchan->attr.output;
1374 /*
1375 * Note that the attribute channel type is not set since the channel on the
1376 * tracing registry side does not have this information.
1377 */
1378
1379 ua_chan->enabled = uchan->enabled;
1380 ua_chan->tracing_channel_id = uchan->id;
1381
1382 cds_lfht_for_each_entry(uchan->ctx->ht, &iter.iter, uctx, node.node) {
1383 ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
1384 if (ua_ctx == NULL) {
1385 continue;
1386 }
1387 lttng_ht_node_init_ulong(&ua_ctx->node,
1388 (unsigned long) ua_ctx->ctx.ctx);
1389 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1390 }
1391
1392 /* Copy all events from ltt ust channel to ust app channel */
1393 cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
1394 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
1395 uevent->filter, uevent->attr.loglevel);
1396 if (ua_event == NULL) {
1397 DBG2("UST event %s not found on shadow copy channel",
1398 uevent->attr.name);
1399 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
1400 if (ua_event == NULL) {
1401 continue;
1402 }
1403 shadow_copy_event(ua_event, uevent);
1404 add_unique_ust_app_event(ua_chan, ua_event);
1405 }
1406 }
1407
1408 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
1409 }
1410
1411 /*
1412 * Copy data between a UST app session and a regular LTT session.
1413 */
1414 static void shadow_copy_session(struct ust_app_session *ua_sess,
1415 struct ltt_ust_session *usess, struct ust_app *app)
1416 {
1417 struct lttng_ht_node_str *ua_chan_node;
1418 struct lttng_ht_iter iter;
1419 struct ltt_ust_channel *uchan;
1420 struct ust_app_channel *ua_chan;
1421 time_t rawtime;
1422 struct tm *timeinfo;
1423 char datetime[16];
1424 int ret;
1425
1426 /* Get date and time for unique app path */
1427 time(&rawtime);
1428 timeinfo = localtime(&rawtime);
1429 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1430
1431 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1432
1433 ua_sess->tracing_id = usess->id;
1434 ua_sess->id = get_next_session_id();
1435 ua_sess->uid = app->uid;
1436 ua_sess->gid = app->gid;
1437 ua_sess->euid = usess->uid;
1438 ua_sess->egid = usess->gid;
1439 ua_sess->buffer_type = usess->buffer_type;
1440 ua_sess->bits_per_long = app->bits_per_long;
1441 /* Only one consumer object per session is possible. */
1442 ua_sess->consumer = usess->consumer;
1443
1444 switch (ua_sess->buffer_type) {
1445 case LTTNG_BUFFER_PER_PID:
1446 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1447 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1448 datetime);
1449 break;
1450 case LTTNG_BUFFER_PER_UID:
1451 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1452 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1453 break;
1454 default:
1455 assert(0);
1456 goto error;
1457 }
1458 if (ret < 0) {
1459 PERROR("asprintf UST shadow copy session");
1460 assert(0);
1461 goto error;
1462 }
1463
1464 /* Iterate over all channels in global domain. */
1465 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1466 uchan, node.node) {
1467 struct lttng_ht_iter uiter;
1468
1469 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1470 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1471 if (ua_chan_node != NULL) {
1472 /* Channel already exists. Continuing. */
1473 continue;
1474 }
1475
1476 DBG2("Channel %s not found on shadow session copy, creating it",
1477 uchan->name);
1478 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1479 if (ua_chan == NULL) {
1480 /* malloc failed. FIXME: Might want to handle ENOMEM. */
1481 continue;
1482 }
1483 shadow_copy_channel(ua_chan, uchan);
1484 /*
1485 * The concept of metadata channel does not exist on the tracing
1486 * registry side of the session daemon so this can only be a per CPU
1487 * channel and not metadata.
1488 */
1489 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1490
1491 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1492 }
1493
1494 error:
1495 return;
1496 }
1497
1498 /*
1499 * Lookup session wrapper.
1500 */
1501 static
1502 void __lookup_session_by_app(struct ltt_ust_session *usess,
1503 struct ust_app *app, struct lttng_ht_iter *iter)
1504 {
1505 /* Get right UST app session from app */
1506 lttng_ht_lookup(app->sessions, (void *)((unsigned long) usess->id), iter);
1507 }
1508
1509 /*
1510 * Return ust app session from the app session hashtable using the UST session
1511 * id.
1512 */
1513 static struct ust_app_session *lookup_session_by_app(
1514 struct ltt_ust_session *usess, struct ust_app *app)
1515 {
1516 struct lttng_ht_iter iter;
1517 struct lttng_ht_node_ulong *node;
1518
1519 __lookup_session_by_app(usess, app, &iter);
1520 node = lttng_ht_iter_get_node_ulong(&iter);
1521 if (node == NULL) {
1522 goto error;
1523 }
1524
1525 return caa_container_of(node, struct ust_app_session, node);
1526
1527 error:
1528 return NULL;
1529 }
1530
1531 /*
1532 * Setup buffer registry per PID for the given session and application. If none
1533 * is found, a new one is created, added to the global registry and
1534 * initialized. If regp is valid, it's set with the newly created object.
1535 *
1536 * Return 0 on success or else a negative value.
1537 */
1538 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
1539 struct ust_app *app, struct buffer_reg_pid **regp)
1540 {
1541 int ret = 0;
1542 struct buffer_reg_pid *reg_pid;
1543
1544 assert(ua_sess);
1545 assert(app);
1546
1547 rcu_read_lock();
1548
1549 reg_pid = buffer_reg_pid_find(ua_sess->id);
1550 if (!reg_pid) {
1551 /*
1552 * This is the create channel path meaning that if there is NO
1553 * registry available, we have to create one for this session.
1554 */
1555 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
1556 if (ret < 0) {
1557 goto error;
1558 }
1559 buffer_reg_pid_add(reg_pid);
1560 } else {
1561 goto end;
1562 }
1563
1564 /* Initialize registry. */
1565 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
1566 app->bits_per_long, app->uint8_t_alignment,
1567 app->uint16_t_alignment, app->uint32_t_alignment,
1568 app->uint64_t_alignment, app->long_alignment,
1569 app->byte_order, app->version.major,
1570 app->version.minor);
1571 if (ret < 0) {
1572 goto error;
1573 }
1574
1575 DBG3("UST app buffer registry per PID created successfully");
1576
1577 end:
1578 if (regp) {
1579 *regp = reg_pid;
1580 }
1581 error:
1582 rcu_read_unlock();
1583 return ret;
1584 }
1585
1586 /*
1587 * Setup buffer registry per UID for the given session and application. If none
1588 * is found, a new one is created, added to the global registry and
1589 * initialized. If regp is valid, it's set with the newly created object.
1590 *
1591 * Return 0 on success or else a negative value.
1592 */
1593 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
1594 struct ust_app *app, struct buffer_reg_uid **regp)
1595 {
1596 int ret = 0;
1597 struct buffer_reg_uid *reg_uid;
1598
1599 assert(usess);
1600 assert(app);
1601
1602 rcu_read_lock();
1603
1604 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
1605 if (!reg_uid) {
1606 /*
1607 * This is the create channel path meaning that if there is NO
1608 * registry available, we have to create one for this session.
1609 */
1610 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
1611 LTTNG_DOMAIN_UST, &reg_uid);
1612 if (ret < 0) {
1613 goto error;
1614 }
1615 buffer_reg_uid_add(reg_uid);
1616 } else {
1617 goto end;
1618 }
1619
1620 /* Initialize registry. */
1621 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
1622 app->bits_per_long, app->uint8_t_alignment,
1623 app->uint16_t_alignment, app->uint32_t_alignment,
1624 app->uint64_t_alignment, app->long_alignment,
1625 app->byte_order, app->version.major,
1626 app->version.minor);
1627 if (ret < 0) {
1628 goto error;
1629 }
1630 /* Add node to teardown list of the session. */
1631 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
1632
1633 DBG3("UST app buffer registry per UID created successfully");
1634
1635 end:
1636 if (regp) {
1637 *regp = reg_uid;
1638 }
1639 error:
1640 rcu_read_unlock();
1641 return ret;
1642 }
1643
1644 /*
1645 * Create a session on the tracer side for the given app.
1646 *
1647 * On success, ua_sess_ptr is populated with the session pointer or else left
1648 * untouched. If the session was created, is_created is set to 1. On error,
1649 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
1650 * be NULL.
1651 *
1652 * Returns 0 on success or else a negative code which is either -ENOMEM or
1653 * -ENOTCONN which is the default code if the ustctl_create_session fails.
1654 */
1655 static int create_ust_app_session(struct ltt_ust_session *usess,
1656 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
1657 int *is_created)
1658 {
1659 int ret, created = 0;
1660 struct ust_app_session *ua_sess;
1661
1662 assert(usess);
1663 assert(app);
1664 assert(ua_sess_ptr);
1665
1666 health_code_update();
1667
1668 ua_sess = lookup_session_by_app(usess, app);
1669 if (ua_sess == NULL) {
1670 DBG2("UST app pid: %d session id %d not found, creating it",
1671 app->pid, usess->id);
1672 ua_sess = alloc_ust_app_session(app);
1673 if (ua_sess == NULL) {
1674 /* Only malloc can fail so something is really wrong */
1675 ret = -ENOMEM;
1676 goto error;
1677 }
1678 shadow_copy_session(ua_sess, usess, app);
1679 created = 1;
1680 }
1681
1682 switch (usess->buffer_type) {
1683 case LTTNG_BUFFER_PER_PID:
1684 /* Init local registry. */
1685 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
1686 if (ret < 0) {
1687 goto error;
1688 }
1689 break;
1690 case LTTNG_BUFFER_PER_UID:
1691 /* Look for a global registry. If none exists, create one. */
1692 ret = setup_buffer_reg_uid(usess, app, NULL);
1693 if (ret < 0) {
1694 goto error;
1695 }
1696 break;
1697 default:
1698 assert(0);
1699 ret = -EINVAL;
1700 goto error;
1701 }
1702
1703 health_code_update();
1704
1705 if (ua_sess->handle == -1) {
1706 ret = ustctl_create_session(app->sock);
1707 if (ret < 0) {
1708 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1709 ERR("Creating session for app pid %d with ret %d",
1710 app->pid, ret);
1711 } else {
1712 DBG("UST app creating session failed. Application is dead");
1713 }
1714 delete_ust_app_session(-1, ua_sess, app);
1715 if (ret != -ENOMEM) {
1716 /*
1717 * Tracer is probably gone or got an internal error so let's
1718 * behave as if it will soon unregister or is not usable.
1719 */
1720 ret = -ENOTCONN;
1721 }
1722 goto error;
1723 }
1724
1725 ua_sess->handle = ret;
1726
1727 /* Add ust app session to app's HT */
1728 lttng_ht_node_init_ulong(&ua_sess->node,
1729 (unsigned long) ua_sess->tracing_id);
1730 lttng_ht_add_unique_ulong(app->sessions, &ua_sess->node);
1731
1732 DBG2("UST app session created successfully with handle %d", ret);
1733 }
1734
1735 *ua_sess_ptr = ua_sess;
1736 if (is_created) {
1737 *is_created = created;
1738 }
1739
1740 /* Everything went well. */
1741 ret = 0;
1742
1743 error:
1744 health_code_update();
1745 return ret;
1746 }
1747
1748 /*
1749 * Create a context for the channel on the tracer.
1750 *
1751 * Called with UST app session lock held and a RCU read side lock.
1752 */
1753 static
1754 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
1755 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
1756 struct ust_app *app)
1757 {
1758 int ret = 0;
1759 struct lttng_ht_iter iter;
1760 struct lttng_ht_node_ulong *node;
1761 struct ust_app_ctx *ua_ctx;
1762
1763 DBG2("UST app adding context to channel %s", ua_chan->name);
1764
1765 lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
1766 node = lttng_ht_iter_get_node_ulong(&iter);
1767 if (node != NULL) {
1768 ret = -EEXIST;
1769 goto error;
1770 }
1771
1772 ua_ctx = alloc_ust_app_ctx(uctx);
1773 if (ua_ctx == NULL) {
1774 /* malloc failed */
1775 ret = -1;
1776 goto error;
1777 }
1778
1779 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
1780 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1781
1782 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
1783 if (ret < 0) {
1784 goto error;
1785 }
1786
1787 error:
1788 return ret;
1789 }
1790
1791 /*
1792 * Enable on the tracer side a ust app event for the session and channel.
1793 *
1794 * Called with UST app session lock held.
1795 */
1796 static
1797 int enable_ust_app_event(struct ust_app_session *ua_sess,
1798 struct ust_app_event *ua_event, struct ust_app *app)
1799 {
1800 int ret;
1801
1802 ret = enable_ust_event(app, ua_sess, ua_event);
1803 if (ret < 0) {
1804 goto error;
1805 }
1806
1807 ua_event->enabled = 1;
1808
1809 error:
1810 return ret;
1811 }
1812
1813 /*
1814 * Disable on the tracer side a ust app event for the session and channel.
1815 */
1816 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1817 struct ust_app_event *ua_event, struct ust_app *app)
1818 {
1819 int ret;
1820
1821 ret = disable_ust_event(app, ua_sess, ua_event);
1822 if (ret < 0) {
1823 goto error;
1824 }
1825
1826 ua_event->enabled = 0;
1827
1828 error:
1829 return ret;
1830 }
1831
1832 /*
1833 * Lookup ust app channel for session and disable it on the tracer side.
1834 */
1835 static
1836 int disable_ust_app_channel(struct ust_app_session *ua_sess,
1837 struct ust_app_channel *ua_chan, struct ust_app *app)
1838 {
1839 int ret;
1840
1841 ret = disable_ust_channel(app, ua_sess, ua_chan);
1842 if (ret < 0) {
1843 goto error;
1844 }
1845
1846 ua_chan->enabled = 0;
1847
1848 error:
1849 return ret;
1850 }
1851
1852 /*
1853 * Lookup ust app channel for session and enable it on the tracer side. This
1854 * MUST be called with a RCU read side lock acquired.
1855 */
1856 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
1857 struct ltt_ust_channel *uchan, struct ust_app *app)
1858 {
1859 int ret = 0;
1860 struct lttng_ht_iter iter;
1861 struct lttng_ht_node_str *ua_chan_node;
1862 struct ust_app_channel *ua_chan;
1863
1864 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
1865 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
1866 if (ua_chan_node == NULL) {
1867 DBG2("Unable to find channel %s in ust session id %u",
1868 uchan->name, ua_sess->tracing_id);
1869 goto error;
1870 }
1871
1872 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
1873
1874 ret = enable_ust_channel(app, ua_sess, ua_chan);
1875 if (ret < 0) {
1876 goto error;
1877 }
1878
1879 error:
1880 return ret;
1881 }
1882
1883 /*
1884 * Ask the consumer to create a channel and get it if successful.
1885 *
1886 * Return 0 on success or else a negative value.
1887 */
1888 static int do_consumer_create_channel(struct ltt_ust_session *usess,
1889 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
1890 int bitness, struct ust_registry_session *registry)
1891 {
1892 int ret;
1893 unsigned int nb_fd = 0;
1894 struct consumer_socket *socket;
1895
1896 assert(usess);
1897 assert(ua_sess);
1898 assert(ua_chan);
1899 assert(registry);
1900
1901 rcu_read_lock();
1902 health_code_update();
1903
1904 /* Get the right consumer socket for the application. */
1905 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
1906 if (!socket) {
1907 ret = -EINVAL;
1908 goto error;
1909 }
1910
1911 health_code_update();
1912
1913 /* Need one fd for the channel. */
1914 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1915 if (ret < 0) {
1916 ERR("Exhausted number of available FD upon create channel");
1917 goto error;
1918 }
1919
1920 /*
1921 * Ask consumer to create channel. The consumer will return the number of
1922 * streams we have to expect.
1923 */
1924 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
1925 registry);
1926 if (ret < 0) {
1927 goto error_ask;
1928 }
1929
1930 /*
1931 * Compute the number of fd needed before receiving them. It must be 2 per
1932 * stream (2 being the default value here).
1933 */
1934 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
1935
1936 /* Reserve the amount of file descriptor we need. */
1937 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
1938 if (ret < 0) {
1939 ERR("Exhausted number of available FD upon create channel");
1940 goto error_fd_get_stream;
1941 }
1942
1943 health_code_update();
1944
1945 /*
1946 * Now get the channel from the consumer. This call will populate the stream
1947 * list of that channel and set the ust objects.
1948 */
1949 ret = ust_consumer_get_channel(socket, ua_chan);
1950 if (ret < 0) {
1951 goto error_destroy;
1952 }
1953
1954 rcu_read_unlock();
1955 return 0;
1956
1957 error_destroy:
1958 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
1959 error_fd_get_stream:
1960 /*
1961 * Initiate a destroy channel on the consumer since we had an error
1962 * handling it on our side. The return value is of no importance since we
1963 * already have a ret value set by the previous error that we need to
1964 * return.
1965 */
1966 (void) ust_consumer_destroy_channel(socket, ua_chan);
1967 error_ask:
1968 lttng_fd_put(LTTNG_FD_APPS, 1);
1969 error:
1970 health_code_update();
1971 rcu_read_unlock();
1972 return ret;
1973 }
1974
1975 /*
1976 * Duplicate the ust data object of the ust app stream and save it in the
1977 * buffer registry stream.
1978 *
1979 * Return 0 on success or else a negative value.
1980 */
1981 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
1982 struct ust_app_stream *stream)
1983 {
1984 int ret;
1985
1986 assert(reg_stream);
1987 assert(stream);
1988
1989 /* Reserve the amount of file descriptor we need. */
1990 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
1991 if (ret < 0) {
1992 ERR("Exhausted number of available FD upon duplicate stream");
1993 goto error;
1994 }
1995
1996 /* Duplicate object for stream once the original is in the registry. */
1997 ret = ustctl_duplicate_ust_object_data(&stream->obj,
1998 reg_stream->obj.ust);
1999 if (ret < 0) {
2000 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2001 reg_stream->obj.ust, stream->obj, ret);
2002 lttng_fd_put(LTTNG_FD_APPS, 2);
2003 goto error;
2004 }
2005 stream->handle = stream->obj->handle;
2006
2007 error:
2008 return ret;
2009 }
2010
2011 /*
2012 * Duplicate the ust data object of the ust app channel and save it in the
2013 * buffer registry channel.
2014 *
2015 * Return 0 on success or else a negative value.
2016 */
2017 static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2018 struct ust_app_channel *ua_chan)
2019 {
2020 int ret;
2021
2022 assert(reg_chan);
2023 assert(ua_chan);
2024
2025 /* Need one fd for the channel. */
2026 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2027 if (ret < 0) {
2028 ERR("Exhausted number of available FD upon duplicate channel");
2029 goto error_fd_get;
2030 }
2031
2032 /* Duplicate object for the channel once the original is in the registry. */
2033 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2034 if (ret < 0) {
2035 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2036 reg_chan->obj.ust, ua_chan->obj, ret);
2037 goto error;
2038 }
2039 ua_chan->handle = ua_chan->obj->handle;
2040
2041 return 0;
2042
2043 error:
2044 lttng_fd_put(LTTNG_FD_APPS, 1);
2045 error_fd_get:
2046 return ret;
2047 }
2048
2049 /*
2050 * For a given channel buffer registry, setup all streams of the given ust
2051 * application channel.
2052 *
2053 * Return 0 on success or else a negative value.
2054 */
2055 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2056 struct ust_app_channel *ua_chan)
2057 {
2058 int ret = 0;
2059 struct ust_app_stream *stream, *stmp;
2060
2061 assert(reg_chan);
2062 assert(ua_chan);
2063
2064 DBG2("UST app setup buffer registry stream");
2065
2066 /* Move each stream's ustctl object from the app channel to the registry. */
2067 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2068 struct buffer_reg_stream *reg_stream;
2069
2070 ret = buffer_reg_stream_create(&reg_stream);
2071 if (ret < 0) {
2072 goto error;
2073 }
2074
2075 /*
2076 * Keep original pointer and nullify it in the stream so the delete
2077 * stream call does not release the object.
2078 */
2079 reg_stream->obj.ust = stream->obj;
2080 stream->obj = NULL;
2081 buffer_reg_stream_add(reg_stream, reg_chan);
2082
2083 /* We don't need the streams anymore. */
2084 cds_list_del(&stream->list);
2085 delete_ust_app_stream(-1, stream);
2086 }
2087
2088 error:
2089 return ret;
2090 }
2091
2092 /*
2093 * Create a buffer registry channel for the given session registry and
2094 * application channel object. If regp pointer is valid, it's set with the
2095 * created object. Important, the created object is NOT added to the session
2096 * registry hash table.
2097 *
2098 * Return 0 on success else a negative value.
2099 */
2100 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2101 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2102 {
2103 int ret;
2104 struct buffer_reg_channel *reg_chan = NULL;
2105
2106 assert(reg_sess);
2107 assert(ua_chan);
2108
2109 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2110
2111 /* Create buffer registry channel. */
2112 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2113 if (ret < 0) {
2114 goto error_create;
2115 }
2116 assert(reg_chan);
2117 reg_chan->consumer_key = ua_chan->key;
2118
2119 /* Create and add a channel registry to session. */
2120 ret = ust_registry_channel_add(reg_sess->reg.ust,
2121 ua_chan->tracing_channel_id);
2122 if (ret < 0) {
2123 goto error;
2124 }
2125 buffer_reg_channel_add(reg_sess, reg_chan);
2126
2127 if (regp) {
2128 *regp = reg_chan;
2129 }
2130
2131 return 0;
2132
2133 error:
2134 /* Safe because the registry channel object was not added to any HT. */
2135 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2136 error_create:
2137 return ret;
2138 }
2139
2140 /*
2141 * Setup buffer registry channel for the given session registry and application
2142 * channel object. If regp pointer is valid, it's set with the created object.
2143 *
2144 * Return 0 on success else a negative value.
2145 */
2146 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2147 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2148 {
2149 int ret;
2150
2151 assert(reg_sess);
2152 assert(reg_chan);
2153 assert(ua_chan);
2154 assert(ua_chan->obj);
2155
2156 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2157
2158 /* Setup all streams for the registry. */
2159 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2160 if (ret < 0) {
2161 goto error;
2162 }
2163
2164 reg_chan->obj.ust = ua_chan->obj;
2165 ua_chan->obj = NULL;
2166
2167 return 0;
2168
2169 error:
2170 buffer_reg_channel_remove(reg_sess, reg_chan);
2171 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2172 return ret;
2173 }
2174
2175 /*
2176 * Send buffer registry channel to the application.
2177 *
2178 * Return 0 on success else a negative value.
2179 */
2180 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2181 struct ust_app *app, struct ust_app_session *ua_sess,
2182 struct ust_app_channel *ua_chan)
2183 {
2184 int ret;
2185 struct buffer_reg_stream *reg_stream;
2186
2187 assert(reg_chan);
2188 assert(app);
2189 assert(ua_sess);
2190 assert(ua_chan);
2191
2192 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2193
2194 ret = duplicate_channel_object(reg_chan, ua_chan);
2195 if (ret < 0) {
2196 goto error;
2197 }
2198
2199 /* Send channel to the application. */
2200 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
2201 if (ret < 0) {
2202 goto error;
2203 }
2204
2205 health_code_update();
2206
2207 /* Send all streams to application. */
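/*
 * With per-UID buffers, the registry streams hold the shared ring buffers;
 * each application only receives a duplicated ustctl object (its own handle
 * and FDs) pointing at those same buffers.
 */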
2208 pthread_mutex_lock(&reg_chan->stream_list_lock);
2209 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2210 struct ust_app_stream stream;
2211
2212 ret = duplicate_stream_object(reg_stream, &stream);
2213 if (ret < 0) {
2214 goto error_stream_unlock;
2215 }
2216
2217 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2218 if (ret < 0) {
2219 (void) release_ust_app_stream(-1, &stream);
2220 goto error_stream_unlock;
2221 }
2222
2223 /*
2224 * The return value is not important here. This function will output an
2225 * error if needed.
2226 */
2227 (void) release_ust_app_stream(-1, &stream);
2228 }
2229 ua_chan->is_sent = 1;
2230
2231 error_stream_unlock:
2232 pthread_mutex_unlock(&reg_chan->stream_list_lock);
2233 error:
2234 return ret;
2235 }
2236
2237 /*
2238 * Create the per UID buffers and send them to the application.
2239 *
2240 * Return 0 on success else a negative value.
2241 */
2242 static int create_channel_per_uid(struct ust_app *app,
2243 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2244 struct ust_app_channel *ua_chan)
2245 {
2246 int ret;
2247 struct buffer_reg_uid *reg_uid;
2248 struct buffer_reg_channel *reg_chan;
2249
2250 assert(app);
2251 assert(usess);
2252 assert(ua_sess);
2253 assert(ua_chan);
2254
2255 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2256
2257 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2258 /*
2259 * The session creation handles the creation of this global registry
2260 * object. If none can be found, there is a code flow problem or a
2261 * teardown race.
2262 */
2263 assert(reg_uid);
2264
2265 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2266 reg_uid);
2267 if (!reg_chan) {
2268 /* Create the buffer registry channel object. */
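/*
 * First application for this UID/bitness: the channel must also be created
 * on the consumer and its objects moved into the registry below, so later
 * applications can reuse them.
 */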
2269 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2270 if (ret < 0) {
2271 goto error;
2272 }
2273 assert(reg_chan);
2274
2275 /*
2276 * Create the buffers on the consumer side. This call populates the
2277 * ust app channel object with all streams and data object.
2278 */
2279 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2280 app->bits_per_long, reg_uid->registry->reg.ust);
2281 if (ret < 0) {
2282 /*
2283 * Let's remove the previously created buffer registry channel so
2284 * it's not visible anymore in the session registry.
2285 */
2286 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2287 ua_chan->tracing_channel_id);
2288 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2289 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2290 goto error;
2291 }
2292
2293 /*
2294 * Setup the streams and add them to the session registry.
2295 */
2296 ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
2297 if (ret < 0) {
2298 goto error;
2299 }
2300
2301 }
2302
2303 /* Send buffers to the application. */
2304 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
2305 if (ret < 0) {
2306 goto error;
2307 }
2308
2309 error:
2310 return ret;
2311 }
2312
2313 /*
2314 * Create the per PID buffers and send them to the application.
2315 *
2316 * Return 0 on success else a negative value.
2317 */
2318 static int create_channel_per_pid(struct ust_app *app,
2319 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2320 struct ust_app_channel *ua_chan)
2321 {
2322 int ret;
2323 struct ust_registry_session *registry;
2324
2325 assert(app);
2326 assert(usess);
2327 assert(ua_sess);
2328 assert(ua_chan);
2329
2330 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2331
2332 rcu_read_lock();
2333
2334 registry = get_session_registry(ua_sess);
2335 assert(registry);
2336
2337 /* Create and add a new channel registry to session. */
2338 ret = ust_registry_channel_add(registry, ua_chan->key);
2339 if (ret < 0) {
2340 goto error;
2341 }
2342
2343 /* Create and get channel on the consumer side. */
2344 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2345 app->bits_per_long, registry);
2346 if (ret < 0) {
2347 goto error;
2348 }
2349
2350 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2351 if (ret < 0) {
2352 goto error;
2353 }
2354
2355 error:
2356 rcu_read_unlock();
2357 return ret;
2358 }
2359
2360 /*
2361 * From an already allocated ust app channel, create the channel buffers if
2362 * needed and send them to the application. This MUST be called with the RCU
2363 * read-side lock acquired.
2364 *
2365 * Return 0 on success or else a negative value.
2366 */
2367 static int do_create_channel(struct ust_app *app,
2368 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2369 struct ust_app_channel *ua_chan)
2370 {
2371 int ret;
2372
2373 assert(app);
2374 assert(usess);
2375 assert(ua_sess);
2376 assert(ua_chan);
2377
2378 /* Handle buffer type before sending the channel to the application. */
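/*
 * Per-UID buffers are shared through the buffer registry by every
 * application with the same UID and bitness, whereas per-PID buffers are
 * created on the consumer for this application only.
 */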
2379 switch (usess->buffer_type) {
2380 case LTTNG_BUFFER_PER_UID:
2381 {
2382 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2383 if (ret < 0) {
2384 goto error;
2385 }
2386 break;
2387 }
2388 case LTTNG_BUFFER_PER_PID:
2389 {
2390 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2391 if (ret < 0) {
2392 goto error;
2393 }
2394 break;
2395 }
2396 default:
2397 assert(0);
2398 ret = -EINVAL;
2399 goto error;
2400 }
2401
2402 /* Initialize ust objd object using the received handle and add it. */
2403 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2404 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2405
2406 /* If channel is not enabled, disable it on the tracer */
2407 if (!ua_chan->enabled) {
2408 ret = disable_ust_channel(app, ua_sess, ua_chan);
2409 if (ret < 0) {
2410 goto error;
2411 }
2412 }
2413
2414 error:
2415 return ret;
2416 }
2417
2418 /*
2419 * Create a UST app channel and create it on the tracer side. If ua_chanp is
2420 * not NULL, it is set to the newly created channel.
2421 *
2422 * Called with UST app session lock and RCU read-side lock held.
2423 *
2424 * Return 0 on success or else a negative value.
2425 */
2426 static int create_ust_app_channel(struct ust_app_session *ua_sess,
2427 struct ltt_ust_channel *uchan, struct ust_app *app,
2428 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
2429 struct ust_app_channel **ua_chanp)
2430 {
2431 int ret = 0;
2432 struct lttng_ht_iter iter;
2433 struct lttng_ht_node_str *ua_chan_node;
2434 struct ust_app_channel *ua_chan;
2435
2436 /* Lookup channel in the ust app session */
2437 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2438 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2439 if (ua_chan_node != NULL) {
2440 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2441 goto end;
2442 }
2443
2444 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
2445 if (ua_chan == NULL) {
2446 /* Only malloc can fail here */
2447 ret = -ENOMEM;
2448 goto error_alloc;
2449 }
2450 shadow_copy_channel(ua_chan, uchan);
2451
2452 /* Set channel type. */
2453 ua_chan->attr.type = type;
2454
2455 ret = do_create_channel(app, usess, ua_sess, ua_chan);
2456 if (ret < 0) {
2457 goto error;
2458 }
2459
2460 DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
2461 app->pid);
2462
2463 /* Only add the channel if successful on the tracer side. */
2464 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
2465
2466 end:
2467 if (ua_chanp) {
2468 *ua_chanp = ua_chan;
2469 }
2470
2471 /* Everything went well. */
2472 return 0;
2473
2474 error:
2475 delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
2476 error_alloc:
2477 return ret;
2478 }
2479
2480 /*
2481 * Create UST app event and create it on the tracer side.
2482 *
2483 * Called with ust app session mutex held.
2484 */
2485 static
2486 int create_ust_app_event(struct ust_app_session *ua_sess,
2487 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2488 struct ust_app *app)
2489 {
2490 int ret = 0;
2491 struct ust_app_event *ua_event;
2492
2493 /* Get event node */
2494 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
2495 uevent->filter, uevent->attr.loglevel);
2496 if (ua_event != NULL) {
2497 ret = -EEXIST;
2498 goto end;
2499 }
2500
2501 /* Does not exist so create one */
2502 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
2503 if (ua_event == NULL) {
2504 /* Only malloc can fail, so something is really wrong. */
2505 ret = -ENOMEM;
2506 goto end;
2507 }
2508 shadow_copy_event(ua_event, uevent);
2509
2510 /* Create it on the tracer side */
2511 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
2512 if (ret < 0) {
2513 /* Not found previously means that it does not exist on the tracer */
2514 assert(ret != -LTTNG_UST_ERR_EXIST);
2515 goto error;
2516 }
2517
2518 add_unique_ust_app_event(ua_chan, ua_event);
2519
2520 DBG2("UST app create event %s for PID %d completed", ua_event->name,
2521 app->pid);
2522
2523 end:
2524 return ret;
2525
2526 error:
2527 /* Valid to call here; we are already inside a read-side lock. */
2528 delete_ust_app_event(-1, ua_event);
2529 return ret;
2530 }
2531
2532 /*
2533 * Create UST metadata and open it on the tracer side.
2534 *
2535 * Called with the UST app session lock and the RCU read-side lock held.
2536 */
2537 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
2538 struct ust_app *app, struct consumer_output *consumer,
2539 struct ustctl_consumer_channel_attr *attr)
2540 {
2541 int ret = 0;
2542 struct ust_app_channel *metadata;
2543 struct consumer_socket *socket;
2544 struct ust_registry_session *registry;
2545
2546 assert(ua_sess);
2547 assert(app);
2548 assert(consumer);
2549
2550 registry = get_session_registry(ua_sess);
2551 assert(registry);
2552
2553 /* Metadata already exists for this registry or it was closed previously */
2554 if (registry->metadata_key || registry->metadata_closed) {
2555 ret = 0;
2556 goto error;
2557 }
2558
2559 /* Allocate UST metadata */
2560 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
2561 if (!metadata) {
2562 /* malloc() failed */
2563 ret = -ENOMEM;
2564 goto error;
2565 }
2566
2567 if (!attr) {
2568 /* Set default attributes for metadata. */
2569 metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
2570 metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
2571 metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
2572 metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
2573 metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
2574 metadata->attr.output = LTTNG_UST_MMAP;
2575 metadata->attr.type = LTTNG_UST_CHAN_METADATA;
2576 } else {
2577 memcpy(&metadata->attr, attr, sizeof(metadata->attr));
2578 metadata->attr.output = LTTNG_UST_MMAP;
2579 metadata->attr.type = LTTNG_UST_CHAN_METADATA;
2580 }
2581
2582 /* Need one fd for the channel. */
2583 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2584 if (ret < 0) {
2585 ERR("Exhausted number of available FD upon create metadata");
2586 goto error;
2587 }
2588
2589 /* Get the right consumer socket for the application. */
2590 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
2591 if (!socket) {
2592 ret = -EINVAL;
2593 goto error_consumer;
2594 }
2595
2596 /*
2597 * Keep the metadata key so we can identify it on the consumer side. Assign
2598 * it to the registry *before* we ask the consumer so we avoid the race where
2599 * the consumer requests the metadata before the ask_channel call on our side
2600 * has returned.
2601 */
2602 registry->metadata_key = metadata->key;
2603
2604 /*
2605 * Ask the consumer to create the metadata channel. The metadata object
2606 * will be created by the consumer and kept there. However, the stream is
2607 * never added or monitored until we do a first push metadata to the
2608 * consumer.
2609 */
2610 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
2611 registry);
2612 if (ret < 0) {
2613 /*
2614 * Safe because the metadata obj pointer is not set so the delete below
2615 * will not put a FD back again.
2616 */
2617 goto error_consumer;
2618 }
2619
2620 /*
2621 * The setup command will make the metadata stream be sent to the relayd,
2622 * if applicable, and to the thread managing the metadata. This is important
2623 * because after this point, if an error occurs, the only way the stream
2624 * can be deleted is to be monitored in the consumer.
2625 */
2626 ret = consumer_setup_metadata(socket, metadata->key);
2627 if (ret < 0) {
2628 /*
2629 * Safe because the metadata obj pointer is not set so the delete below
2630 * will not put a FD back again.
2631 */
2632 goto error_consumer;
2633 }
2634
2635 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
2636 metadata->key, app->pid);
2637
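/*
 * On both the success and error paths the session daemon no longer needs
 * its local metadata channel object: the reserved FD is put back and the
 * object deleted. The metadata is identified afterwards by the key kept in
 * the registry.
 */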
2638 error_consumer:
2639 lttng_fd_put(LTTNG_FD_APPS, 1);
2640 delete_ust_app_channel(-1, metadata, app);
2641 error:
2642 return ret;
2643 }
2644
2645 /*
2646 * Return a pointer to the traceable apps hash table.
2647 */
2648 struct lttng_ht *ust_app_get_ht(void)
2649 {
2650 return ust_app_ht;
2651 }
2652
2653 /*
2654 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2655 * acquired before calling this function.
2656 */
2657 struct ust_app *ust_app_find_by_pid(pid_t pid)
2658 {
2659 struct ust_app *app = NULL;
2660 struct lttng_ht_node_ulong *node;
2661 struct lttng_ht_iter iter;
2662
2663 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2664 node = lttng_ht_iter_get_node_ulong(&iter);
2665 if (node == NULL) {
2666 DBG2("UST app not found with pid %d", pid);
2667 goto error;
2668 }
2669
2670 DBG2("Found UST app by pid %d", pid);
2671
2672 app = caa_container_of(node, struct ust_app, pid_n);
2673
2674 error:
2675 return app;
2676 }
2677
2678 /*
2679 * Allocate and init an UST app object using the registration information and
2680 * the command socket. This is called when the command socket connects to the
2681 * session daemon.
2682 *
2683 * The object is returned on success or else NULL.
2684 */
2685 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
2686 {
2687 struct ust_app *lta = NULL;
2688
2689 assert(msg);
2690 assert(sock >= 0);
2691
2692 DBG3("UST app creating application for socket %d", sock);
2693
2694 if ((msg->bits_per_long == 64 &&
2695 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
2696 || (msg->bits_per_long == 32 &&
2697 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
2698 ERR("Registration failed: application \"%s\" (pid: %d) has "
2699 "%d-bit longs, but no consumerd for this size is available.\n",
2700 msg->name, msg->pid, msg->bits_per_long);
2701 goto error;
2702 }
2703
2704 lta = zmalloc(sizeof(struct ust_app));
2705 if (lta == NULL) {
2706 PERROR("malloc");
2707 goto error;
2708 }
2709
2710 lta->ppid = msg->ppid;
2711 lta->uid = msg->uid;
2712 lta->gid = msg->gid;
2713
2714 lta->bits_per_long = msg->bits_per_long;
2715 lta->uint8_t_alignment = msg->uint8_t_alignment;
2716 lta->uint16_t_alignment = msg->uint16_t_alignment;
2717 lta->uint32_t_alignment = msg->uint32_t_alignment;
2718 lta->uint64_t_alignment = msg->uint64_t_alignment;
2719 lta->long_alignment = msg->long_alignment;
2720 lta->byte_order = msg->byte_order;
2721
2722 lta->v_major = msg->major;
2723 lta->v_minor = msg->minor;
2724 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2725 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2726 lta->notify_sock = -1;
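/* The notify socket is set later, once the application sends it. */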
2727
2728 /* Copy name and make sure it's NULL terminated. */
2729 strncpy(lta->name, msg->name, sizeof(lta->name));
2730 lta->name[UST_APP_PROCNAME_LEN] = '\0';
2731
2732 /*
2733 * Before this can be called, when receiving the registration information,
2734 * the application compatibility is checked. So, at this point, the
2735 * application can work with this session daemon.
2736 */
2737 lta->compatible = 1;
2738
2739 lta->pid = msg->pid;
2740 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
2741 lta->sock = sock;
2742 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
2743
2744 CDS_INIT_LIST_HEAD(&lta->teardown_head);
2745
2746 error:
2747 return lta;
2748 }
2749
2750 /*
2751 * For a given application object, add it to every hash table.
2752 */
2753 void ust_app_add(struct ust_app *app)
2754 {
2755 assert(app);
2756 assert(app->notify_sock >= 0);
2757
2758 rcu_read_lock();
2759
2760 /*
2761 * On a re-registration, we want to kick out the previous registration of
2762 * that pid
2763 */
2764 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
2765
2766 /*
2767 * The socket _should_ be unique until _we_ call close. So, an add_unique
2768 * for ust_app_ht_by_sock is used, which triggers an assert failure if the
2769 * entry is already in the table.
2770 */
2771 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
2772
2773 /* Add application to the notify socket hash table. */
2774 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
2775 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
2776
2777 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
2778 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
2779 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
2780 app->v_minor);
2781
2782 rcu_read_unlock();
2783 }
2784
2785 /*
2786 * Set the application version into the object.
2787 *
2788 * Return 0 on success, else a negative value: either an errno code or an
2789 * LTTng-UST error code.
2790 */
2791 int ust_app_version(struct ust_app *app)
2792 {
2793 int ret;
2794
2795 assert(app);
2796
2797 ret = ustctl_tracer_version(app->sock, &app->version);
2798 if (ret < 0) {
2799 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2800 ERR("UST app %d version failed with ret %d", app->sock, ret);
2801 } else {
2802 DBG3("UST app %d version failed. Application is dead", app->sock);
2803 }
2804 }
2805
2806 return ret;
2807 }
2808
2809 /*
2810 * Unregister app by removing it from the global traceable app list and freeing
2811 * the data struct.
2812 *
2813 * The socket is already closed at this point, so there is no need to close it.
2814 */
2815 void ust_app_unregister(int sock)
2816 {
2817 struct ust_app *lta;
2818 struct lttng_ht_node_ulong *node;
2819 struct lttng_ht_iter iter;
2820 struct ust_app_session *ua_sess;
2821 int ret;
2822
2823 rcu_read_lock();
2824
2825 /* Get the node reference for a call_rcu */
2826 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
2827 node = lttng_ht_iter_get_node_ulong(&iter);
2828 assert(node);
2829
2830 lta = caa_container_of(node, struct ust_app, sock_n);
2831 DBG("PID %d unregistering with sock %d", lta->pid, sock);
2832
2833 /* Remove application from socket hash table */
2834 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
2835 assert(!ret);
2836
2837 /*
2838 * Remove application from notify hash table. The thread handling the
2839 * notify socket could have deleted the node so ignore on error because
2840 * either way it's valid. The close of that socket is handled by the other
2841 * thread.
2842 */
2843 iter.iter.node = &lta->notify_sock_n.node;
2844 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
2845
2846 /*
2847 * Ignore return value since the node might have been removed before by an
2848 * add replace during app registration because the PID can be reassigned by
2849 * the OS.
2850 */
2851 iter.iter.node = &lta->pid_n.node;
2852 ret = lttng_ht_del(ust_app_ht, &iter);
2853 if (ret) {
2854 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
2855 lta->pid);
2856 }
2857
2858 /* Remove sessions so they are not visible during deletion. */
2859 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
2860 node.node) {
2861 struct ust_registry_session *registry;
2862
2863 ret = lttng_ht_del(lta->sessions, &iter);
2864 if (ret) {
2865 /* The session was already removed, so it is scheduled for teardown. */
2866 continue;
2867 }
2868
2869 /*
2870 * Add session to list for teardown. This is safe since at this point we
2871 * are the only one using this list.
2872 */
2873 pthread_mutex_lock(&ua_sess->lock);
2874
2875 /*
2876 * Normally, this is done in the delete session process which is
2877 * executed in the call rcu below. However, upon unregistration we can't
2878 * afford to wait for the grace period before pushing data or else the
2879 * data pending feature can race between the unregistration and stop
2880 * command where the data pending command is sent *before* the grace
2881 * period ended.
2882 *
2883 * The close metadata below nullifies the metadata pointer in the
2884 * session so the delete session will NOT push/close a second time.
2885 */
2886 registry = get_session_registry(ua_sess);
2887 if (registry && !registry->metadata_closed) {
2888 /* Push metadata for application before freeing the application. */
2889 (void) push_metadata(registry, ua_sess->consumer);
2890
2891 /*
2892 * Don't ask to close metadata for global per UID buffers. Close
2893 * metadata only on trace session destroy in this case. Also, the
2894 * previous push metadata could have flagged the metadata registry to
2895 * close so don't send a close command if closed.
2896 */
2897 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
2898 !registry->metadata_closed) {
2899 /* And ask to close it for this session registry. */
2900 (void) close_metadata(registry, ua_sess->consumer);
2901 }
2902 }
2903
2904 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
2905 pthread_mutex_unlock(&ua_sess->lock);
2906 }
2907
2908 /* Free memory */
2909 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
2910
2911 rcu_read_unlock();
2912 return;
2913 }
2914
2915 /*
2916 * Return the number of registered applications.
2917 */
2918 unsigned long ust_app_list_count(void)
2919 {
2920 unsigned long count;
2921
2922 rcu_read_lock();
2923 count = lttng_ht_get_count(ust_app_ht);
2924 rcu_read_unlock();
2925
2926 return count;
2927 }
2928
2929 /*
2930 * Fill the events array with the names of all events of all registered apps.
2931 */
2932 int ust_app_list_events(struct lttng_event **events)
2933 {
2934 int ret, handle;
2935 size_t nbmem, count = 0;
2936 struct lttng_ht_iter iter;
2937 struct ust_app *app;
2938 struct lttng_event *tmp_event;
2939
2940 nbmem = UST_APP_EVENT_LIST_SIZE;
2941 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
2942 if (tmp_event == NULL) {
2943 PERROR("zmalloc ust app events");
2944 ret = -ENOMEM;
2945 goto error;
2946 }
2947
2948 rcu_read_lock();
2949
2950 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
2951 struct lttng_ust_tracepoint_iter uiter;
2952
2953 health_code_update();
2954
2955 if (!app->compatible) {
2956 /*
2957 * TODO: In time, we should notify the caller of this error by
2958 * telling them that this is a version error.
2959 */
2960 continue;
2961 }
2962 handle = ustctl_tracepoint_list(app->sock);
2963 if (handle < 0) {
2964 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
2965 ERR("UST app list events getting handle failed for app pid %d",
2966 app->pid);
2967 }
2968 continue;
2969 }
2970
2971 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
2972 &uiter)) != -LTTNG_UST_ERR_NOENT) {
2973 /* Handle ustctl error. */
2974 if (ret < 0) {
2975 free(tmp_event);
2976 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2977 ERR("UST app tp list get failed for app %d with ret %d",
2978 app->sock, ret);
2979 } else {
2980 DBG3("UST app tp list get failed. Application is dead");
2981 }
2982 goto rcu_error;
2983 }
2984
2985 health_code_update();
2986 if (count >= nbmem) {
2987 /* In case the realloc fails, we free the memory */
2988 void *ptr;
2989
2990 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
2991 2 * nbmem);
2992 nbmem *= 2;
2993 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
2994 if (ptr == NULL) {
2995 PERROR("realloc ust app events");
2996 free(tmp_event);
2997 ret = -ENOMEM;
2998 goto rcu_error;
2999 }
3000 tmp_event = ptr;
3001 }
3002 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3003 tmp_event[count].loglevel = uiter.loglevel;
3004 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3005 tmp_event[count].pid = app->pid;
3006 tmp_event[count].enabled = -1;
3007 count++;
3008 }
3009 }
3010
3011 ret = count;
3012 *events = tmp_event;
3013
3014 DBG2("UST app list events done (%zu events)", count);
3015
3016 rcu_error:
3017 rcu_read_unlock();
3018 error:
3019 health_code_update();
3020 return ret;
3021 }
3022
3023 /*
3024 * Fill the fields array with all event field names of all registered apps.
3025 */
3026 int ust_app_list_event_fields(struct lttng_event_field **fields)
3027 {
3028 int ret, handle;
3029 size_t nbmem, count = 0;
3030 struct lttng_ht_iter iter;
3031 struct ust_app *app;
3032 struct lttng_event_field *tmp_event;
3033
3034 nbmem = UST_APP_EVENT_LIST_SIZE;
3035 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3036 if (tmp_event == NULL) {
3037 PERROR("zmalloc ust app event fields");
3038 ret = -ENOMEM;
3039 goto error;
3040 }
3041
3042 rcu_read_lock();
3043
3044 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3045 struct lttng_ust_field_iter uiter;
3046
3047 health_code_update();
3048
3049 if (!app->compatible) {
3050 /*
3051 * TODO: In time, we should notify the caller of this error by
3052 * telling them that this is a version error.
3053 */
3054 continue;
3055 }
3056 handle = ustctl_tracepoint_field_list(app->sock);
3057 if (handle < 0) {
3058 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3059 ERR("UST app list field getting handle failed for app pid %d",
3060 app->pid);
3061 }
3062 continue;
3063 }
3064
3065 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3066 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3067 /* Handle ustctl error. */
3068 if (ret < 0) {
3069 free(tmp_event);
3070 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3071 ERR("UST app tp list field failed for app %d with ret %d",
3072 app->sock, ret);
3073 } else {
3074 DBG3("UST app tp list field failed. Application is dead");
3075 }
3076 goto rcu_error;
3077 }
3078
3079 health_code_update();
3080 if (count >= nbmem) {
3081 /* In case the realloc fails, we free the memory */
3082 void *ptr;
3083
3084 DBG2("Reallocating event field list from %zu to %zu entries", nbmem,
3085 2 * nbmem);
3086 nbmem *= 2;
3087 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event_field));
3088 if (ptr == NULL) {
3089 PERROR("realloc ust app event fields");
3090 free(tmp_event);
3091 ret = -ENOMEM;
3092 goto rcu_error;
3093 }
3094 tmp_event = ptr;
3095 }
3096
3097 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3098 tmp_event[count].type = uiter.type;
3099 tmp_event[count].nowrite = uiter.nowrite;
3100
3101 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3102 tmp_event[count].event.loglevel = uiter.loglevel;
3103 tmp_event[count].event.type = LTTNG_UST_TRACEPOINT;
3104 tmp_event[count].event.pid = app->pid;
3105 tmp_event[count].event.enabled = -1;
3106 count++;
3107 }
3108 }
3109
3110 ret = count;
3111 *fields = tmp_event;
3112
3113 DBG2("UST app list event fields done (%zu events)", count);
3114
3115 rcu_error:
3116 rcu_read_unlock();
3117 error:
3118 health_code_update();
3119 return ret;
3120 }
3121
3122 /*
3123 * Free and clean all traceable apps of the global list.
3124 *
3125 * Should _NOT_ be called with RCU read-side lock held.
3126 */
3127 void ust_app_clean_list(void)
3128 {
3129 int ret;
3130 struct ust_app *app;
3131 struct lttng_ht_iter iter;
3132
3133 DBG2("UST app cleaning registered apps hash table");
3134
3135 rcu_read_lock();
3136
3137 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3138 ret = lttng_ht_del(ust_app_ht, &iter);
3139 assert(!ret);
3140 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3141 }
3142
3143 /* Cleanup socket hash table */
3144 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3145 sock_n.node) {
3146 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3147 assert(!ret);
3148 }
3149
3150 /* Cleanup notify socket hash table */
3151 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3152 notify_sock_n.node) {
3153 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3154 assert(!ret);
3155 }
3156 rcu_read_unlock();
3157
3158 /* Destroy is done only when the ht is empty */
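/*
 * The tables are handed to the dedicated ht-cleanup thread rather than
 * destroyed in place: delete_ust_app_rcu callbacks may still be pending,
 * and destroying the tables here could deadlock with call_rcu.
 */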
3159 ht_cleanup_push(ust_app_ht);
3160 ht_cleanup_push(ust_app_ht_by_sock);
3161 ht_cleanup_push(ust_app_ht_by_notify_sock);
3162 }
3163
3164 /*
3165 * Init UST app hash tables.
3166 */
3167 void ust_app_ht_alloc(void)
3168 {
3169 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3170 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3171 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3172 }
3173
3174 /*
3175 * For a specific UST session, disable the channel for all registered apps.
3176 */
3177 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3178 struct ltt_ust_channel *uchan)
3179 {
3180 int ret = 0;
3181 struct lttng_ht_iter iter;
3182 struct lttng_ht_node_str *ua_chan_node;
3183 struct ust_app *app;
3184 struct ust_app_session *ua_sess;
3185 struct ust_app_channel *ua_chan;
3186
3187 if (usess == NULL || uchan == NULL) {
3188 ERR("Disabling UST global channel with NULL values");
3189 ret = -1;
3190 goto error;
3191 }
3192
3193 DBG2("UST app disabling channel %s from global domain for session id %d",
3194 uchan->name, usess->id);
3195
3196 rcu_read_lock();
3197
3198 /* For every registered application */
3199 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3200 struct lttng_ht_iter uiter;
3201 if (!app->compatible) {
3202 /*
3203 * TODO: In time, we should notify the caller of this error by
3204 * telling them that this is a version error.
3205 */
3206 continue;
3207 }
3208 ua_sess = lookup_session_by_app(usess, app);
3209 if (ua_sess == NULL) {
3210 continue;
3211 }
3212
3213 /* Get channel */
3214 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3215 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3216 /* If the session is found for the app, the channel must be there */
3217 assert(ua_chan_node);
3218
3219 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3220 /* The channel must not be already disabled */
3221 assert(ua_chan->enabled == 1);
3222
3223 /* Disable channel onto application */
3224 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3225 if (ret < 0) {
3226 /* XXX: We might want to report this error at some point... */
3227 continue;
3228 }
3229 }
3230
3231 rcu_read_unlock();
3232
3233 error:
3234 return ret;
3235 }
3236
3237 /*
3238 * For a specific UST session, enable the channel for all registered apps.
3239 */
3240 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3241 struct ltt_ust_channel *uchan)
3242 {
3243 int ret = 0;
3244 struct lttng_ht_iter iter;
3245 struct ust_app *app;
3246 struct ust_app_session *ua_sess;
3247
3248 if (usess == NULL || uchan == NULL) {
3249 ERR("Enabling UST global channel with NULL values");
3250 ret = -1;
3251 goto error;
3252 }
3253
3254 DBG2("UST app enabling channel %s to global domain for session id %d",
3255 uchan->name, usess->id);
3256
3257 rcu_read_lock();
3258
3259 /* For every registered application */
3260 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3261 if (!app->compatible) {
3262 /*
3263 * TODO: In time, we should notify the caller of this error by
3264 * telling them that this is a version error.
3265 */
3266 continue;
3267 }
3268 ua_sess = lookup_session_by_app(usess, app);
3269 if (ua_sess == NULL) {
3270 continue;
3271 }
3272
3273 /* Enable channel onto application */
3274 ret = enable_ust_app_channel(ua_sess, uchan, app);
3275 if (ret < 0) {
3276 /* XXX: We might want to report this error at some point... */
3277 continue;
3278 }
3279 }
3280
3281 rcu_read_unlock();
3282
3283 error:
3284 return ret;
3285 }
3286
3287 /*
3288 * Disable an event in a channel and for a specific session.
3289 */
3290 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3291 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3292 {
3293 int ret = 0;
3294 struct lttng_ht_iter iter, uiter;
3295 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
3296 struct ust_app *app;
3297 struct ust_app_session *ua_sess;
3298 struct ust_app_channel *ua_chan;
3299 struct ust_app_event *ua_event;
3300
3301 DBG("UST app disabling event %s for all apps in channel "
3302 "%s for session id %d", uevent->attr.name, uchan->name, usess->id);
3303
3304 rcu_read_lock();
3305
3306 /* For all registered applications */
3307 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3308 if (!app->compatible) {
3309 /*
3310 * TODO: In time, we should notify the caller of this error by
3311 * telling them that this is a version error.
3312 */
3313 continue;
3314 }
3315 ua_sess = lookup_session_by_app(usess, app);
3316 if (ua_sess == NULL) {
3317 /* Next app */
3318 continue;
3319 }
3320
3321 /* Lookup channel in the ust app session */
3322 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3323 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3324 if (ua_chan_node == NULL) {
3325 DBG2("Channel %s not found in session id %d for app pid %d."
3326 " Skipping", uchan->name, usess->id, app->pid);
3327 continue;
3328 }
3329 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3330
3331 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
3332 ua_event_node = lttng_ht_iter_get_node_str(&uiter);
3333 if (ua_event_node == NULL) {
3334 DBG2("Event %s not found in channel %s for app pid %d."
3335 " Skipping", uevent->attr.name, uchan->name, app->pid);
3336 continue;
3337 }
3338 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
3339
3340 ret = disable_ust_app_event(ua_sess, ua_event, app);
3341 if (ret < 0) {
3342 /* XXX: Report error someday... */
3343 continue;
3344 }
3345 }
3346
3347 rcu_read_unlock();
3348
3349 return ret;
3350 }
3351
3352 /*
3353 * For a specific UST session and UST channel, disable all events for all
3354 * registered apps.
3355 */
3356 int ust_app_disable_all_event_glb(struct ltt_ust_session *usess,
3357 struct ltt_ust_channel *uchan)
3358 {
3359 int ret = 0;
3360 struct lttng_ht_iter iter, uiter;
3361 struct lttng_ht_node_str *ua_chan_node;
3362 struct ust_app *app;
3363 struct ust_app_session *ua_sess;
3364 struct ust_app_channel *ua_chan;
3365 struct ust_app_event *ua_event;
3366
3367 DBG("UST app disabling all events for all apps in channel "
3368 "%s for session id %d", uchan->name, usess->id);
3369
3370 rcu_read_lock();
3371
3372 /* For all registered applications */
3373 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3374 if (!app->compatible) {
3375 /*
3376 * TODO: In time, we should notify the caller of this error by
3377 * telling them that this is a version error.
3378 */
3379 continue;
3380 }
3381 ua_sess = lookup_session_by_app(usess, app);
3382 if (!ua_sess) {
3383 /* The application has a problem or is probably dead. */
3384 continue;
3385 }
3386
3387 /* Lookup channel in the ust app session */
3388 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3389 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3390 /* If the channel is not found, there is a code flow error */
3391 assert(ua_chan_node);
3392
3393 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3394
3395 /* Disable each event of the channel */
3396 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
3397 node.node) {
3398 ret = disable_ust_app_event(ua_sess, ua_event, app);
3399 if (ret < 0) {
3400 /* XXX: Report error someday... */
3401 continue;
3402 }
3403 }
3404 }
3405
3406 rcu_read_unlock();
3407
3408 return ret;
3409 }
3410
3411 /*
3412 * For a specific UST session, create the channel for all registered apps.
3413 */
3414 int ust_app_create_channel_glb(struct ltt_ust_session *usess,
3415 struct ltt_ust_channel *uchan)
3416 {
3417 int ret = 0, created;
3418 struct lttng_ht_iter iter;
3419 struct ust_app *app;
3420 struct ust_app_session *ua_sess = NULL;
3421
3422 /* Very wrong code flow */
3423 assert(usess);
3424 assert(uchan);
3425
3426 DBG2("UST app adding channel %s to UST domain for session id %d",
3427 uchan->name, usess->id);
3428
3429 rcu_read_lock();
3430
3431 /* For every registered application */
3432 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3433 if (!app->compatible) {
3434 /*
3435 * TODO: In time, we should notify the caller of this error by
3436 * telling them that this is a version error.
3437 */
3438 continue;
3439 }
3440 /*
3441 * Create the session on the tracer side and add it to the app session HT.
3442 * Note that if the session already exists, this simply returns a pointer
3443 * to the ust app session.
3444 */
3445 ret = create_ust_app_session(usess, app, &ua_sess, &created);
3446 if (ret < 0) {
3447 switch (ret) {
3448 case -ENOTCONN:
3449 /*
3450 * The application's socket is not valid. Either a bad socket
3451 * or a timeout on it. We can't inform the caller that for a
3452 * specific app, the session failed so let's continue here.
3453 */
3454 continue;
3455 case -ENOMEM:
3456 default:
3457 goto error_rcu_unlock;
3458 }
3459 }
3460 assert(ua_sess);
3461
3462 pthread_mutex_lock(&ua_sess->lock);
3463 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
3464 sizeof(uchan->name))) {
3465 struct ustctl_consumer_channel_attr attr;
3466 copy_channel_attr_to_ustctl(&attr, &uchan->attr);
3467 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
3468 &attr);
3469 } else {
3470 /* Create channel onto application. We don't need the chan ref. */
3471 ret = create_ust_app_channel(ua_sess, uchan, app,
3472 LTTNG_UST_CHAN_PER_CPU, usess, NULL);
3473 }
3474 pthread_mutex_unlock(&ua_sess->lock);
3475 if (ret < 0) {
3476 if (ret == -ENOMEM) {
3477 /* No more memory is a fatal error. Stop right now. */
3478 goto error_rcu_unlock;
3479 }
3480 /* Clean up the session if we just created it. */
3481 if (created) {
3482 destroy_app_session(app, ua_sess);
3483 }
3484 }
3485 }
3486
3487 error_rcu_unlock:
3488 rcu_read_unlock();
3489 return ret;
3490 }
3491
3492 /*
3493 * Enable event for a specific session and channel on the tracer.
3494 */
3495 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
3496 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3497 {
3498 int ret = 0;
3499 struct lttng_ht_iter iter, uiter;
3500 struct lttng_ht_node_str *ua_chan_node;
3501 struct ust_app *app;
3502 struct ust_app_session *ua_sess;
3503 struct ust_app_channel *ua_chan;
3504 struct ust_app_event *ua_event;
3505
3506 DBG("UST app enabling event %s for all apps for session id %d",
3507 uevent->attr.name, usess->id);
3508
3509 /*
3510 * NOTE: At this point, this function is called only if the session and
3511 * channel passed are already created for all apps and enabled on the
3512 * tracer as well.
3513 */
3514
3515 rcu_read_lock();
3516
3517 /* For all registered applications */
3518 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3519 if (!app->compatible) {
3520 /*
3521 * TODO: In time, we should notify the caller of this error by
3522 * telling them that this is a version error.
3523 */
3524 continue;
3525 }
3526 ua_sess = lookup_session_by_app(usess, app);
3527 if (!ua_sess) {
3528 /* The application has a problem or is probably dead. */
3529 continue;
3530 }
3531
3532 pthread_mutex_lock(&ua_sess->lock);
3533
3534 /* Lookup channel in the ust app session */
3535 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3536 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3537 /* If the channel is not found, there is a code flow error */
3538 assert(ua_chan_node);
3539
3540 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3541
3542 /* Get event node */
3543 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
3544 uevent->filter, uevent->attr.loglevel);
3545 if (ua_event == NULL) {
3546 DBG3("UST app enable event %s not found for app PID %d."
3547 " Skipping app", uevent->attr.name, app->pid);
3548 goto next_app;
3549 }
3550
3551 ret = enable_ust_app_event(ua_sess, ua_event, app);
3552 if (ret < 0) {
3553 pthread_mutex_unlock(&ua_sess->lock);
3554 goto error;
3555 }
3556 next_app:
3557 pthread_mutex_unlock(&ua_sess->lock);
3558 }
3559
3560 error:
3561 rcu_read_unlock();
3562 return ret;
3563 }
3564
3565 /*
3566 * For a specific existing UST session and UST channel, create the event for
3567 * all registered apps.
3568 */
3569 int ust_app_create_event_glb(struct ltt_ust_session *usess,
3570 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3571 {
3572 int ret = 0;
3573 struct lttng_ht_iter iter, uiter;
3574 struct lttng_ht_node_str *ua_chan_node;
3575 struct ust_app *app;
3576 struct ust_app_session *ua_sess;
3577 struct ust_app_channel *ua_chan;
3578
3579 DBG("UST app creating event %s for all apps for session id %d",
3580 uevent->attr.name, usess->id);
3581
3582 rcu_read_lock();
3583
3584 /* For all registered applications */
3585 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3586 if (!app->compatible) {
3587 /*
3588 * TODO: In time, we should notify the caller of this error by
3589 * telling them that this is a version error.
3590 */
3591 continue;
3592 }
3593 ua_sess = lookup_session_by_app(usess, app);
3594 if (!ua_sess) {
3595 /* The application has a problem or is probably dead. */
3596 continue;
3597 }
3598
3599 pthread_mutex_lock(&ua_sess->lock);
3600 /* Lookup channel in the ust app session */
3601 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3602 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3603 /* If the channel is not found, there is a code flow error */
3604 assert(ua_chan_node);
3605
3606 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3607
3608 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
3609 pthread_mutex_unlock(&ua_sess->lock);
3610 if (ret < 0) {
3611 if (ret != -LTTNG_UST_ERR_EXIST) {
3612 /* Possible value at this point: -ENOMEM. If so, we stop! */
3613 break;
3614 }
3615 DBG2("UST app event %s already exists on app PID %d",
3616 uevent->attr.name, app->pid);
3617 continue;
3618 }
3619 }
3620
3621 rcu_read_unlock();
3622
3623 return ret;
3624 }
3625
3626 /*
3627 * Start tracing for a specific UST session and app.
3628 */
3629 static
3630 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
3631 {
3632 int ret = 0;
3633 struct ust_app_session *ua_sess;
3634
3635 DBG("Starting tracing for ust app pid %d", app->pid);
3636
3637 rcu_read_lock();
3638
3639 if (!app->compatible) {
3640 goto end;
3641 }
3642
3643 ua_sess = lookup_session_by_app(usess, app);
3644 if (ua_sess == NULL) {
3645 /* The session is in teardown process. Ignore and continue. */
3646 goto end;
3647 }
3648
3649 pthread_mutex_lock(&ua_sess->lock);
3650
3651 /* Upon restart, we skip the setup, already done */
3652 if (ua_sess->started) {
3653 goto skip_setup;
3654 }
3655
3656 /* Create directories if consumer is LOCAL and has a path defined. */
3657 if (usess->consumer->type == CONSUMER_DST_LOCAL &&
3658 strlen(usess->consumer->dst.trace_path) > 0) {
3659 ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
3660 S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
3661 if (ret < 0) {
3662 if (ret != -EEXIST) {
3663 ERR("Trace directory creation error");
3664 goto error_unlock;
3665 }
3666 }
3667 }
3668
3669 /*
3670 * Create the metadata for the application. This returns gracefully if a
3671 * metadata was already set for the session.
3672 */
3673 ret = create_ust_app_metadata(ua_sess, app, usess->consumer, NULL);
3674 if (ret < 0) {
3675 goto error_unlock;
3676 }
3677
3678 health_code_update();
3679
3680 skip_setup:
3681 /* This starts the UST tracing */
3682 ret = ustctl_start_session(app->sock, ua_sess->handle);
3683 if (ret < 0) {
3684 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3685 ERR("Error starting tracing for app pid: %d (ret: %d)",
3686 app->pid, ret);
3687 } else {
3688 DBG("UST app start session failed. Application is dead.");
3689 }
3690 goto error_unlock;
3691 }
3692
3693 /* Indicate that the session has been started once */
3694 ua_sess->started = 1;
3695
3696 pthread_mutex_unlock(&ua_sess->lock);
3697
3698 health_code_update();
3699
3700 /* Quiescent wait after starting trace */
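/*
 * This waits for the application side to reach a quiescent state, so any
 * tracepoint call in flight when tracing was switched on has completed
 * before we report the start to the caller.
 */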
3701 ret = ustctl_wait_quiescent(app->sock);
3702 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3703 ERR("UST app wait quiescent failed for app pid %d ret %d",
3704 app->pid, ret);
3705 }
3706
3707 end:
3708 rcu_read_unlock();
3709 health_code_update();
3710 return 0;
3711
3712 error_unlock:
3713 pthread_mutex_unlock(&ua_sess->lock);
3714 rcu_read_unlock();
3715 health_code_update();
3716 return -1;
3717 }
3718
3719 /*
3720 * Stop tracing for a specific UST session and app.
3721 */
3722 static
3723 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
3724 {
3725 int ret = 0;
3726 struct ust_app_session *ua_sess;
3727 struct ust_registry_session *registry;
3728
3729 DBG("Stopping tracing for ust app pid %d", app->pid);
3730
3731 rcu_read_lock();
3732
3733 if (!app->compatible) {
3734 goto end_no_session;
3735 }
3736
3737 ua_sess = lookup_session_by_app(usess, app);
3738 if (ua_sess == NULL) {
3739 goto end_no_session;
3740 }
3741
3742 pthread_mutex_lock(&ua_sess->lock);
3743
3744 /*
3745 * If started = 0, it means that stop trace has been called for a session
3746 * that was never started. This is possible since we can have a failed start
3747 * from either the application manager thread or the command thread. Simply
3748 * indicate that this is a stop error.
3749 */
3750 if (!ua_sess->started) {
3751 goto error_rcu_unlock;
3752 }
3753
3754 health_code_update();
3755
3756 /* This inhibits UST tracing */
3757 ret = ustctl_stop_session(app->sock, ua_sess->handle);
3758 if (ret < 0) {
3759 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3760 ERR("Error stopping tracing for app pid: %d (ret: %d)",
3761 app->pid, ret);
3762 } else {
3763 DBG("UST app stop session failed. Application is dead.");
3764 }
3765 goto error_rcu_unlock;
3766 }
3767
3768 health_code_update();
3769
3770 /* Quiescent wait after stopping trace */
3771 ret = ustctl_wait_quiescent(app->sock);
3772 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3773 ERR("UST app wait quiescent failed for app pid %d ret %d",
3774 app->pid, ret);
3775 }
3776
3777 health_code_update();
3778
3779 registry = get_session_registry(ua_sess);
3780 assert(registry);
3781
3782 if (!registry->metadata_closed) {
3783 /* Push metadata for the application so the trace is up to date at stop. */
3784 (void) push_metadata(registry, ua_sess->consumer);
3785 }
3786
3787 pthread_mutex_unlock(&ua_sess->lock);
3788 end_no_session:
3789 rcu_read_unlock();
3790 health_code_update();
3791 return 0;
3792
3793 error_rcu_unlock:
3794 pthread_mutex_unlock(&ua_sess->lock);
3795 rcu_read_unlock();
3796 health_code_update();
3797 return -1;
3798 }
3799
3800 /*
3801 * Flush buffers for a specific UST session and app.
3802 */
3803 static
3804 int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
3805 {
3806 int ret = 0;
3807 struct lttng_ht_iter iter;
3808 struct ust_app_session *ua_sess;
3809 struct ust_app_channel *ua_chan;
3810
3811 DBG("Flushing buffers for ust app pid %d", app->pid);
3812
3813 rcu_read_lock();
3814
3815 if (!app->compatible) {
3816 goto end_no_session;
3817 }
3818
3819 ua_sess = lookup_session_by_app(usess, app);
3820 if (ua_sess == NULL) {
3821 goto end_no_session;
3822 }
3823
3824 pthread_mutex_lock(&ua_sess->lock);
3825
3826 health_code_update();
3827
3828 /* Flushing buffers */
3829 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
3830 node.node) {
3831 health_code_update();
3832 assert(ua_chan->is_sent);
3833 ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
3834 if (ret < 0) {
3835 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3836 ERR("UST app PID %d channel %s flush failed with ret %d",
3837 app->pid, ua_chan->name, ret);
3838 } else {
3839 DBG3("UST app failed to flush %s. Application is dead.",
3840 ua_chan->name);
3841 /* No need to continue. */
3842 break;
3843 }
3844 /* Continuing flushing all buffers */
3845 continue;
3846 }
3847 }
3848
3849 health_code_update();
3850
3851 pthread_mutex_unlock(&ua_sess->lock);
3852 end_no_session:
3853 rcu_read_unlock();
3854 health_code_update();
3855 return 0;
3856 }
3857
3858 /*
3859 * Destroy a specific UST session in apps.
3860 */
3861 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
3862 {
3863 int ret;
3864 struct ust_app_session *ua_sess;
3865 struct lttng_ht_iter iter;
3866 struct lttng_ht_node_ulong *node;
3867
3868 DBG("Destroy tracing for ust app pid %d", app->pid);
3869
3870 rcu_read_lock();
3871
3872 if (!app->compatible) {
3873 goto end;
3874 }
3875
3876 __lookup_session_by_app(usess, app, &iter);
3877 node = lttng_ht_iter_get_node_ulong(&iter);
3878 if (node == NULL) {
3879 /* Session is being deleted or is already deleted. */
3880 goto end;
3881 }
3882 ua_sess = caa_container_of(node, struct ust_app_session, node);
3883
3884 health_code_update();
3885 destroy_app_session(app, ua_sess);
3886
3887 health_code_update();
3888
3889 /* Quiescent wait after stopping trace */
3890 ret = ustctl_wait_quiescent(app->sock);
3891 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3892 ERR("UST app wait quiescent failed for app pid %d ret %d",
3893 app->pid, ret);
3894 }
3895 end:
3896 rcu_read_unlock();
3897 health_code_update();
3898 return 0;
3899 }
3900
3901 /*
3902 * Start tracing for the UST session.
3903 */
3904 int ust_app_start_trace_all(struct ltt_ust_session *usess)
3905 {
3906 int ret = 0;
3907 struct lttng_ht_iter iter;
3908 struct ust_app *app;
3909
3910 DBG("Starting all UST traces");
3911
3912 rcu_read_lock();
3913
3914 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3915 ret = ust_app_start_trace(usess, app);
3916 if (ret < 0) {
3917 /* Continue to next apps even on error */
3918 continue;
3919 }
3920 }
3921
3922 rcu_read_unlock();
3923
3924 return 0;
3925 }
3926
3927 /*
3928 * Stop tracing for the UST session.
3929 */
3930 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
3931 {
3932 int ret = 0;
3933 struct lttng_ht_iter iter;
3934 struct ust_app *app;
3935
3936 DBG("Stopping all UST traces");
3937
3938 rcu_read_lock();
3939
3940 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3941 ret = ust_app_stop_trace(usess, app);
3942 if (ret < 0) {
3943 /* Continue to next apps even on error */
3944 continue;
3945 }
3946 }
3947
3948 /* Flush buffers */
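/*
 * Per-UID buffers are not owned by a single application, so they are
 * flushed through the consumer. Per-PID buffers are flushed through each
 * application's own socket.
 */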
3949 switch (usess->buffer_type) {
3950 case LTTNG_BUFFER_PER_UID:
3951 {
3952 struct buffer_reg_uid *reg;
3953
3954 /* Flush all per-UID buffers associated with that session. */
3955 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
3956 struct buffer_reg_channel *reg_chan;
3957 struct consumer_socket *socket;
3958
3959 /* Get the consumer socket to use for the flush. */
3960 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
3961 usess->consumer);
3962 if (!socket) {
3963 /* Ignore request if no consumer is found for the session. */
3964 continue;
3965 }
3966
3967 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
3968 reg_chan, node.node) {
3969 /*
3970 * The following call will print error values so the return
3971 * code is of little importance: whatever happens, we have to
3972 * try to flush them all.
3973 */
3974 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
3975 }
3976 }
3977 break;
3978 }
3979 case LTTNG_BUFFER_PER_PID:
3980 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3981 ret = ust_app_flush_trace(usess, app);
3982 if (ret < 0) {
3983 /* Continue to the next app even on error. */
3984 continue;
3985 }
3986 }
3987 break;
3988 default:
3989 assert(0);
3990 break;
3991 }
3992
3993 rcu_read_unlock();
3994
3995 return 0;
3996 }
3997
3998 /*
3999 * Destroy the app UST session of every registered application.
4000 */
4001 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4002 {
4003 int ret = 0;
4004 struct lttng_ht_iter iter;
4005 struct ust_app *app;
4006
4007 DBG("Destroy all UST traces");
4008
4009 rcu_read_lock();
4010
4011 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4012 ret = destroy_trace(usess, app);
4013 if (ret < 0) {
4014 /* Continue to the next app even on error. */
4015 continue;
4016 }
4017 }
4018
4019 rcu_read_unlock();
4020
4021 return 0;
4022 }
4023
4024 /*
4025 * Add channels/events from UST global domain to registered apps at sock.
4026 */
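/*
 * Roughly: find the application by its command socket, create (or look up)
 * its shadow copy of the session, then recreate every channel, context and
 * event on the tracer side. The metadata channel is handled separately
 * through create_ust_app_metadata() and removed from the channel hash table
 * once created. Finally, if the session was already started, tracing is
 * started for this application as well.
 */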
4027 void ust_app_global_update(struct ltt_ust_session *usess, int sock)
4028 {
4029 int ret = 0;
4030 struct lttng_ht_iter iter, uiter, iter_ctx;
4031 struct ust_app *app;
4032 struct ust_app_session *ua_sess = NULL;
4033 struct ust_app_channel *ua_chan;
4034 struct ust_app_event *ua_event;
4035 struct ust_app_ctx *ua_ctx;
4036
4037 assert(usess);
4038 assert(sock >= 0);
4039
4040 DBG2("UST app global update for app sock %d for session id %d", sock,
4041 usess->id);
4042
4043 rcu_read_lock();
4044
4045 app = find_app_by_sock(sock);
4046 if (app == NULL) {
4047 /*
4048 * The application may already have been unregistered, so simply
4049 * stop the update.
4050 */
4051 DBG3("UST app update failed to find app sock %d", sock);
4052 goto error;
4053 }
4054
4055 if (!app->compatible) {
4056 goto error;
4057 }
4058
4059 ret = create_ust_app_session(usess, app, &ua_sess, NULL);
4060 if (ret < 0) {
4061 /* Tracer is probably gone or ENOMEM. */
4062 goto error;
4063 }
4064 assert(ua_sess);
4065
4066 pthread_mutex_lock(&ua_sess->lock);
4067
4068 /*
4069 * We can safely iterate over the UST app session channels here since the
4070 * create ust app session call above made a shadow copy of the UST global
4071 * domain from the ltt ust session.
4072 */
4073 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4074 node.node) {
4075 /*
4076 * For a metadata channel, handle it differently.
4077 */
4078 if (!strncmp(ua_chan->name, DEFAULT_METADATA_NAME,
4079 sizeof(ua_chan->name))) {
4080 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
4081 &ua_chan->attr);
4082 if (ret < 0) {
4083 goto error_unlock;
4084 }
4085 /* Remove it from the hash table and continue. */
4086 ret = lttng_ht_del(ua_sess->channels, &iter);
4087 assert(!ret);
4088 delete_ust_app_channel(-1, ua_chan, app);
4089 continue;
4090 } else {
4091 ret = do_create_channel(app, usess, ua_sess, ua_chan);
4092 if (ret < 0) {
4093 /*
4094 * Stop everything. On error, either the application failed, no more
4095 * file descriptors are available, or we hit ENOMEM, so stopping here is
4096 * the only thing we can do for now.
4097 */
4098 goto error_unlock;
4099 }
4100 }
4101
4102 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter_ctx.iter, ua_ctx,
4103 node.node) {
4104 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
4105 if (ret < 0) {
4106 goto error_unlock;
4107 }
4108 }
4109
4110
4111 /* For each event */
4112 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
4113 node.node) {
4114 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
4115 if (ret < 0) {
4116 goto error_unlock;
4117 }
4118 }
4119 }
4120
4121 pthread_mutex_unlock(&ua_sess->lock);
4122
4123 if (usess->start_trace) {
4124 ret = ust_app_start_trace(usess, app);
4125 if (ret < 0) {
4126 goto error;
4127 }
4128
4129 DBG2("UST trace started for app pid %d", app->pid);
4130 }
4131
4132 /* Everything went well at this point. */
4133 rcu_read_unlock();
4134 return;
4135
4136 error_unlock:
4137 pthread_mutex_unlock(&ua_sess->lock);
4138 error:
4139 if (ua_sess) {
4140 destroy_app_session(app, ua_sess);
4141 }
4142 rcu_read_unlock();
4143 return;
4144 }
4145
4146 /*
4147 * Add context to a specific channel for global UST domain.
4148 */
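/*
 * The context is added to every compatible registered application that has
 * both a session for this UST session and the target channel; applications
 * missing either are simply skipped.
 */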
4149 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
4150 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
4151 {
4152 int ret = 0;
4153 struct lttng_ht_node_str *ua_chan_node;
4154 struct lttng_ht_iter iter, uiter;
4155 struct ust_app_channel *ua_chan = NULL;
4156 struct ust_app_session *ua_sess;
4157 struct ust_app *app;
4158
4159 rcu_read_lock();
4160
4161 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4162 if (!app->compatible) {
4163 /*
4164 * TODO: In time, we should notify the caller of this error by
4165 * reporting it as a version error.
4166 */
4167 continue;
4168 }
4169 ua_sess = lookup_session_by_app(usess, app);
4170 if (ua_sess == NULL) {
4171 continue;
4172 }
4173
4174 pthread_mutex_lock(&ua_sess->lock);
4175 /* Lookup channel in the ust app session */
4176 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4177 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4178 if (ua_chan_node == NULL) {
4179 goto next_app;
4180 }
4181 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
4182 node);
4183 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
4184 if (ret < 0) {
4185 goto next_app;
4186 }
4187 next_app:
4188 pthread_mutex_unlock(&ua_sess->lock);
4189 }
4190
4191 rcu_read_unlock();
4192 return ret;
4193 }
4194
4195 /*
4196 * Enable event for a channel from a UST session for a specific PID.
4197 */
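/*
 * If the event does not exist yet in the application's channel, it is created
 * on the tracer side via create_ust_app_event(); otherwise the existing event
 * is re-enabled with enable_ust_app_event().
 */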
4198 int ust_app_enable_event_pid(struct ltt_ust_session *usess,
4199 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4200 {
4201 int ret = 0;
4202 struct lttng_ht_iter iter;
4203 struct lttng_ht_node_str *ua_chan_node;
4204 struct ust_app *app;
4205 struct ust_app_session *ua_sess;
4206 struct ust_app_channel *ua_chan;
4207 struct ust_app_event *ua_event;
4208
4209 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
4210
4211 rcu_read_lock();
4212
4213 app = ust_app_find_by_pid(pid);
4214 if (app == NULL) {
4215 ERR("UST app enable event per PID %d not found", pid);
4216 ret = -1;
4217 goto end;
4218 }
4219
4220 if (!app->compatible) {
4221 ret = 0;
4222 goto end;
4223 }
4224
4225 ua_sess = lookup_session_by_app(usess, app);
4226 if (!ua_sess) {
4227 /* The application has a problem or is probably dead. */
4228 ret = 0;
4229 goto end;
4230 }
4231
4232 pthread_mutex_lock(&ua_sess->lock);
4233 /* Lookup channel in the ust app session */
4234 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4235 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4236 /* If the channel is not found, there is a code flow error */
4237 assert(ua_chan_node);
4238
4239 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4240
4241 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4242 uevent->filter, uevent->attr.loglevel);
4243 if (ua_event == NULL) {
4244 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4245 if (ret < 0) {
4246 goto end_unlock;
4247 }
4248 } else {
4249 ret = enable_ust_app_event(ua_sess, ua_event, app);
4250 if (ret < 0) {
4251 goto end_unlock;
4252 }
4253 }
4254
4255 end_unlock:
4256 pthread_mutex_unlock(&ua_sess->lock);
4257 end:
4258 rcu_read_unlock();
4259 return ret;
4260 }
4261
4262 /*
4263 * Disable event for a channel from a UST session for a specific PID.
4264 */
4265 int ust_app_disable_event_pid(struct ltt_ust_session *usess,
4266 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4267 {
4268 int ret = 0;
4269 struct lttng_ht_iter iter;
4270 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
4271 struct ust_app *app;
4272 struct ust_app_session *ua_sess;
4273 struct ust_app_channel *ua_chan;
4274 struct ust_app_event *ua_event;
4275
4276 DBG("UST app disabling event %s for PID %d", uevent->attr.name, pid);
4277
4278 rcu_read_lock();
4279
4280 app = ust_app_find_by_pid(pid);
4281 if (app == NULL) {
4282 ERR("UST app disable event per PID %d not found", pid);
4283 ret = -1;
4284 goto error;
4285 }
4286
4287 if (!app->compatible) {
4288 ret = 0;
4289 goto error;
4290 }
4291
4292 ua_sess = lookup_session_by_app(usess, app);
4293 if (!ua_sess) {
4294 /* The application has a problem or is probably dead. */
4295 goto error;
4296 }
4297
4298 /* Lookup channel in the ust app session */
4299 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4300 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4301 if (ua_chan_node == NULL) {
4302 /* Channel does not exist, skip disabling */
4303 goto error;
4304 }
4305 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4306
4307 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &iter);
4308 ua_event_node = lttng_ht_iter_get_node_str(&iter);
4309 if (ua_event_node == NULL) {
4310 /* Event does not exist, skip disabling */
4311 goto error;
4312 }
4313 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
4314
4315 ret = disable_ust_app_event(ua_sess, ua_event, app);
4316 if (ret < 0) {
4317 goto error;
4318 }
4319
4320 error:
4321 rcu_read_unlock();
4322 return ret;
4323 }
4324
4325 /*
4326 * Calibrate registered applications.
4327 */
4328 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4329 {
4330 int ret = 0;
4331 struct lttng_ht_iter iter;
4332 struct ust_app *app;
4333
4334 rcu_read_lock();
4335
4336 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4337 if (!app->compatible) {
4338 /*
4339 * TODO: In time, we should notify the caller of this error by
4340 * reporting it as a version error.
4341 */
4342 continue;
4343 }
4344
4345 health_code_update();
4346
4347 ret = ustctl_calibrate(app->sock, calibrate);
4348 if (ret < 0) {
4349 switch (ret) {
4350 case -ENOSYS:
4351 /* Means that it's not implemented on the tracer side. */
4352 ret = 0;
4353 break;
4354 default:
4355 DBG2("Calibrate app PID %d returned with error %d",
4356 app->pid, ret);
4357 break;
4358 }
4359 }
4360 }
4361
4362 DBG("UST app global domain calibration finished");
4363
4364 rcu_read_unlock();
4365
4366 health_code_update();
4367
4368 return ret;
4369 }
4370
4371 /*
4372 * Receive registration and populate the given msg structure.
4373 *
4374 * On success return 0 else a negative value returned by the ustctl call.
4375 */
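/*
 * The message is filled with the tracer type and version, the process
 * credentials (pid, ppid, uid, gid), the ABI layout (bitness, alignments,
 * byte order) and the application name. Illustrative caller pattern (sketch
 * only; the actual caller lives outside this file):
 *
 *	struct ust_register_msg msg;
 *	int ret = ust_app_recv_registration(sock, &msg);
 *	if (ret < 0) {
 *		... application died or speaks an unsupported version ...
 *	}
 *	... msg.pid, msg.bits_per_long, msg.byte_order are now usable ...
 */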
4376 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4377 {
4378 int ret;
4379 uint32_t pid, ppid, uid, gid;
4380
4381 assert(msg);
4382
4383 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4384 &pid, &ppid, &uid, &gid,
4385 &msg->bits_per_long,
4386 &msg->uint8_t_alignment,
4387 &msg->uint16_t_alignment,
4388 &msg->uint32_t_alignment,
4389 &msg->uint64_t_alignment,
4390 &msg->long_alignment,
4391 &msg->byte_order,
4392 msg->name);
4393 if (ret < 0) {
4394 switch (-ret) {
4395 case EPIPE:
4396 case ECONNRESET:
4397 case LTTNG_UST_ERR_EXITING:
4398 DBG3("UST app recv reg message failed. Application died");
4399 break;
4400 case LTTNG_UST_ERR_UNSUP_MAJOR:
4401 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
4402 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
4403 LTTNG_UST_ABI_MINOR_VERSION);
4404 break;
4405 default:
4406 ERR("UST app recv reg message failed with ret %d", ret);
4407 break;
4408 }
4409 goto error;
4410 }
4411 msg->pid = (pid_t) pid;
4412 msg->ppid = (pid_t) ppid;
4413 msg->uid = (uid_t) uid;
4414 msg->gid = (gid_t) gid;
4415
4416 error:
4417 return ret;
4418 }
4419
4420 /*
4421 * Return a ust app channel object using the application object and the channel
4422 * object descriptor as a key. If not found, NULL is returned. An RCU read side
4423 * lock MUST be acquired before calling this function.
4424 */
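/*
 * For example (illustrative only):
 *
 *	rcu_read_lock();
 *	ua_chan = find_channel_by_objd(app, cobjd);
 *	if (ua_chan) {
 *		... use ua_chan while the read side lock is held ...
 *	}
 *	rcu_read_unlock();
 */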
4425 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4426 int objd)
4427 {
4428 struct lttng_ht_node_ulong *node;
4429 struct lttng_ht_iter iter;
4430 struct ust_app_channel *ua_chan = NULL;
4431
4432 assert(app);
4433
4434 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4435 node = lttng_ht_iter_get_node_ulong(&iter);
4436 if (node == NULL) {
4437 DBG2("UST app channel find by objd %d not found", objd);
4438 goto error;
4439 }
4440
4441 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4442
4443 error:
4444 return ua_chan;
4445 }
4446
4447 /*
4448 * Reply to a register channel notification from an application on the notify
4449 * socket. The channel metadata is also created.
4450 *
4451 * The session UST registry lock is acquired in this function.
4452 *
4453 * On success 0 is returned else a negative value.
4454 */
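/*
 * The flow is: look up the application and channel from the notify socket
 * data, pick the registry key matching the session buffer type, decide on the
 * channel header type on the first registration, append the channel metadata
 * if not already dumped, then send the reply (channel id, header type and
 * return code) back to the application.
 */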
4455 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4456 size_t nr_fields, struct ustctl_field *fields)
4457 {
4458 int ret, ret_code = 0;
4459 uint32_t chan_id, reg_count;
4460 uint64_t chan_reg_key;
4461 enum ustctl_channel_header type;
4462 struct ust_app *app;
4463 struct ust_app_channel *ua_chan;
4464 struct ust_app_session *ua_sess;
4465 struct ust_registry_session *registry;
4466 struct ust_registry_channel *chan_reg;
4467
4468 rcu_read_lock();
4469
4470 /* Lookup application. If not found, there is a code flow error. */
4471 app = find_app_by_notify_sock(sock);
4472 if (!app) {
4473 DBG("Application socket %d is being torn down. Abort event notify",
4474 sock);
4475 ret = 0;
4476 goto error_rcu_unlock;
4477 }
4478
4479 /* Lookup channel by UST object descriptor. */
4480 ua_chan = find_channel_by_objd(app, cobjd);
4481 if (!ua_chan) {
4482 DBG("Application channel is being torn down. Abort event notify");
4483 ret = 0;
4484 goto error_rcu_unlock;
4485 }
4486
4487 assert(ua_chan->session);
4488 ua_sess = ua_chan->session;
4489
4490 /* Get right session registry depending on the session buffer type. */
4491 registry = get_session_registry(ua_sess);
4492 assert(registry);
4493
4494 /* Depending on the buffer type, a different channel key is used. */
4495 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4496 chan_reg_key = ua_chan->tracing_channel_id;
4497 } else {
4498 chan_reg_key = ua_chan->key;
4499 }
4500
4501 pthread_mutex_lock(&registry->lock);
4502
4503 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4504 assert(chan_reg);
4505
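/*
 * On the first registration, the event header layout is chosen: the compact
 * layout can only encode a limited event id space (hence the 31 event
 * threshold below), past which the large layout is used. Later registrations
 * reuse whatever was decided the first time.
 */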
4506 if (!chan_reg->register_done) {
4507 reg_count = ust_registry_get_event_count(chan_reg);
4508 if (reg_count < 31) {
4509 type = USTCTL_CHANNEL_HEADER_COMPACT;
4510 } else {
4511 type = USTCTL_CHANNEL_HEADER_LARGE;
4512 }
4513
4514 chan_reg->nr_ctx_fields = nr_fields;
4515 chan_reg->ctx_fields = fields;
4516 chan_reg->header_type = type;
4517 } else {
4518 /* Get current already assigned values. */
4519 type = chan_reg->header_type;
4520 }
4521 /* Channel id is set during the object creation. */
4522 chan_id = chan_reg->chan_id;
4523
4524 /* Append to metadata */
4525 if (!chan_reg->metadata_dumped) {
4526 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4527 if (ret_code) {
4528 ERR("Error appending channel metadata (errno = %d)", ret_code);
4529 goto reply;
4530 }
4531 }
4532
4533 reply:
4534 DBG3("UST app replying to register channel key %" PRIu64
4535 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4536 ret_code);
4537
4538 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4539 if (ret < 0) {
4540 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4541 ERR("UST app reply channel failed with ret %d", ret);
4542 } else {
4543 DBG3("UST app reply channel failed. Application died");
4544 }
4545 goto error;
4546 }
4547
4548 /* This channel registry registration is completed. */
4549 chan_reg->register_done = 1;
4550
4551 error:
4552 pthread_mutex_unlock(&registry->lock);
4553 error_rcu_unlock:
4554 rcu_read_unlock();
4555 return ret;
4556 }
4557
4558 /*
4559 * Add event to the UST channel registry. When the event is added to the
4560 * registry, the metadata is also created. Once done, this replies to the
4561 * application with the appropriate error code.
4562 *
4563 * The session UST registry lock is acquired in the function.
4564 *
4565 * On success 0 is returned else a negative value.
4566 */
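/*
 * The event is inserted in the per-session registry under the channel key
 * matching the buffer type (per-UID or per-PID); the resulting event id, or
 * the registry error code, is then sent back to the application with
 * ustctl_reply_register_event().
 */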
4567 static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
4568 char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
4569 char *model_emf_uri)
4570 {
4571 int ret, ret_code;
4572 uint32_t event_id = 0;
4573 uint64_t chan_reg_key;
4574 struct ust_app *app;
4575 struct ust_app_channel *ua_chan;
4576 struct ust_app_session *ua_sess;
4577 struct ust_registry_session *registry;
4578
4579 rcu_read_lock();
4580
4581 /* Lookup application. If not found, there is a code flow error. */
4582 app = find_app_by_notify_sock(sock);
4583 if (!app) {
4584 DBG("Application socket %d is being torn down. Abort event notify",
4585 sock);
4586 ret = 0;
4587 goto error_rcu_unlock;
4588 }
4589
4590 /* Lookup channel by UST object descriptor. */
4591 ua_chan = find_channel_by_objd(app, cobjd);
4592 if (!ua_chan) {
4593 DBG("Application channel is being torn down. Abort event notify");
4594 ret = 0;
4595 goto error_rcu_unlock;
4596 }
4597
4598 assert(ua_chan->session);
4599 ua_sess = ua_chan->session;
4600
4601 registry = get_session_registry(ua_sess);
4602 assert(registry);
4603
4604 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4605 chan_reg_key = ua_chan->tracing_channel_id;
4606 } else {
4607 chan_reg_key = ua_chan->key;
4608 }
4609
4610 pthread_mutex_lock(&registry->lock);
4611
4612 ret_code = ust_registry_create_event(registry, chan_reg_key,
4613 sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
4614 model_emf_uri, ua_sess->buffer_type, &event_id);
4615
4616 /*
4617 * The return code is sent back through ustctl so that, in case of an
4618 * error, the application can be notified. It is important not to return a
4619 * negative error here or else the application will get closed.
4620 */
4621 ret = ustctl_reply_register_event(sock, event_id, ret_code);
4622 if (ret < 0) {
4623 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4624 ERR("UST app reply event failed with ret %d", ret);
4625 } else {
4626 DBG3("UST app reply event failed. Application died");
4627 }
4628 /*
4629 * No need to wipe the created event since the application socket will
4630 * get closed on error, hence cleaning up everything by itself.
4631 */
4632 goto error;
4633 }
4634
4635 DBG3("UST registry event %s with id %" PRId32 " added successfully",
4636 name, event_id);
4637
4638 error:
4639 pthread_mutex_unlock(&registry->lock);
4640 error_rcu_unlock:
4641 rcu_read_unlock();
4642 return ret;
4643 }
4644
4645 /*
4646 * Handle application notification through the given notify socket.
4647 *
4648 * Return 0 on success or else a negative value.
4649 */
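/*
 * Two notification commands are handled: USTCTL_NOTIFY_CMD_EVENT, which adds
 * the event to the session registry and replies with an event id, and
 * USTCTL_NOTIFY_CMD_CHANNEL, which completes the channel registry entry and
 * replies with the channel id and header type.
 */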
4650 int ust_app_recv_notify(int sock)
4651 {
4652 int ret;
4653 enum ustctl_notify_cmd cmd;
4654
4655 DBG3("UST app receiving notify from sock %d", sock);
4656
4657 ret = ustctl_recv_notify(sock, &cmd);
4658 if (ret < 0) {
4659 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4660 ERR("UST app recv notify failed with ret %d", ret);
4661 } else {
4662 DBG3("UST app recv notify failed. Application died");
4663 }
4664 goto error;
4665 }
4666
4667 switch (cmd) {
4668 case USTCTL_NOTIFY_CMD_EVENT:
4669 {
4670 int sobjd, cobjd, loglevel;
4671 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
4672 size_t nr_fields;
4673 struct ustctl_field *fields;
4674
4675 DBG2("UST app ustctl register event received");
4676
4677 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
4678 &sig, &nr_fields, &fields, &model_emf_uri);
4679 if (ret < 0) {
4680 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4681 ERR("UST app recv event failed with ret %d", ret);
4682 } else {
4683 DBG3("UST app recv event failed. Application died");
4684 }
4685 goto error;
4686 }
4687
4688 /* Add event to the UST registry coming from the notify socket. */
4689 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
4690 fields, loglevel, model_emf_uri);
4691 if (ret < 0) {
4692 goto error;
4693 }
4694
4695 break;
4696 }
4697 case USTCTL_NOTIFY_CMD_CHANNEL:
4698 {
4699 int sobjd, cobjd;
4700 size_t nr_fields;
4701 struct ustctl_field *fields;
4702
4703 DBG2("UST app ustctl register channel received");
4704
4705 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
4706 &fields);
4707 if (ret < 0) {
4708 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4709 ERR("UST app recv channel failed with ret %d", ret);
4710 } else {
4711 DBG3("UST app recv channel failed. Application died");
4712 }
4713 goto error;
4714 }
4715
4716 ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
4717 fields);
4718 if (ret < 0) {
4719 goto error;
4720 }
4721
4722 break;
4723 }
4724 default:
4725 /* Should NEVER happen. */
4726 assert(0);
4727 }
4728
4729 error:
4730 return ret;
4731 }
4732
4733 /*
4734 * Once the notify socket hangs up, this is called. First, it tries to find the
4735 * corresponding application. On failure, the call_rcu to close the socket is
4736 * executed. If an application is found, it tries to delete it from the notify
4737 * socket hash table. Whatever the result, it proceeds to the call_rcu.
4738 *
4739 * Note that an object needs to be allocated here so on ENOMEM failure, the
4740 * call RCU is not done but the rest of the cleanup is.
4741 */
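/*
 * The deferred close relies on a small heap-allocated object carrying the fd,
 * roughly (sketch, assuming close_notify_sock_rcu() closes obj->fd and frees
 * obj):
 *
 *	obj->fd = sock;
 *	call_rcu(&obj->head, close_notify_sock_rcu);
 *
 * so the file descriptor is only closed after a grace period, once no RCU
 * reader can still be using the application entry that referenced it.
 */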
4742 void ust_app_notify_sock_unregister(int sock)
4743 {
4744 int err_enomem = 0;
4745 struct lttng_ht_iter iter;
4746 struct ust_app *app;
4747 struct ust_app_notify_sock_obj *obj;
4748
4749 assert(sock >= 0);
4750
4751 rcu_read_lock();
4752
4753 obj = zmalloc(sizeof(*obj));
4754 if (!obj) {
4755 /*
4756 * An ENOMEM is kind of uncool. If this strikes, we continue the
4757 * procedure but the call_rcu will not be called. In this case, we
4758 * accept the fd leak rather than possibly creating an unsynchronized
4759 * state between threads.
4760 *
4761 * TODO: The notify object should be created once the notify socket is
4762 * registered and stored independently from the ust app object. The
4763 * tricky part is to synchronize the teardown of the application and
4764 * this notify object. Let's keep that in mind so we can avoid this
4765 * kind of shenanigans with ENOMEM in the teardown path.
4766 */
4767 err_enomem = 1;
4768 } else {
4769 obj->fd = sock;
4770 }
4771
4772 DBG("UST app notify socket unregister %d", sock);
4773
4774 /*
4775 * Lookup application by notify socket. If this fails, this means that the
4776 * hash table delete has already been done by the application
4777 * unregistration process so we can safely close the notify socket in a
4778 * call RCU.
4779 */
4780 app = find_app_by_notify_sock(sock);
4781 if (!app) {
4782 goto close_socket;
4783 }
4784
4785 iter.iter.node = &app->notify_sock_n.node;
4786
4787 /*
4788 * Whether we fail or succeed here, in both cases we have to close the
4789 * socket after a grace period, hence the call_rcu below. If the
4790 * deletion succeeds, the application is no longer visible to other
4791 * threads; if it fails, it means the node was already removed from the
4792 * hash table, so either way we just have to close the
4793 * socket.
4794 */
4795 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4796
4797 close_socket:
4798 rcu_read_unlock();
4799
4800 /*
4801 * Close the socket after a grace period to avoid the socket being reused
4802 * before the application object is freed, which could create a race between
4803 * threads trying to add a unique entry in the global hash table.
4804 */
4805 if (!err_enomem) {
4806 call_rcu(&obj->head, close_notify_sock_rcu);
4807 }
4808 }