/*
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#define _LGPL_SOURCE
#include <inttypes.h>
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/types.h>

#include <urcu/compiler.h>

#include <common/bytecode/bytecode.h>
#include <common/compat/errno.h>
#include <common/common.h>
#include <common/hashtable/utils.h>
#include <lttng/event-rule/event-rule.h>
#include <lttng/event-rule/event-rule-internal.h>
#include <lttng/event-rule/tracepoint.h>
#include <lttng/condition/condition.h>
#include <lttng/condition/on-event-internal.h>
#include <lttng/condition/on-event.h>
#include <lttng/trigger/trigger-internal.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "condition-internal.h"
#include "health-sessiond.h"
#include "ust-consumer.h"
#include "lttng-ust-ctl.h"
#include "lttng-ust-error.h"
#include "lttng-sessiond.h"
#include "notification-thread-commands.h"
#include "event-notifier-error-accounting.h"
struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;
static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}
/*
 * Return the incremented value of next_session_id; serialized by
 * next_session_id_lock.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}
static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_abi_channel_attr *uattr)
{
	/* Copy channel attributes field by field since the layouts differ. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
	attr->blocking_timeout = uattr->u.s.blocking_timeout;
}
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes: the event name, the
 * filter bytecode, the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions. */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist; compare the length, then the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist; compare the count, then the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}
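
/*
 * Editorial note, illustrative sketch: urcu's lockless hash table separates
 * hashing from equality. The event name only selects the hash bucket; the
 * match callback above decides full-key equality. Insertion and lookup must
 * therefore pass the same (hash, match, key) triple, as done in this file by
 * add_unique_ust_app_event() and find_ust_app_event():
 *
 *	cds_lfht_add_unique(ht->ht, hash, ht_match_ust_app_event, &key, &node);
 *	cds_lfht_lookup(ht->ht, hash, ht_match_ust_app_event, &key, &iter.iter);
 */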
/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}
/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
			caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}
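
/*
 * Editorial note, illustrative sketch (the enclosing teardown code is not
 * part of this excerpt): the owner of a ust_app_notify_sock_obj never
 * close()es the fd in-line; it defers the close past an RCU grace period so
 * that concurrent RCU readers cannot observe a recycled fd:
 *
 *	obj->fd = notify_sock;
 *	call_rcu(&obj->head, close_notify_sock_rcu);
 */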
/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exist before calling this function or else
 * it will assert if not found. RCU read side lock must be acquired.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long,
				lttng_credentials_get_uid(&ua_sess->real_credentials));
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	}

error:
	return registry;
}
/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
					sock, ua_ctx->obj->handle, ret);
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}
/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	assert(ua_event);

	free(ua_event->filter);
	if (ua_event->exclusion != NULL)
		free(ua_event->exclusion);
	if (ua_event->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_event->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release event obj failed with ret %d",
					sock, ret);
		}
		free(ua_event->obj);
	}
	free(ua_event);
}
/*
 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
 * through a call_rcu().
 */
static
void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
{
	struct ust_app_event_notifier_rule *obj = caa_container_of(
			head, struct ust_app_event_notifier_rule, rcu_head);

	free(obj);
}
/*
 * Delete ust app event notifier rule safely.
 */
static void delete_ust_app_event_notifier_rule(int sock,
		struct ust_app_event_notifier_rule *ua_event_notifier_rule,
		struct ust_app *app)
{
	int ret;

	assert(ua_event_notifier_rule);

	if (ua_event_notifier_rule->exclusion != NULL) {
		free(ua_event_notifier_rule->exclusion);
	}

	if (ua_event_notifier_rule->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_event_notifier_rule->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to release event notifier object: app = '%s' (ppid %d), ret = %d",
					app->name, (int) app->ppid, ret);
		}

		free(ua_event_notifier_rule->obj);
	}

	lttng_trigger_put(ua_event_notifier_rule->trigger);
	call_rcu(&ua_event_notifier_rule->rcu_head,
			free_ust_app_event_notifier_rule_rcu);
}
/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
			caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}
/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
}
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key,
					sock >= 0);
		}

		/*
		 * A negative socket can be used by the caller when
		 * cleaning-up a ua_chan in an error path. Skip the
		 * accounting in this case.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);

	return ret;
}
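
/*
 * Editorial note on the locking convention used throughout this file
 * (illustrative; `ustctl_some_command` is a placeholder, not a real symbol):
 * every ustctl command issued on app->sock is bracketed by app->sock_lock so
 * that a command and its reply are never interleaved with another thread's
 * traffic on the same socket:
 *
 *	pthread_mutex_lock(&app->sock_lock);
 *	ret = ustctl_some_command(app->sock, ...);
 *	pthread_mutex_unlock(&app->sock_lock);
 */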
int ust_app_release_object(struct ust_app *app,
		struct lttng_ust_abi_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
	ret = ustctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
				max_t(size_t, registry->metadata_len_sent,
					new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
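
/*
 * Editorial sketch of the caller contract of ust_app_push_metadata(), derived
 * from the comments above: the registry lock is held on entry and on return,
 * but the function itself drops it around the consumer push to avoid the
 * consumerd/sessiond lock inversion:
 *
 *	pthread_mutex_lock(&registry->lock);
 *	ret = ust_app_push_metadata(registry, socket, 0);
 *	pthread_mutex_unlock(&registry->lock);
 *
 * push_metadata() below is a direct instance of this pattern.
 */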
/*
 * For a given application and session, push metadata to consumer.
 * The socket used to send the metadata is retrieved from the given
 * consumer output.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}

	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd, because
 * doing so causes inter-process deadlocks between consumerd and sessiond with
 * the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;
	uint64_t metadata_key;
	bool registry_was_already_closed;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);
	metadata_key = registry->metadata_key;
	registry_was_already_closed = registry->metadata_closed;
	if (metadata_key != 0) {
		/*
		 * Metadata closed. Even on error this means that the consumer
		 * is not responding or not found so either way a second close
		 * should NOT be emitted for this registry.
		 */
		registry->metadata_closed = 1;
	}
	pthread_mutex_unlock(&registry->lock);

	if (metadata_key == 0 || registry_was_already_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto end;
	}

	ret = consumer_close_metadata(socket, metadata_key);
	if (ret < 0) {
		goto end;
	}

end:
	rcu_read_unlock();
	return ret;
}
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
			caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flagged the metadata registry
		 * as closed, so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}

		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;
	struct lttng_ht_iter iter;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	bool event_notifier_write_fd_is_open;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Remove the event notifier rules associated with this app. */
	rcu_read_lock();
	cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
			&iter.iter, event_notifier_rule, node.node) {
		ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
		assert(!ret);

		delete_ust_app_event_notifier_rule(
				app->sock, event_notifier_rule, app);
	}
	rcu_read_unlock();

	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);
	ht_cleanup_push(app->token_to_event_notifier_rule_ht);

	/*
	 * This could be NULL if the event notifier setup failed (e.g the app
	 * was killed or the tracer does not support this feature).
	 */
	if (app->event_notifier_group.object) {
		enum lttng_error_code ret_code;
		enum event_notifier_error_accounting_status status;

		const int event_notifier_read_fd = lttng_pipe_get_readfd(
				app->event_notifier_group.event_pipe);

		ret_code = notification_thread_command_remove_tracer_event_source(
				the_notification_thread_handle,
				event_notifier_read_fd);
		if (ret_code != LTTNG_OK) {
			ERR("Failed to remove application tracer event source from notification thread");
		}

		status = event_notifier_error_accounting_unregister_app(app);
		if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
			ERR("Error unregistering app from event notifier error accounting");
		}

		ustctl_release_object(sock, app->event_notifier_group.object);
		free(app->event_notifier_group.object);
	}

	event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
			app->event_notifier_group.event_pipe);
	lttng_pipe_destroy(app->event_notifier_group.event_pipe);
	/*
	 * Release the file descriptors reserved for the event notifier pipe.
	 * The app could be destroyed before the write end of the pipe could be
	 * passed to the application (and closed). In that case, both file
	 * descriptors must be released.
	 */
	lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}
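
/*
 * Editorial sketch of the teardown ordering described above (the unregister
 * path itself is outside this excerpt; names are indicative only):
 *
 *	lttng_ht_del(ust_app_ht_by_sock, &iter);        // unpublish sock -> app
 *	call_rcu(&app->pid_n.head, delete_ust_app_rcu); // defer reclamation
 *	// ...a grace period elapses, so no RCU reader can still see the app,
 *	// then delete_ust_app() runs and close(sock) is finally safe.
 */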
/*
 * URCU intermediate call to delete an UST app.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
			caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
			caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}
/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 *
 * The session list lock must be held by the caller.
 */
static void destroy_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(app);
	assert(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}
/*
 * Alloc new UST app session.
 */
static
struct ust_app_session *alloc_ust_app_session(void)
{
	struct ust_app_session *ua_sess;

	/* Init most of the default value by allocating and zeroing */
	ua_sess = zmalloc(sizeof(struct ust_app_session));
	if (ua_sess == NULL) {
		PERROR("malloc");
		goto error_free;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, NULL);

	return ua_sess;

error_free:
	return NULL;
}
/*
 * Alloc new UST app channel.
 */
static
struct ust_app_channel *alloc_ust_app_channel(const char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_abi_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default value by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
		ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}
/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
{
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("zmalloc ust app stream");
		goto error;
	}

	/* Zero could be a valid value for a handle so flag it to -1. */
	stream->handle = -1;

error:
	return stream;
}
/*
 * Alloc new UST app event.
 */
static
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_abi_event *attr)
{
	struct ust_app_event *ua_event;

	/* Init most of the default value by allocating and zeroing */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
		PERROR("Failed to allocate ust_app_event structure");
		goto error;
	}

	ua_event->enabled = 1;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return NULL;
}
/*
 * Allocate a new UST app event notifier rule.
 */
static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
		struct lttng_trigger *trigger)
{
	enum lttng_event_rule_generate_exclusions_status
			generate_exclusion_status;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;
	struct lttng_condition *condition = NULL;
	const struct lttng_event_rule *event_rule = NULL;

	ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
	if (ua_event_notifier_rule == NULL) {
		PERROR("Failed to allocate ust_app_event_notifier_rule structure");
		goto error;
	}

	ua_event_notifier_rule->enabled = 1;
	ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
	lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
			ua_event_notifier_rule->token);

	condition = lttng_trigger_get_condition(trigger);
	assert(condition);
	assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_ON_EVENT);

	assert(LTTNG_CONDITION_STATUS_OK == lttng_condition_on_event_get_rule(condition, &event_rule));
	assert(event_rule);

	ua_event_notifier_rule->error_counter_index =
			lttng_condition_on_event_get_error_counter_index(condition);
	/* Acquire the event notifier's reference to the trigger. */
	lttng_trigger_get(trigger);

	ua_event_notifier_rule->trigger = trigger;
	ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
	generate_exclusion_status = lttng_event_rule_generate_exclusions(
			event_rule, &ua_event_notifier_rule->exclusion);
	switch (generate_exclusion_status) {
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
		break;
	default:
		/* Error occurred. */
		ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
		goto error_put_trigger;
	}

	DBG3("UST app event notifier rule allocated: token = %" PRIu64,
			ua_event_notifier_rule->token);

	return ua_event_notifier_rule;

error_put_trigger:
	lttng_trigger_put(trigger);
error:
	free(ua_event_notifier_rule);
	return NULL;
}
/*
 * Alloc new UST app context.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *ctx_name = NULL;

			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;

error:
	free(ua_ctx);
	return NULL;
}
/*
 * Create a liblttng-ust filter bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
		const struct lttng_bytecode *orig_f)
{
	struct lttng_ust_abi_filter_bytecode *filter = NULL;

	/* Copy filter bytecode. */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
		goto error;
	}

	assert(sizeof(struct lttng_bytecode) ==
			sizeof(struct lttng_ust_abi_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
error:
	return filter;
}
/*
 * Create a liblttng-ust capture bytecode from given bytecode.
 *
 * Return allocated capture bytecode or NULL on error.
 */
static struct lttng_ust_abi_capture_bytecode *
create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
	struct lttng_ust_abi_capture_bytecode *capture = NULL;

	/* Copy capture bytecode. */
	capture = zmalloc(sizeof(*capture) + orig_f->len);
	if (!capture) {
		PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
		goto error;
	}

	assert(sizeof(struct lttng_bytecode) ==
			sizeof(struct lttng_ust_abi_capture_bytecode));
	memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
error:
	return capture;
}
/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}
/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, notify_sock_n);

error:
	return NULL;
}
/*
 * Lookup for an ust app event based on event name, filter bytecode and the
 * event loglevel.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		const char *name, const struct lttng_bytecode *filter,
		int loglevel_value,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}
/*
 * Look-up an event notifier rule based on its token id.
 *
 * Must be called with the RCU read lock held.
 * Return an ust_app_event_notifier_rule object or NULL on error.
 */
static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
		struct lttng_ht *ht, uint64_t token)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct ust_app_event_notifier_rule *event_notifier_rule = NULL;

	assert(ht);

	lttng_ht_lookup(ht, &token, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		DBG2("UST app event notifier rule token not found: token = %" PRIu64,
				token);
		goto end;
	}

	event_notifier_rule = caa_container_of(
			node, struct ust_app_event_notifier_rule, node);
end:
	return event_notifier_rule;
}
/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app create channel context failed for app (pid: %d) "
					"with ret %d", app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app add context failed. Application is dead.");
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}
/*
 * Set the filter on the tracer.
 */
static int set_ust_object_filter(struct ust_app *app,
		const struct lttng_bytecode *bytecode,
		struct lttng_ust_abi_object_data *ust_object)
{
	int ret;
	struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_filter(app->sock, ust_bytecode,
			ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app set object filter failed: object = %p of app pid = %d, ret = %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object filter. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter successfully set: object = %p", ust_object);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}
/*
 * Set a capture bytecode for the passed object.
 * The sequence number enforces the ordering at runtime and on reception of
 * the captured payloads.
 */
static int set_ust_capture(struct ust_app *app,
		const struct lttng_bytecode *bytecode,
		unsigned int capture_seqnum,
		struct lttng_ust_abi_object_data *ust_object)
{
	int ret;
	struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;

	health_code_update();

	ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}

	/*
	 * Set the sequence number to ensure the capture of fields is ordered.
	 */
	ust_bytecode->seqnum = capture_seqnum;

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_capture(app->sock, ust_bytecode,
			ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app set object capture failed: object = %p of app pid = %d, ret = %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object capture. Application is dead.");
		}
		goto error;
	}

	DBG2("UST capture successfully set: object = %p", ust_object);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}
static
struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
	size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
			LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;

	ust_exclusion = zmalloc(exclusion_alloc_size);
	if (!ust_exclusion) {
		PERROR("malloc");
		goto end;
	}

	assert(sizeof(struct lttng_event_exclusion) ==
			sizeof(struct lttng_ust_abi_event_exclusion));
	memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
end:
	return ust_exclusion;
}
/*
 * Set event exclusions on the tracer.
 */
static int set_ust_object_exclusions(struct ust_app *app,
		const struct lttng_event_exclusion *exclusions,
		struct lttng_ust_abi_object_data *ust_object)
{
	int ret;
	struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;

	assert(exclusions && exclusions->count > 0);

	health_code_update();

	ust_exclusions = create_ust_exclusion_from_exclusion(
			exclusions);
	if (!ust_exclusions) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to set UST app exclusions for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object exclusions. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusions set successfully for object %p", ust_object);

error:
	health_code_update();
	free(ust_exclusions);
	return ret;
}
/*
 * Disable the specified object on the UST tracer for the UST session.
 */
static int disable_ust_object(struct ust_app *app,
		struct lttng_ust_abi_object_data *object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to disable UST app object %p app (pid: %d) with ret %d",
					object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to disable UST app object. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p disabled successfully for app (pid: %d)",
			object, app->pid);

error:
	health_code_update();
	return ret;
}
/*
 * Disable the specified channel on the UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}
/*
 * Enable the specified channel on the UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable channel failed. Application is dead.");
		}
		goto error;
	}

	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}
/*
 * Enable the specified object on the UST tracer for the UST session.
 */
static int enable_ust_object(
		struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to enable UST app object. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p enabled successfully for app (pid: %d)",
			ust_object, app->pid);

error:
	health_code_update();
	return ret;
}
/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN;	/* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN;	/* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			abort();
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d object: %p",
			ua_event->attr.name, app->pid, ua_event->obj);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* The event is created disabled; enable it on the tracer if requested. */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_object(app, ua_event->obj);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				abort();
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
static int init_ust_event_notifier_from_event_rule(
		const struct lttng_event_rule *rule,
		struct lttng_ust_abi_event_notifier *event_notifier)
{
	enum lttng_event_rule_status status;
	enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
	int loglevel = -1, ret = 0;
	const char *pattern;

	/* For now only LTTNG_EVENT_RULE_TYPE_TRACEPOINT are supported. */
	assert(lttng_event_rule_get_type(rule) ==
			LTTNG_EVENT_RULE_TYPE_TRACEPOINT);

	memset(event_notifier, 0, sizeof(*event_notifier));

	if (lttng_event_rule_targets_agent_domain(rule)) {
		/*
		 * Special event for agents.
		 * The actual meat of the event is in the filter that will be
		 * attached later on.
		 * Set the default values for the agent event.
		 */
		pattern = event_get_default_agent_ust_name(
				lttng_event_rule_get_domain_type(rule));
		loglevel = 0;
		ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
	} else {
		const struct lttng_log_level_rule *log_level_rule;

		status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
		if (status != LTTNG_EVENT_RULE_STATUS_OK) {
			/* At this point, this is a fatal error. */
			abort();
		}

		status = lttng_event_rule_tracepoint_get_log_level_rule(
				rule, &log_level_rule);
		if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
			ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
		} else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
			enum lttng_log_level_rule_status llr_status;

			switch (lttng_log_level_rule_get_type(log_level_rule)) {
			case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
				ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
				llr_status = lttng_log_level_rule_exactly_get_level(
						log_level_rule, &loglevel);
				break;
			case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
				ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
				llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
						log_level_rule, &loglevel);
				break;
			default:
				abort();
			}

			assert(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
		} else {
			/* At this point this is a fatal error. */
			abort();
		}
	}

	event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
	ret = lttng_strncpy(event_notifier->event.name, pattern,
			LTTNG_UST_ABI_SYM_NAME_LEN - 1);
	if (ret) {
		ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
				pattern);
		goto end;
	}

	event_notifier->event.loglevel_type = ust_loglevel_type;
	event_notifier->event.loglevel = loglevel;
end:
	return ret;
}
2093 * Create the specified event notifier against the user space tracer of a
 * given application.
 */
static int create_ust_event_notifier(struct ust_app *app,
		struct ust_app_event_notifier_rule *ua_event_notifier_rule)
{
	int ret = 0;
	enum lttng_condition_status condition_status;
	const struct lttng_condition *condition = NULL;
	struct lttng_ust_abi_event_notifier event_notifier;
	const struct lttng_event_rule *event_rule = NULL;
	unsigned int capture_bytecode_count = 0, i;
	enum lttng_condition_status cond_status;

	health_code_update();
	assert(app->event_notifier_group.object);

	condition = lttng_trigger_get_const_condition(
			ua_event_notifier_rule->trigger);
	assert(condition);
	assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_ON_EVENT);

	condition_status = lttng_condition_on_event_get_rule(
			condition, &event_rule);
	assert(condition_status == LTTNG_CONDITION_STATUS_OK);

	assert(event_rule);
	assert(lttng_event_rule_get_type(event_rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);

	init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
	event_notifier.event.token = ua_event_notifier_rule->token;
	event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;

	/* Create UST event notifier against the tracer. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event_notifier(app->sock, &event_notifier,
			app->event_notifier_group.object,
			&ua_event_notifier_rule->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event notifier: name = '%s', app = '%s' (ppid: %d), ret = %d",
					event_notifier.event.name, app->name,
					app->ppid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die
			 * during the creation process. Don't report an error so
			 * the execution can continue normally.
			 */
			ret = 0;
			DBG3("UST app create event notifier failed (application is dead): app = '%s' (ppid = %d)",
					app->name, app->ppid);
		}
		goto error;
	}

	ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;

	DBG2("UST app event notifier %s created successfully: app = '%s' (ppid: %d), object: %p",
			event_notifier.event.name, app->name, app->ppid,
			ua_event_notifier_rule->obj);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event_notifier_rule->filter) {
		ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
				ua_event_notifier_rule->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event. */
	if (ua_event_notifier_rule->exclusion) {
		ret = set_ust_object_exclusions(app,
				ua_event_notifier_rule->exclusion,
				ua_event_notifier_rule->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set the capture bytecodes. */
	cond_status = lttng_condition_on_event_get_capture_descriptor_count(
			condition, &capture_bytecode_count);
	assert(cond_status == LTTNG_CONDITION_STATUS_OK);

	for (i = 0; i < capture_bytecode_count; i++) {
		const struct lttng_bytecode *capture_bytecode =
				lttng_condition_on_event_get_capture_bytecode_at_index(
						condition, i);

		ret = set_ust_capture(app, capture_bytecode, i,
				ua_event_notifier_rule->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/*
	 * We now need to explicitly enable the event, since it
	 * is disabled at creation.
	 */
	ret = enable_ust_object(app, ua_event_notifier_rule->obj);
	if (ret < 0) {
		/*
		 * If we hit an EPERM, something is wrong with our enable call.
		 * If we get an EEXIST, there is a problem on the tracer side
		 * since we just created it.
		 */
		switch (ret) {
		case -LTTNG_UST_ERR_PERM:
			/* Code flow problem. */
			abort();
		case -LTTNG_UST_ERR_EXIST:
			/* It's OK for our use case. */
			ret = 0;
			break;
		default:
			break;
		}
		goto error;
	}

	ua_event_notifier_rule->enabled = true;

error:
	health_code_update();
	return ret;
}
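
/*
 * Note that the setup above is ordered on purpose: the tracer creates event
 * notifiers in the disabled state, so the optional filter, exclusions and
 * capture bytecodes are all attached before the final explicit enable. This
 * way the tracer never evaluates a partially configured rule.
 */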

/*
 * Copy data between an UST app event and a LTT event.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = lttng_bytecode_copy(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
				LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}

/*
 * Copy data between an UST app channel and a LTT channel.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;

	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}

/*
 * Copy data between a UST app session and a regular LTT session.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct tm *timeinfo;
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	timeinfo = localtime(&app->registration_time);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH,
				lttng_credentials_get_uid(&ua_sess->real_credentials),
				app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		PERROR("asprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
			sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
			sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
					app->name, app->pid, datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_UID_PATH,
					app->uid, app->bits_per_long);
			break;
		default:
			assert(0);
			goto error;
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			assert(0);
			goto error;
		}
		strncat(ua_sess->shm_path, tmp_shm_path,
				sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}
	return;

error:
	consumer_output_put(ua_sess->consumer);
}

/*
 * Lookup session wrapper.
 */
static
void __lookup_session_by_app(const struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}

/*
 * Return ust app session from the app session hashtable using the UST session
 * id.
 */
static struct ust_app_session *lookup_session_by_app(
		const struct ltt_ust_session *usess, struct ust_app *app)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		goto error;
	}

	return caa_container_of(node, struct ust_app_session, node);

error:
	return NULL;
}

/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major, app->version.minor,
			reg_pid->root_shm_path, reg_pid->shm_path,
			lttng_credentials_get_uid(&ua_sess->effective_credentials),
			lttng_credentials_get_gid(&ua_sess->effective_credentials),
			ua_sess->tracing_id,
			app->uid);
	if (ret < 0) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_uid->root_shm_path,
			reg_uid->shm_path, usess->uid, usess->gid,
			ua_sess->tracing_id, app->uid);
	if (ret < 0) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_uid_destroy(reg_uid, NULL);
		goto error;
	}

	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");

end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
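
/*
 * The two setup_buffer_reg_*() helpers above differ mainly in how registries
 * are keyed: per-PID registries are looked up by the ust app session id
 * (buffer_reg_pid_find(ua_sess->id)) and thus exist once per traced process,
 * while per-UID registries are looked up by session id, bitness and uid
 * (buffer_reg_uid_find()) and are shared by all applications of a user. This
 * is also why only the per-UID variant links itself into the session's
 * buffer_reg_uid_list for teardown.
 */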

/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session();
		if (ua_sess == NULL) {
			/* Only malloc can fail here, so something is really wrong. */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	if (ua_sess->handle == -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or is not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd,
				&ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
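
/*
 * Callers of find_or_create_ust_app_session() can rely on its error contract:
 * -ENOMEM is only returned for local allocation failures, while any failure
 * to reach the application (EPIPE, LTTNG_UST_ERR_EXITING or an internal
 * tracer error) is collapsed into -ENOTCONN, so an application dying in the
 * middle of its registration is not reported as a hard error.
 */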

/*
 * Match function for a hash table lookup of ust_app_ctx.
 *
 * It matches an ust app context based on the context type and, in the case
 * of perf counters, their name.
 */
static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_ctx *ctx;
	const struct lttng_ust_context_attr *key;

	assert(node);
	assert(_key);

	ctx = caa_container_of(node, struct ust_app_ctx, node.node);
	key = _key;

	/* Context type */
	if (ctx->ctx.ctx != key->ctx) {
		goto no_match;
	}

	switch (key->ctx) {
	case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
		if (strncmp(key->u.perf_counter.name,
				ctx->ctx.u.perf_counter.name,
				sizeof(key->u.perf_counter.name))) {
			goto no_match;
		}
		break;
	case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
		if (strcmp(key->u.app_ctx.provider_name,
				ctx->ctx.u.app_ctx.provider_name) ||
				strcmp(key->u.app_ctx.ctx_name,
				ctx->ctx.u.app_ctx.ctx_name)) {
			goto no_match;
		}
		break;
	default:
		break;
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}

/*
 * Lookup for an ust app context from an lttng_ust_context.
 *
 * Must be called while holding RCU read side lock.
 * Return an ust_app_ctx object or NULL on error.
 */
static
struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
		struct lttng_ust_context_attr *uctx)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct ust_app_ctx *app_ctx = NULL;

	assert(uctx);
	assert(ht);

	/* Lookup using the lttng_ust_context_type and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
			ht_match_ust_app_ctx, uctx, &iter.iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (!node) {
		goto end;
	}

	app_ctx = caa_container_of(node, struct ust_app_ctx, node);

end:
	return app_ctx;
}

/*
 * Create a context for the channel on the tracer.
 *
 * Called with UST app session lock held and a RCU read side lock.
 */
static
int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
		struct lttng_ust_context_attr *uctx,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app adding context to channel %s", ua_chan->name);

	ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
	if (ua_ctx) {
		ret = -EEXIST;
		goto error;
	}

	ua_ctx = alloc_ust_app_ctx(uctx);
	if (ua_ctx == NULL) {
		/* malloc failed */
		ret = -ENOMEM;
		goto error;
	}

	lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
	lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
	cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);

	ret = create_ust_channel_context(ua_chan, ua_ctx, app);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}

/*
 * Enable on the tracer side a ust app event for the session and channel.
 *
 * Called with UST app session lock held.
 */
static
int enable_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_event *ua_event, struct ust_app *app)
{
	int ret;

	ret = enable_ust_object(app, ua_event->obj);
	if (ret < 0) {
		goto error;
	}

	ua_event->enabled = 1;

error:
	return ret;
}

/*
 * Disable on the tracer side a ust app event for the session and channel.
 */
static int disable_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_event *ua_event, struct ust_app *app)
{
	int ret;

	ret = disable_ust_object(app, ua_event->obj);
	if (ret < 0) {
		goto error;
	}

	ua_event->enabled = 0;

error:
	return ret;
}

/*
 * Lookup ust app channel for session and disable it on the tracer side.
 */
static
int disable_ust_app_channel(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app *app)
{
	int ret;

	ret = disable_ust_channel(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	ua_chan->enabled = 0;

error:
	return ret;
}

/*
 * Lookup ust app channel for session and enable it on the tracer side. This
 * MUST be called with a RCU read side lock acquired.
 */
static int enable_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node == NULL) {
		DBG2("Unable to find channel %s in ust session id %" PRIu64,
				uchan->name, ua_sess->tracing_id);
		goto error;
	}

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	ret = enable_ust_channel(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}

/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * Called with UST app session lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry,
		uint64_t trace_archive_id)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry, usess->current_trace_chunk);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
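
/*
 * File descriptor accounting in do_consumer_create_channel() is two-phase:
 * one fd is reserved up front for the channel object itself, and once the
 * consumer has answered with the expected stream count, another
 * DEFAULT_UST_STREAM_FD_NUM fds per stream are reserved. For instance, with
 * 2 fds per stream and 4 expected streams, the call reserves 1 + 8 fds in
 * total, and each error label releases exactly what its stage had reserved.
 */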

/*
 * Duplicate the ust data object of the ust app stream and save it in the
 * buffer registry stream.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
		struct ust_app_stream *stream)
{
	int ret;

	assert(reg_stream);
	assert(stream);

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 2);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate stream");
		goto error;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&stream->obj,
			reg_stream->obj.ust);
	if (ret < 0) {
		ERR("Duplicate stream obj from %p to %p failed with ret %d",
				reg_stream->obj.ust, stream->obj, ret);
		lttng_fd_put(LTTNG_FD_APPS, 2);
		goto error;
	}
	stream->handle = stream->obj->handle;

error:
	return ret;
}

/*
 * Duplicate the ust data object of the ust app channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(buf_reg_chan);
	assert(ua_chan);

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for channel once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				buf_reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}

/*
 * For a given channel buffer registry, setup all streams of the given ust
 * application channel.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
		struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_stream *stream, *stmp;

	assert(buf_reg_chan);
	assert(ua_chan);

	DBG2("UST app setup buffer registry stream");

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		struct buffer_reg_stream *reg_stream;

		ret = buffer_reg_stream_create(&reg_stream);
		if (ret < 0) {
			goto error;
		}

		/*
		 * Keep original pointer and nullify it in the stream so the delete
		 * stream call does not release the object.
		 */
		reg_stream->obj.ust = stream->obj;
		stream->obj = NULL;
		buffer_reg_stream_add(reg_stream, buf_reg_chan);

		/* We don't need the streams anymore. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}

error:
	return ret;
}

/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important, the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *buf_reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(buf_reg_chan);
	buf_reg_chan->consumer_key = ua_chan->key;
	buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
	buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, buf_reg_chan);

	if (regp) {
		*regp = buf_reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}

/*
 * Setup buffer registry channel for the given session registry and application
 * channel object. If regp pointer is valid, it's set with the created object.
 *
 * Return 0 on success else a negative value.
 */
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
		struct ust_app *app)
{
	int ret;

	assert(reg_sess);
	assert(buf_reg_chan);
	assert(ua_chan);
	assert(ua_chan->obj);

	DBG2("UST app setup buffer registry channel for %s", ua_chan->name);

	/* Setup all streams for the registry. */
	ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
	if (ret < 0) {
		goto error;
	}

	buf_reg_chan->obj.ust = ua_chan->obj;
	ua_chan->obj = NULL;

	return 0;

error:
	buffer_reg_channel_remove(reg_sess, buf_reg_chan);
	buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
	return ret;
}

/*
 * Send buffer registry channel to the application.
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(buf_reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	ret = duplicate_channel_object(buf_reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream, app);
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			}
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
error:
	return ret;
}

/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * This MUST be called with a RCU read side lock acquired.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *buf_reg_chan;
	struct ltt_session *session = NULL;
	enum lttng_error_code notification_ret;
	struct ust_registry_channel *ust_reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (buf_reg_chan) {
		goto send_channel;
	}

	/* Create the buffer registry channel object. */
	ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Create the buffers on the consumer side. This call populates the
	 * ust app channel object with all streams and data object.
	 */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, reg_uid->registry->reg.ust,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

		/*
		 * Let's remove the previously created buffer registry channel so
		 * it's not visible anymore in the session registry.
		 */
		ust_registry_channel_del_free(reg_uid->registry->reg.ust,
				ua_chan->tracing_channel_id, false);
		buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
		buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
		goto error;
	}

	/*
	 * Setup the streams and add it to the session registry.
	 */
	ret = setup_buffer_reg_channel(reg_uid->registry,
			ua_chan, buf_reg_chan, app);
	if (ret < 0) {
		ERR("Error setting up UST channel \"%s\"", ua_chan->name);
		goto error;
	}

	/* Notify the notification subsystem of the channel's creation. */
	pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
	ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
			ua_chan->tracing_channel_id);
	assert(ust_reg_chan);
	ust_reg_chan->consumer_key = ua_chan->key;
	ust_reg_chan = NULL;
	pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);

	notification_ret = notification_thread_command_add_channel(
			the_notification_thread_handle, session->name,
			lttng_credentials_get_uid(
					&ua_sess->effective_credentials),
			lttng_credentials_get_gid(
					&ua_sess->effective_credentials),
			ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (notification_ret != LTTNG_OK) {
		ret = - (int) notification_ret;
		ERR("Failed to add channel to notification thread");
		goto error;
	}

send_channel:
	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	if (session) {
		session_put(session);
	}
	return ret;
}

/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Called with UST app session lock held.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;
	enum lttng_error_code cmd_ret;
	struct ltt_session *session = NULL;
	uint64_t chan_reg_key;
	struct ust_registry_channel *ust_reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	/* The UST app session lock is held, registry shall not be null. */
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);
		goto error_remove_from_registry;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error_remove_from_registry;
	}

	chan_reg_key = ua_chan->key;
	pthread_mutex_lock(&registry->lock);
	ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
	assert(ust_reg_chan);
	ust_reg_chan->consumer_key = ua_chan->key;
	pthread_mutex_unlock(&registry->lock);

	cmd_ret = notification_thread_command_add_channel(
			the_notification_thread_handle, session->name,
			lttng_credentials_get_uid(
					&ua_sess->effective_credentials),
			lttng_credentials_get_gid(
					&ua_sess->effective_credentials),
			ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (cmd_ret != LTTNG_OK) {
		ret = - (int) cmd_ret;
		ERR("Failed to add channel to notification thread");
		goto error_remove_from_registry;
	}

error_remove_from_registry:
	if (ret) {
		ust_registry_channel_del_free(registry, ua_chan->key, false);
	}
error:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
	return ret;
}

/*
 * From an already allocated ust app channel, create the channel buffers if
 * needed and send them to the application. This MUST be called with a RCU read
 * side lock acquired.
 *
 * Called with UST app session lock held.
 *
 * Return 0 on success or else a negative value. Returns -ENOTCONN if
 * the application exited concurrently.
 */
static int ust_app_channel_send(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(app);
	assert(usess);
	assert(usess->active);
	assert(ua_sess);
	assert(ua_chan);

	/* Handle buffer type before sending the channel to the application. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
		ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_PID:
		ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	/* Initialize ust objd object using the received handle and add it. */
	lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
	lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);

	/* If channel is not enabled, disable it on the tracer */
	if (!ua_chan->enabled) {
		ret = disable_ust_channel(app, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
	}

error:
	return ret;
}

/*
 * Create UST app channel and return it through ua_chanp if not NULL.
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan,
		enum lttng_ust_abi_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	return ret;
}

/*
 * Create UST app event and create it on the tracer side.
 *
 * Must be called with the RCU read side lock held.
 * Called with ust app session mutex held.
 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only failure mode of alloc_ust_app_event(). */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/*
		 * Not found previously means that it does not exist on the
		 * tracer. If the application reports that the event existed,
		 * it means there is a bug in the sessiond or lttng-ust
		 * (or corruption, etc.)
		 */
		if (ret == -LTTNG_UST_ERR_EXIST) {
			ERR("Tracer for application reported that an event being created already existed: "
					"event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
					uevent->attr.name,
					app->pid, app->ppid, app->uid,
					app->gid);
		}
		goto error;
	}

	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event completed: app = '%s' (ppid: %d)",
			app->name, app->ppid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event, app);
	return ret;
}

/*
 * Create UST app event notifier rule and create it on the tracer side.
 *
 * Must be called with the RCU read side lock held.
 * Called with ust app session mutex held.
 */
static
int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;

	ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
	if (ua_event_notifier_rule == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	/* Create it on the tracer side. */
	ret = create_ust_event_notifier(app, ua_event_notifier_rule);
	if (ret < 0) {
		/*
		 * Not found previously means that it does not exist on the
		 * tracer. If the application reports that the event existed,
		 * it means there is a bug in the sessiond or lttng-ust
		 * (or corruption, etc.)
		 */
		if (ret == -LTTNG_UST_ERR_EXIST) {
			ERR("Tracer for application reported that an event notifier being created already exists: "
					"token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
					lttng_trigger_get_tracer_token(trigger),
					app->pid, app->ppid, app->uid,
					app->gid);
		}
		goto error;
	}

	lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
			&ua_event_notifier_rule->node);

	DBG2("UST app create token event rule completed: app = '%s' (ppid: %d), token = %" PRIu64,
			app->name, app->ppid, lttng_trigger_get_tracer_token(trigger));

end:
	return ret;

error:
	/* The RCU read side lock is already being held by the caller. */
	delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
	return ret;
}

/*
 * Create UST metadata and open it on the tracer side.
 *
 * Called with UST app session lock held and RCU read side lock.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;
	struct ltt_session *session = NULL;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	/* The UST app session is held, registry shall not be null. */
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata while the ask_channel call on our side
	 * has not returned yet.
	 */
	registry->metadata_key = metadata->key;

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept there. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry, session->current_trace_chunk);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadata. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	if (session) {
		session_put(session);
	}
	return ret;
}

/*
 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
 * acquired before calling this function.
 */
struct ust_app *ust_app_find_by_pid(pid_t pid)
{
	struct ust_app *app = NULL;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app not found with pid %d", pid);
		goto error;
	}

	DBG2("Found UST app by pid %d", pid);

	app = caa_container_of(node, struct ust_app, pid_n);

error:
	return app;
}

/*
 * Allocate and init an UST app object using the registration information and
 * the command socket. This is called when the command socket connects to the
 * session daemon.
 *
 * The object is returned on success or else NULL.
 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	int ret;
	struct ust_app *lta = NULL;
	struct lttng_pipe *event_notifier_event_source_pipe = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	if ((msg->bits_per_long == 64 &&
			(uatomic_read(&the_ust_consumerd64_fd) ==
					-EINVAL)) ||
			(msg->bits_per_long == 32 &&
			(uatomic_read(&the_ust_consumerd32_fd) ==
					-EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	/*
	 * Reserve the two file descriptors of the event source pipe. The write
	 * end will be closed once it is passed to the application, at which
	 * point a single 'put' will be performed.
	 */
	ret = lttng_fd_get(LTTNG_FD_APPS, 2);
	if (ret) {
		ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s' (ppid: %d)",
				msg->name, (int) msg->ppid);
		goto error;
	}

	event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!event_notifier_event_source_pipe) {
		PERROR("Failed to open application event source pipe: '%s' (ppid = %d)",
				msg->name, msg->ppid);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error_free_pipe;
	}

	lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;

	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->notify_sock = -1;
	lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	pthread_mutex_init(&lta->sock_lock, NULL);
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);
	return lta;

error_free_pipe:
	lttng_pipe_destroy(event_notifier_event_source_pipe);
	lttng_fd_put(LTTNG_FD_APPS, 2);
error:
	return NULL;
}

/*
 * For a given application object, add it to every hash table.
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	app->registration_time = time(NULL);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid.
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, an add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}

/*
 * Set the application version into the object.
 *
 * Return 0 on success else a negative value, either an errno code or an
 * LTTng-UST error code.
 */
int ust_app_version(struct ust_app *app)
{
	int ret;

	assert(app);

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_tracer_version(app->sock, &app->version);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("UST app %d version failed with ret %d", app->sock, ret);
		} else {
			DBG3("UST app %d version failed. Application is dead", app->sock);
		}
	}

	return ret;
}

/*
 * Setup the base event notifier group.
 *
 * Return 0 on success else a negative value, either an errno code or an
 * LTTng-UST error code.
 */
int ust_app_setup_event_notifier_group(struct ust_app *app)
{
	int ret;
	int event_pipe_write_fd;
	struct lttng_ust_abi_object_data *event_notifier_group = NULL;
	enum lttng_error_code lttng_ret;
	enum event_notifier_error_accounting_status event_notifier_error_accounting_status;

	assert(app);

	/* Get the write side of the pipe. */
	event_pipe_write_fd = lttng_pipe_get_writefd(
			app->event_notifier_group.event_pipe);

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event_notifier_group(app->sock,
			event_pipe_write_fd, &event_notifier_group);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("Failed to create application event notifier group: ret = %d, app socket fd = %d, event_pipe_write_fd = %d",
					ret, app->sock, event_pipe_write_fd);
		} else {
			DBG("Failed to create application event notifier group (application is dead): app socket fd = %d",
					app->sock);
		}
		goto error;
	}

	ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
	if (ret) {
		ERR("Failed to close write end of the application's event source pipe: app = '%s' (ppid = %d)",
				app->name, app->ppid);
		goto error;
	}

	/*
	 * Release the file descriptor that was reserved for the write-end of
	 * the pipe.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);

	lttng_ret = notification_thread_command_add_tracer_event_source(
			the_notification_thread_handle,
			lttng_pipe_get_readfd(
					app->event_notifier_group.event_pipe),
			LTTNG_DOMAIN_UST);
	if (lttng_ret != LTTNG_OK) {
		ERR("Failed to add tracer event source to notification thread");
		ret = - (int) lttng_ret;
		goto error;
	}

	/* Assign handle only when the complete setup is valid. */
	app->event_notifier_group.object = event_notifier_group;

	event_notifier_error_accounting_status =
			event_notifier_error_accounting_register_app(app);
	if (event_notifier_error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
		if (event_notifier_error_accounting_status == EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD) {
			DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d",
					app->sock);
			ret = 0;
			goto error_accounting;
		}

		ERR("Failed to setup event notifier error accounting for app");
		ret = -1;
		goto error_accounting;
	}

	return 0;

error_accounting:
	lttng_ret = notification_thread_command_remove_tracer_event_source(
			the_notification_thread_handle,
			lttng_pipe_get_readfd(
					app->event_notifier_group.event_pipe));
	if (lttng_ret != LTTNG_OK) {
		ERR("Failed to remove application tracer event source from notification thread");
	}

error:
	ustctl_release_object(app->sock, app->event_notifier_group.object);
	free(app->event_notifier_group.object);
	app->event_notifier_group.object = NULL;
	return ret;
}

/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point, so there is no need to close it.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			(void) ust_app_flush_app_session(lta, ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flagged the metadata registry
			 * to close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);

		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Remove application from socket hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the
	 * apps_notify_thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Free memory */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}

/*
 * Fill events array with all events name of all registered apps.
 */
int ust_app_list_events(struct lttng_event **events)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event *tmp_event;

	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app events");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_abi_tracepoint_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this error by
			 * telling them that this is a version error.
			 */
			continue;
		}
		pthread_mutex_lock(&app->sock_lock);
		handle = ustctl_tracepoint_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list events getting handle failed for app pid %d",
						app->pid);
			}
			pthread_mutex_unlock(&app->sock_lock);
			continue;
		}

		while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
				&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				int release_ret;

				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list get failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list get failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Continue normal execution.
					 */
					break;
				}
				free(tmp_event);
				release_ret = ustctl_release_handle(app->sock, handle);
				if (release_ret < 0 &&
						release_ret != -LTTNG_UST_ERR_EXITING &&
						release_ret != -EPIPE) {
					ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
				}
				pthread_mutex_unlock(&app->sock_lock);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event *new_tmp_event;
				size_t new_nbmem;

				new_nbmem = nbmem << 1;
				DBG2("Reallocating event list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
						new_nbmem * sizeof(struct lttng_event));
				if (new_tmp_event == NULL) {
					int release_ret;

					PERROR("realloc ust app events");
					free(tmp_event);
					ret = -ENOMEM;
					release_ret = ustctl_release_handle(app->sock, handle);
					if (release_ret < 0 &&
							release_ret != -LTTNG_UST_ERR_EXITING &&
							release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
					}
					pthread_mutex_unlock(&app->sock_lock);
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
						(new_nbmem - nbmem) * sizeof(struct lttng_event));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}
			memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
			tmp_event[count].loglevel = uiter.loglevel;
			tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
			tmp_event[count].pid = app->pid;
			tmp_event[count].enabled = -1;
			count++;
		}
		ret = ustctl_release_handle(app->sock, handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
		}
	}

	ret = count;
	*events = tmp_event;

	DBG2("UST app list events done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
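
/*
 * The event listing grows its output array geometrically: it starts at
 * UST_APP_EVENT_LIST_SIZE entries and doubles (nbmem << 1) whenever count
 * catches up, so listing N events costs a logarithmic number of
 * reallocations. The newly extended tail is zeroed explicitly because
 * realloc() leaves it uninitialized.
 */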

/*
 * Fill fields array with all event fields of all registered apps.
 */
int ust_app_list_event_fields(struct lttng_event_field **fields)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event_field *tmp_event;

	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app event fields");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_abi_field_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this error by
			 * telling them that this is a version error.
			 */
			continue;
		}
		pthread_mutex_lock(&app->sock_lock);
		handle = ustctl_tracepoint_field_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list field getting handle failed for app pid %d",
						app->pid);
			}
			pthread_mutex_unlock(&app->sock_lock);
			continue;
		}

		while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
				&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				int release_ret;

				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list field failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list field failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Reset list and count for next app.
					 */
					break;
				}
				free(tmp_event);
				release_ret = ustctl_release_handle(app->sock, handle);
				pthread_mutex_unlock(&app->sock_lock);
				if (release_ret < 0 &&
						release_ret != -LTTNG_UST_ERR_EXITING &&
						release_ret != -EPIPE) {
					ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
				}
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event_field *new_tmp_event;
				size_t new_nbmem;

				new_nbmem = nbmem << 1;
				DBG2("Reallocating event field list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
						new_nbmem * sizeof(struct lttng_event_field));
				if (new_tmp_event == NULL) {
					int release_ret;

					PERROR("realloc ust app event fields");
					free(tmp_event);
					ret = -ENOMEM;
					release_ret = ustctl_release_handle(app->sock, handle);
					pthread_mutex_unlock(&app->sock_lock);
					if (release_ret < 0 &&
							release_ret != -LTTNG_UST_ERR_EXITING &&
							release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
					}
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
						(new_nbmem - nbmem) * sizeof(struct lttng_event_field));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}

			memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
			/* Mapping between these enums matches 1 to 1. */
			tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
			tmp_event[count].nowrite = uiter.nowrite;

			memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
			tmp_event[count].event.loglevel = uiter.loglevel;
			tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
			tmp_event[count].event.pid = app->pid;
			tmp_event[count].event.enabled = -1;
			count++;
		}

		ret = ustctl_release_handle(app->sock, handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 &&
				ret != -LTTNG_UST_ERR_EXITING &&
				ret != -EPIPE) {
			ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
		}
	}

	ret = count;
	*fields = tmp_event;

	DBG2("UST app list event fields done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
/*
 * Free and clean all traceable apps of the global list.
 *
 * Should _NOT_ be called with RCU read-side lock held.
 */
void ust_app_clean_list(void)
{
	int ret;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	DBG2("UST app cleaning registered apps hash table");

	rcu_read_lock();

	/* Cleanup notify socket hash table */
	if (ust_app_ht_by_notify_sock) {
		cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
				notify_sock_n.node) {
			/*
			 * Assert that all notifiers are gone as all triggers
			 * are unregistered prior to this clean-up.
			 */
			assert(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
			ust_app_notify_sock_unregister(app->notify_sock);
		}
	}

	if (ust_app_ht) {
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ret = lttng_ht_del(ust_app_ht, &iter);
			assert(!ret);
			call_rcu(&app->pid_n.head, delete_ust_app_rcu);
		}
	}

	/* Cleanup socket hash table */
	if (ust_app_ht_by_sock) {
		cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
				sock_n.node) {
			ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
			assert(!ret);
		}
	}

	rcu_read_unlock();

	/* Destroy is done only when the ht is empty */
	if (ust_app_ht) {
		ht_cleanup_push(ust_app_ht);
	}
	if (ust_app_ht_by_sock) {
		ht_cleanup_push(ust_app_ht_by_sock);
	}
	if (ust_app_ht_by_notify_sock) {
		ht_cleanup_push(ust_app_ht_by_notify_sock);
	}
}
/*
 * Init UST app hash table.
 */
int ust_app_ht_alloc(void)
{
	ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!ust_app_ht) {
		return -1;
	}
	ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!ust_app_ht_by_sock) {
		return -1;
	}
	ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!ust_app_ht_by_notify_sock) {
		return -1;
	}
	return 0;
}
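/*
 * Editor's note: an illustrative caller sketch (hypothetical; not part
 * of the original source). The hash tables above are process-global, so
 * a sessiond-style daemon would allocate them once at startup and abort
 * on failure, since every later lookup assumes they exist.
 */
#if 0 /* illustrative only */
static void init_app_tables_or_die(void)
{
	if (ust_app_ht_alloc() < 0) {
		ERR("Failed to allocate UST app hash tables");
		exit(EXIT_FAILURE);
	}
}
#endif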
/*
 * For a specific UST session, disable the channel for all registered apps.
 */
int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	assert(usess->active);
	DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered application */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this
			 * error by telling them that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Get channel */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session is found for the app, the channel must be there */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		/* The channel must not be already disabled */
		assert(ua_chan->enabled == 1);

		/* Disable channel onto application */
		ret = disable_ust_app_channel(ua_sess, ua_chan, app);
		if (ret < 0) {
			/* XXX: We might want to report this error at some point... */
			continue;
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * For a specific UST session, enable the channel for all registered apps.
 */
int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess;

	assert(usess->active);
	DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered application */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this
			 * error by telling them that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Enable channel onto application */
		ret = enable_ust_app_channel(ua_sess, uchan, app);
		if (ret < 0) {
			/* XXX: We might want to report this error at some point... */
			continue;
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Disable an event in a channel and for a specific session.
 */
int ust_app_disable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	assert(usess->active);
	DBG("UST app disabling event %s for all apps in channel "
			"%s for session id %" PRIu64,
			uevent->attr.name, uchan->name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this
			 * error by telling them that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			/* Next app */
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			DBG2("Channel %s not found in session id %" PRIu64
					" for app pid %d. Skipping",
					uchan->name, usess->id, app->pid);
			continue;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel,
				uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("Event %s not found in channel %s for app pid %d. "
					"Skipping", uevent->attr.name, uchan->name, app->pid);
			continue;
		}

		ret = disable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* XXX: Report error someday... */
			continue;
		}
	}

	rcu_read_unlock();
	return ret;
}
/* The ua_sess lock must be held by the caller. */
static
int ust_app_channel_create(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		struct ust_app_channel **_ua_chan)
{
	int ret = 0;
	struct ust_app_channel *ua_chan = NULL;

	assert(ua_sess);
	ASSERT_LOCKED(ua_sess->lock);

	if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
			sizeof(uchan->name))) {
		copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&uchan->attr);
		ret = 0;
	} else {
		struct ltt_ust_context *uctx = NULL;

		/*
		 * Create channel onto application and synchronize its
		 * configuration.
		 */
		ret = ust_app_channel_allocate(ua_sess, uchan,
			LTTNG_UST_ABI_CHAN_PER_CPU, usess,
			&ua_chan);
		if (ret < 0) {
			goto error;
		}

		ret = ust_app_channel_send(app, usess,
			ua_sess, ua_chan);
		if (ret) {
			goto error;
		}

		/* Add contexts. */
		cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
			ret = create_ust_app_channel_context(ua_chan,
				&uctx->ctx, app);
			if (ret) {
				goto error;
			}
		}
	}

error:
	if (ret < 0) {
		switch (ret) {
		case -ENOTCONN:
			/*
			 * The application's socket is not valid. Either a bad socket
			 * or a timeout on it. We can't inform the caller that for a
			 * specific app, the session failed so let's continue here.
			 */
			ret = 0;	/* Not an error. */
			break;
		case -ENOMEM:
		default:
			break;
		}
	}

	if (ret == 0 && _ua_chan) {
		/*
		 * Only return the application's channel on success. Note
		 * that the channel can still be part of the application's
		 * channel hashtable on error.
		 */
		*_ua_chan = ua_chan;
	}
	return ret;
}
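/*
 * Editor's note: an illustrative sketch (hypothetical helper; not part
 * of the original source) of what the DEFAULT_METADATA_NAME special
 * case above amounts to: metadata channels are not sent to the
 * application here, only their attributes are captured for later use.
 */
#if 0 /* illustrative only */
static bool is_metadata_channel(const struct ltt_ust_channel *uchan)
{
	/* Channel names are fixed-size, NUL-terminated buffers. */
	return strncmp(uchan->name, DEFAULT_METADATA_NAME,
			sizeof(uchan->name)) == 0;
}
#endif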
/*
 * Enable event for a specific session and channel on the tracer.
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	assert(usess->active);
	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps and enabled on the
	 * tracer.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this
			 * error by telling them that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has a problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/*
		 * It is possible that the channel cannot be found if
		 * the channel/event creation occurs concurrently with
		 * an application exit.
		 */
		if (!ua_chan_node) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d. "
					"Skipping app", uevent->attr.name, app->pid);
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
/*
 * For a specific existing UST session and UST channel, creates the event for
 * all registered apps.
 */
int ust_app_create_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	assert(usess->active);
	DBG("UST app creating event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this
			 * error by telling them that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has a problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			if (ret != -LTTNG_UST_ERR_EXIST) {
				/* Possible value at this point: -ENOMEM. If so, we stop! */
				break;
			}
			DBG2("UST app event %s already exist on app PID %d",
					uevent->attr.name, app->pid);
			continue;
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Start tracing for a specific UST session and app.
 *
 * Called with UST app session lock held.
 */
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	if (ua_sess->enabled) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	health_code_update();

skip_setup:
	/* This starts the UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;
	ua_sess->enabled = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
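/*
 * Editor's note: illustrative sketch (hypothetical helper; not part of
 * the original source). Start and stop both end with a quiescent wait
 * so that, by the time the command returns, no tracepoint is still
 * executing inside a probe that observed the previous state.
 */
#if 0 /* illustrative only */
static void toggle_app_tracing(struct ltt_ust_session *usess,
		struct ust_app *app, bool enable)
{
	if (enable) {
		(void) ust_app_start_trace(usess, app);
	} else {
		(void) ust_app_stop_trace(usess, app);
	}
}
#endif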
/*
 * Stop tracing for a specific UST session and app.
 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end_no_session;
	}

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			goto end_unlock;
		}
		goto error_rcu_unlock;
	}

	health_code_update();
	ua_sess->enabled = 0;

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);

	/* The UST app session lock is held; the registry shall not be null. */
	assert(registry);

	/* Push metadata for application before freeing the application. */
	(void) push_metadata(registry, ua_sess->consumer);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
static
int ust_app_flush_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret, retval = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Flushing app session buffers for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_deleted;
	}

	health_code_update();

	/* Flushing buffers */
	socket = consumer_find_socket_by_bitness(app->bits_per_long,
			ua_sess->consumer);

	/* Flush buffers and push metadata. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
				node.node) {
			health_code_update();
			ret = consumer_flush_channel(socket, ua_chan->key);
			if (ret) {
				ERR("Error flushing consumer channel");
				retval = -1;
				continue;
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		assert(0);
		break;
	}

	health_code_update();

end_deleted:
	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	rcu_read_unlock();
	health_code_update();
	return retval;
}
/*
 * Flush buffers for all applications for a specific UST session.
 * Called with UST session lock held.
 */
static
int ust_app_flush_session(struct ltt_ust_session *usess)
{
	int ret = 0;

	DBG("Flushing session buffers for all ust apps");

	rcu_read_lock();

	/* Flush buffers and push metadata. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		struct lttng_ht_iter iter;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			/* Push metadata. */
			(void) push_metadata(ust_session_reg, usess->consumer);
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_flush_app_session(app, ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
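/*
 * Editor's note: illustrative summary (not part of the original source)
 * of the two buffer ownership models handled above. Per-UID buffers are
 * flushed through the buffer registry (one set of buffers shared by all
 * apps of a given uid/bitness); per-PID buffers are flushed app by app.
 * The hypothetical helper below only shows how a caller would pick the
 * consumer socket the flush paths rely on.
 */
#if 0 /* illustrative only */
static struct consumer_socket *socket_for_app(const struct ust_app *app,
		struct consumer_output *consumer)
{
	/* Mirrors the consumer_find_socket_by_bitness() calls above. */
	return consumer_find_socket_by_bitness(app->bits_per_long, consumer);
}
#endif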
static
int ust_app_clear_quiescent_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Clearing stream quiescent state for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_unlock;
	}

	health_code_update();

	socket = consumer_find_socket_by_bitness(app->bits_per_long,
			ua_sess->consumer);
	if (!socket) {
		ERR("Failed to find consumer (%" PRIu32 ") socket",
				app->bits_per_long);
		ret = -1;
		goto end_unlock;
	}

	/* Clear quiescent state. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
				ua_chan, node.node) {
			health_code_update();
			ret = consumer_clear_quiescent_channel(socket,
					ua_chan->key);
			if (ret) {
				ERR("Error clearing quiescent state for consumer channel");
				ret = -1;
				continue;
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		assert(0);
		ret = -1;
		break;
	}

	health_code_update();

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
/*
 * Clear quiescent state in each stream for all applications for a
 * specific UST session.
 * Called with UST session lock held.
 */
static
int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
{
	int ret = 0;

	DBG("Clearing stream quiescent state for all ust apps");

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct lttng_ht_iter iter;
		struct buffer_reg_uid *reg;

		/*
		 * Clear quiescent for all per UID buffers associated to
		 * that session.
		 */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct consumer_socket *socket;
			struct buffer_reg_channel *buf_reg_chan;

			/* Get associated consumer socket.*/
			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			if (!socket) {
				/*
				 * Ignore request if no consumer is found for
				 * the session.
				 */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, buf_reg_chan, node.node) {
				/*
				 * The following call will print error values so
				 * the return code is of little importance
				 * because whatever happens, we have to try them
				 * all.
				 */
				(void) consumer_clear_quiescent_channel(socket,
						buf_reg_chan->consumer_key);
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_clear_quiescent_app_session(app,
					ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
/*
 * Destroy a specific UST session in apps.
 */
static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret;
	struct ust_app_session *ua_sess;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG("Destroy tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		/* Session is being or is deleted. */
		goto end;
	}
	ua_sess = caa_container_of(node, struct ust_app_session, node);

	health_code_update();
	destroy_app_session(app, ua_sess);

	health_code_update();

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}
end:
	rcu_read_unlock();
	health_code_update();
	return 0;
}
/*
 * Start tracing for the UST session.
 */
int ust_app_start_trace_all(struct ltt_ust_session *usess)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Starting all UST traces");

	/*
	 * Even though the start trace might fail, flag this session active so
	 * other applications coming in are started by default.
	 */
	usess->active = 1;

	rcu_read_lock();

	/*
	 * In a start-stop-start use-case, we need to clear the quiescent state
	 * of each channel set by the prior stop command, thus ensuring that a
	 * following stop or destroy is sure to grab a timestamp_end near those
	 * operations, even if the packet is empty.
	 */
	(void) ust_app_clear_quiescent_session(usess);

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ust_app_global_update(usess, app);
	}

	rcu_read_unlock();

	return 0;
}
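/*
 * Editor's note: illustrative timeline (hypothetical helper; not part
 * of the original source) of the start-stop-start case the comment
 * above describes.
 */
#if 0 /* illustrative only */
static void start_stop_start(struct ltt_ust_session *usess)
{
	(void) ust_app_start_trace_all(usess);	/* packets open */
	(void) ust_app_stop_trace_all(usess);	/* streams marked quiescent */
	/*
	 * The second start clears the quiescent flag first; otherwise the
	 * next stop could reuse a stale timestamp_end.
	 */
	(void) ust_app_start_trace_all(usess);
}
#endif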
/*
 * Stop tracing for the UST session.
 * Called with UST session lock held.
 */
int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Stopping all UST traces");

	/*
	 * Even though the stop trace might fail, flag this session inactive so
	 * other applications coming in are not started by default.
	 */
	usess->active = 0;

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = ust_app_stop_trace(usess, app);
		if (ret < 0) {
			/* Continue to next apps even on error */
			continue;
		}
	}

	(void) ust_app_flush_session(usess);

	rcu_read_unlock();

	return 0;
}
/*
 * Destroy app UST session.
 */
int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Destroy all UST traces");

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = destroy_trace(usess, app);
		if (ret < 0) {
			/* Continue to next apps even on error */
			continue;
		}
	}

	rcu_read_unlock();

	return 0;
}
/* The ua_sess lock must be held by the caller. */
static
int find_or_create_ust_app_channel(
		struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app,
		struct ltt_ust_channel *uchan,
		struct ust_app_channel **ua_chan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;

	lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node) {
		*ua_chan = caa_container_of(ua_chan_node,
			struct ust_app_channel, node);
		goto end;
	}

	ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
	if (ret) {
		goto end;
	}
end:
	return ret;
}
static
int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
		struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event = NULL;

	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
		uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (!ua_event) {
		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		if (ret < 0) {
			goto end;
		}
	} else {
		if (ua_event->enabled != uevent->enabled) {
			ret = uevent->enabled ?
				enable_ust_app_event(ua_sess, ua_event, app) :
				disable_ust_app_event(ua_sess, ua_event, app);
		}
	}

end:
	return ret;
}
/* Called with RCU read-side lock held. */
static
void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
{
	int ret = 0;
	enum lttng_error_code ret_code;
	enum lttng_trigger_status t_status;
	struct lttng_ht_iter app_trigger_iter;
	struct lttng_triggers *triggers = NULL;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	unsigned int count, i;

	/*
	 * Currently, registering or unregistering a trigger with an
	 * event rule condition causes a full synchronization of the event
	 * notifiers.
	 *
	 * The first step attempts to add an event notifier for all registered
	 * triggers that apply to the user space tracers. Then, the
	 * application's event notifiers rules are all checked against the list
	 * of registered triggers. Any event notifier that doesn't have a
	 * matching trigger can be assumed to have been disabled.
	 *
	 * All of this is inefficient, but is put in place to get the feature
	 * rolling as it is simpler at this moment. It will be optimized Soon™
	 * to allow the state of enabled event notifiers to be synchronized in
	 * a piece-wise way.
	 */

	/* Get all triggers using uid 0 (root) */
	ret_code = notification_thread_command_list_triggers(
			the_notification_thread_handle, 0, &triggers);
	if (ret_code != LTTNG_OK) {
		ret = -1;
		goto end;
	}

	assert(triggers);

	t_status = lttng_triggers_get_count(triggers, &count);
	if (t_status != LTTNG_TRIGGER_STATUS_OK) {
		ret = -1;
		goto end;
	}

	for (i = 0; i < count; i++) {
		struct lttng_condition *condition;
		struct lttng_event_rule *event_rule;
		struct lttng_trigger *trigger;
		const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
		enum lttng_condition_status condition_status;
		uint64_t token;

		trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
		assert(trigger);

		token = lttng_trigger_get_tracer_token(trigger);
		condition = lttng_trigger_get_condition(trigger);

		if (lttng_condition_get_type(condition) != LTTNG_CONDITION_TYPE_ON_EVENT) {
			/* Does not apply */
			continue;
		}

		condition_status = lttng_condition_on_event_borrow_rule_mutable(condition, &event_rule);
		assert(condition_status == LTTNG_CONDITION_STATUS_OK);

		if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
			/* Skip kernel related triggers. */
			continue;
		}

		/*
		 * Find or create the associated token event rule. The caller
		 * holds the RCU read lock, so this is safe to call without
		 * explicitly acquiring it here.
		 */
		looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
				app->token_to_event_notifier_rule_ht, token);
		if (!looked_up_event_notifier_rule) {
			ret = create_ust_app_event_notifier_rule(trigger, app);
			if (ret < 0) {
				goto end;
			}
		}
	}

	rcu_read_lock();
	/* Remove all unknown event sources from the app. */
	cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
			&app_trigger_iter.iter, event_notifier_rule,
			node.node) {
		const uint64_t app_token = event_notifier_rule->token;
		bool found = false;

		/*
		 * Check if the app event trigger still exists on the
		 * notification side.
		 */
		for (i = 0; i < count; i++) {
			uint64_t notification_thread_token;
			const struct lttng_trigger *trigger =
					lttng_triggers_get_at_index(
							triggers, i);

			assert(trigger);

			notification_thread_token =
					lttng_trigger_get_tracer_token(trigger);

			if (notification_thread_token == app_token) {
				found = true;
				break;
			}
		}

		if (found) {
			/* Still valid. */
			continue;
		}

		/*
		 * This trigger was unregistered, disable it on the tracer's
		 * side.
		 */
		ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
				&app_trigger_iter);
		assert(ret == 0);

		/* Callee logs errors. */
		(void) disable_ust_object(app, event_notifier_rule->obj);

		delete_ust_app_event_notifier_rule(
				app->sock, event_notifier_rule, app);
	}

	rcu_read_unlock();

end:
	lttng_triggers_destroy(triggers);
	return;
}
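/*
 * Editor's note: illustrative outline (not part of the original source)
 * of the two-phase reconciliation performed above, written as comments
 * inside a hypothetical stub.
 */
#if 0 /* illustrative only */
static void reconcile_sketch(struct ust_app *app,
		const struct lttng_triggers *registered)
{
	/* Phase 1: ensure every registered trigger has a notifier.     */
	/*   for each trigger t in registered:                          */
	/*       if no rule with t's token exists on app, create it.    */

	/* Phase 2: drop notifiers whose trigger no longer exists.      */
	/*   for each rule r on app:                                    */
	/*       if no trigger in registered carries r->token,          */
	/*       disable the tracer-side object and delete r.           */
}
#endif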
/*
 * RCU read lock must be held by the caller.
 */
static
void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret = 0;
	struct cds_lfht_iter uchan_iter;
	struct ltt_ust_channel *uchan;

	assert(usess);
	assert(ua_sess);
	assert(app);

	cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
			uchan, node.node) {
		struct ust_app_channel *ua_chan;
		struct cds_lfht_iter uevent_iter;
		struct ltt_ust_event *uevent;

		/*
		 * Search for a matching ust_app_channel. If none is found,
		 * create it. Creating the channel will cause the ua_chan
		 * structure to be allocated, the channel buffers to be
		 * allocated (if necessary) and sent to the application, and
		 * all enabled contexts will be added to the channel.
		 */
		ret = find_or_create_ust_app_channel(usess, ua_sess,
			app, uchan, &ua_chan);
		if (ret) {
			/* Tracer is probably gone or ENOMEM. */
			goto end;
		}

		if (!ua_chan) {
			/* ua_chan will be NULL for the metadata channel */
			continue;
		}

		cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
				node.node) {
			ret = ust_app_channel_synchronize_event(ua_chan,
				uevent, ua_sess, app);
			if (ret) {
				goto end;
			}
		}

		if (ua_chan->enabled != uchan->enabled) {
			ret = uchan->enabled ?
				enable_ust_app_channel(ua_sess, uchan, app) :
				disable_ust_app_channel(ua_sess, ua_chan, app);
			if (ret) {
				goto end;
			}
		}
	}
end:
	return;
}
/*
 * The caller must ensure that the application is compatible and is tracked
 * by the process attribute trackers.
 */
static
void ust_app_synchronize(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess = NULL;

	/*
	 * The application's configuration should only be synchronized for
	 * active sessions.
	 */
	assert(usess->active);

	ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);
	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	rcu_read_lock();

	ust_app_synchronize_all_channels(usess, ua_sess, app);

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 *
	 * The metadata channel must be created after the data channels as the
	 * consumer daemon assumes this ordering. When interacting with a relay
	 * daemon, the consumer will use this assumption to send the
	 * "STREAMS_SENT" message to the relay daemon.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
	if (ret < 0) {
		goto error_unlock;
	}

	rcu_read_unlock();

end:
	pthread_mutex_unlock(&ua_sess->lock);
	/* Everything went well at this point. */
	return;

error_unlock:
	rcu_read_unlock();
	pthread_mutex_unlock(&ua_sess->lock);

error:
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	return;
}
static
void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
{
	struct ust_app_session *ua_sess;

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		return;
	}
	destroy_app_session(app, ua_sess);
}
/*
 * Add channels/events from UST global domain to registered apps at sock.
 *
 * Called with session lock held.
 * Called with RCU read-side lock held.
 */
void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
{
	assert(usess);
	assert(usess->active);

	DBG2("UST app global update for app sock %d for session id %" PRIu64,
			app->sock, usess->id);

	if (!app->compatible) {
		return;
	}
	if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
			    usess, app->pid) &&
			trace_ust_id_tracker_lookup(
					LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
					usess, app->uid) &&
			trace_ust_id_tracker_lookup(
					LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
					usess, app->gid)) {
		/*
		 * Synchronize the application's internal tracing configuration
		 * and start tracing.
		 */
		ust_app_synchronize(usess, app);
		ust_app_start_trace(usess, app);
	} else {
		ust_app_global_destroy(usess, app);
	}
}
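/*
 * Editor's note: illustrative sketch (hypothetical helper; not part of
 * the original source) of the tracker gating above: an app is traced
 * only if its vpid, vuid and vgid are all accepted by the session's
 * process attribute trackers; otherwise any existing app session is
 * torn down.
 */
#if 0 /* illustrative only */
static bool app_is_tracked(struct ltt_ust_session *usess, struct ust_app *app)
{
	return trace_ust_id_tracker_lookup(
			LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID, usess, app->pid) &&
		trace_ust_id_tracker_lookup(
			LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID, usess, app->uid) &&
		trace_ust_id_tracker_lookup(
			LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID, usess, app->gid);
}
#endif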
/*
 * Add all event notifiers to an application.
 *
 * Called with session lock held.
 * Called with RCU read-side lock held.
 */
void ust_app_global_update_event_notifier_rules(struct ust_app *app)
{
	DBG2("UST application global event notifier rules update: app = '%s' (ppid: %d)",
			app->name, app->ppid);

	if (!app->compatible) {
		return;
	}

	if (app->event_notifier_group.object == NULL) {
		WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s' (ppid: %d)",
				app->name, app->ppid);
		return;
	}

	ust_app_synchronize_event_notifier_rules(app);
}
/*
 * Called with session lock held.
 */
void ust_app_global_update_all(struct ltt_ust_session *usess)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	rcu_read_lock();
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ust_app_global_update(usess, app);
	}
	rcu_read_unlock();
}
void ust_app_global_update_all_event_notifier_rules(void)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	rcu_read_lock();
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ust_app_global_update_event_notifier_rules(app);
	}
	rcu_read_unlock();
}
/*
 * Add context to a specific channel for global UST domain.
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	assert(usess->active);

	rcu_read_lock();
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this
			 * error by telling them that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
		if (ret < 0) {
			goto next_app;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Receive registration and populate the given msg structure.
 *
 * On success return 0 else a negative value returned by the ustctl call.
 */
int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
{
	int ret;
	uint32_t pid, ppid, uid, gid;

	assert(msg);

	ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
			&pid, &ppid, &uid, &gid,
			&msg->bits_per_long,
			&msg->uint8_t_alignment,
			&msg->uint16_t_alignment,
			&msg->uint32_t_alignment,
			&msg->uint64_t_alignment,
			&msg->long_alignment,
			&msg->byte_order,
			msg->name);
	if (ret < 0) {
		switch (-ret) {
		case EPIPE:
		case ECONNRESET:
		case LTTNG_UST_ERR_EXITING:
			DBG3("UST app recv reg message failed. Application died");
			break;
		case LTTNG_UST_ERR_UNSUP_MAJOR:
			ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
					msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
					LTTNG_UST_ABI_MINOR_VERSION);
			break;
		default:
			ERR("UST app recv reg message failed with ret %d", ret);
			break;
		}
		goto error;
	}
	msg->pid = (pid_t) pid;
	msg->ppid = (pid_t) ppid;
	msg->uid = (uid_t) uid;
	msg->gid = (gid_t) gid;

error:
	return ret;
}
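/*
 * Editor's note: illustrative sketch (hypothetical caller; not part of
 * the original source) of how a registration message might be consumed
 * once received: the sessiond turns the wire-level ids into kernel
 * types and uses the ABI version to decide compatibility.
 */
#if 0 /* illustrative only */
static void handle_registration(int sock)
{
	struct ust_register_msg msg;

	if (ust_app_recv_registration(sock, &msg) < 0) {
		return;	/* errors already logged by the callee */
	}
	DBG("App '%s' registered: pid=%d, ABI %u.%u, %u-bit",
			msg.name, (int) msg.pid, msg.major, msg.minor,
			msg.bits_per_long);
}
#endif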
/*
 * Return a ust app session object using the application object and the
 * session object descriptor as a key. If not found, NULL is returned.
 * A RCU read side lock MUST be acquired when calling this function.
 */
static struct ust_app_session *find_session_by_objd(struct ust_app *app,
		int objd)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess = NULL;

	assert(app);

	lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app session find by objd %d not found", objd);
		goto error;
	}

	ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);

error:
	return ua_sess;
}
/*
 * Return a ust app channel object using the application object and the channel
 * object descriptor as a key. If not found, NULL is returned. A RCU read side
 * lock MUST be acquired before calling this function.
 */
static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
		int objd)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan = NULL;

	assert(app);

	lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app channel find by objd %d not found", objd);
		goto error;
	}

	ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);

error:
	return ua_chan;
}
/*
 * Reply to a register channel notification from an application on the notify
 * socket. The channel metadata is also created.
 *
 * The session UST registry lock is acquired in this function.
 *
 * On success 0 is returned else a negative value.
 */
static int reply_ust_register_channel(int sock, int cobjd,
		size_t nr_fields, struct ustctl_field *fields)
{
	int ret, ret_code = 0;
	uint32_t chan_id;
	uint64_t chan_reg_key;
	enum ustctl_channel_header type;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	struct ust_registry_channel *ust_reg_chan;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being torn down. Abort event notify",
				sock);
		ret = -1;
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	/* Get right session registry depending on the session buffer type. */
	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	/* Depending on the buffer type, a different channel key is used. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
	assert(ust_reg_chan);

	if (!ust_reg_chan->register_done) {
		/*
		 * TODO: eventually use the registry event count for
		 * this channel to better guess header type for per-pid
		 * buffers.
		 */
		type = USTCTL_CHANNEL_HEADER_LARGE;
		ust_reg_chan->nr_ctx_fields = nr_fields;
		ust_reg_chan->ctx_fields = fields;
		fields = NULL;
		ust_reg_chan->header_type = type;
	} else {
		/* Get current already assigned values. */
		type = ust_reg_chan->header_type;
	}
	/* Channel id is set during the object creation. */
	chan_id = ust_reg_chan->chan_id;

	/* Append to metadata */
	if (!ust_reg_chan->metadata_dumped) {
		ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
		if (ret_code) {
			ERR("Error appending channel metadata (errno = %d)", ret_code);
			goto reply;
		}
	}

reply:
	DBG3("UST app replying to register channel key %" PRIu64
			" with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
			ret_code);

	ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply channel failed with ret %d", ret);
		} else {
			DBG3("UST app reply channel failed. Application died");
		}
		goto error;
	}

	/* This channel registry registration is completed. */
	ust_reg_chan->register_done = 1;

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	free(fields);
	return ret;
}
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields,
		int loglevel_value, char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being torn down. Abort event notify",
				sock);
		ret = -1;
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/written after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields,
			loglevel_value, model_emf_uri, ua_sess->buffer_type,
			&event_id, app);
	sig = NULL;
	fields = NULL;
	model_emf_uri = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get closed on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	free(sig);
	free(fields);
	free(model_emf_uri);
	return ret;
}
/*
 * Add enum to the UST session registry. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired within this function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_enum_ust_registry(int sock, int sobjd, char *name,
		struct ustctl_enum_entry *entries, size_t nr_entries)
{
	int ret = 0, ret_code;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	uint64_t enum_id = -1ULL;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		/* Not an error: the application is simply being torn down. */
		DBG("Application socket %d is being torn down. Aborting enum registration",
				sock);
		free(entries);
		goto error_rcu_unlock;
	}

	/* Lookup session by UST object descriptor. */
	ua_sess = find_session_by_objd(app, sobjd);
	if (!ua_sess) {
		/* Not an error: the application is simply being torn down. */
		DBG("Application session is being torn down (session not found). Aborting enum registration.");
		free(entries);
		goto error_rcu_unlock;
	}

	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down (registry not found). Aborting enum registration.");
		free(entries);
		goto error_rcu_unlock;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, the callee acquires the ownership of
	 * entries. The variable entries MUST NOT be read/written after
	 * this point.
	 */
	ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
			entries, nr_entries, &enum_id);
	entries = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply enum failed with ret %d", ret);
		} else {
			DBG3("UST app reply enum failed. Application died");
		}
		/*
		 * No need to wipe the create enum since the application socket will
		 * get closed on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry enum %s added successfully or already found", name);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
/*
 * Handle application notification through the given notify socket.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel_value;
		char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
				&loglevel_value, &sig, &nr_fields, &fields,
				&model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownership of these variables and transfers
		 * them to the callee.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel_value, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership is transferred to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean them up.
		 */
		ret = reply_ust_register_channel(sock, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_ENUM:
	{
		int sobjd;
		char name[LTTNG_UST_ABI_SYM_NAME_LEN];
		size_t nr_entries;
		struct ustctl_enum_entry *entries;

		DBG2("UST app ustctl register enum received");

		ret = ustctl_recv_register_enum(sock, &sobjd, name,
				&entries, &nr_entries);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv enum failed with ret %d", ret);
			} else {
				DBG3("UST app recv enum failed. Application died");
			}
			goto error;
		}

		/* Callee assumes ownership of entries */
		ret = add_enum_ust_registry(sock, sobjd, name,
				entries, nr_entries);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
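/*
 * Editor's note: illustrative summary (hypothetical helper; not part of
 * the original source) of the notify protocol dispatched above: each
 * command carries its own payload and ends with a reply that must never
 * be a negative error, since that would cause the application side to
 * close the socket.
 */
#if 0 /* illustrative only */
static const char *notify_cmd_name(enum ustctl_notify_cmd cmd)
{
	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:	return "event";
	case USTCTL_NOTIFY_CMD_CHANNEL:	return "channel";
	case USTCTL_NOTIFY_CMD_ENUM:	return "enum";
	default:			return "unknown";
	}
}
#endif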
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independently from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and if it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
/*
 * Destroy a ust app data structure and free its memory.
 */
void ust_app_destroy(struct ust_app *app)
{
	if (!app) {
		return;
	}
	call_rcu(&app->pid_n.head, delete_ust_app_rcu);
}
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
 */
enum lttng_error_code ust_app_snapshot_record(
		const struct ltt_ust_session *usess,
		const struct consumer_output *output, int wait,
		uint64_t nb_packets_per_stream)
{
	int ret = 0;
	enum lttng_error_code status = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char *trace_path = NULL;

	assert(usess);
	assert(output);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;
			char pathname[PATH_MAX];
			size_t consumer_path_offset = 0;

			if (!reg->registry->reg.ust->metadata_key) {
				/* Skip since no metadata is present */
				continue;
			}

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}

			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			/* Free path allowed on previous iteration. */
			free(trace_path);
			trace_path = setup_channel_trace_path(usess->consumer, pathname,
					&consumer_path_offset);
			if (!trace_path) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			/* Add the UST default trace dir to path. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				status = consumer_snapshot_channel(socket,
						buf_reg_chan->consumer_key,
						output, 0, usess->uid,
						usess->gid, &trace_path[consumer_path_offset], wait,
						nb_packets_per_stream);
				if (status != LTTNG_OK) {
					goto error;
				}
			}
			status = consumer_snapshot_channel(socket,
					reg->registry->reg.ust->metadata_key, output, 1,
					usess->uid, usess->gid, &trace_path[consumer_path_offset],
					wait, 0);
			if (status != LTTNG_OK) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;
			char pathname[PATH_MAX];
			size_t consumer_path_offset = 0;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					output);
			if (!socket) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
					ua_sess->path);
			if (ret < 0) {
				status = LTTNG_ERR_INVALID;
				PERROR("snprintf snapshot path");
				goto error;
			}
			/* Free path allowed on previous iteration. */
			free(trace_path);
			trace_path = setup_channel_trace_path(usess->consumer, pathname,
					&consumer_path_offset);
			if (!trace_path) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				status = consumer_snapshot_channel(socket,
						ua_chan->key, output, 0,
						lttng_credentials_get_uid(&ua_sess->effective_credentials),
						lttng_credentials_get_gid(&ua_sess->effective_credentials),
						&trace_path[consumer_path_offset], wait,
						nb_packets_per_stream);
				switch (status) {
				case LTTNG_OK:
					break;
				case LTTNG_ERR_CHAN_NOT_FOUND:
					continue;
				default:
					goto error;
				}
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}
			status = consumer_snapshot_channel(socket,
					registry->metadata_key, output, 1,
					lttng_credentials_get_uid(&ua_sess->effective_credentials),
					lttng_credentials_get_gid(&ua_sess->effective_credentials),
					&trace_path[consumer_path_offset], wait, 0);
			switch (status) {
			case LTTNG_OK:
				break;
			case LTTNG_ERR_CHAN_NOT_FOUND:
				continue;
			default:
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

error:
	free(trace_path);
	rcu_read_unlock();
	return status;
}
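/*
 * Editor's note: illustrative example (hypothetical helper; not part of
 * the original source) of the per-UID snapshot sub-path built above.
 * Assuming the usual macro values, a 64-bit app of uid 1000 would land
 * under something like "ust/uid/1000/64-bit" relative to the snapshot
 * output; the exact layout is defined by DEFAULT_UST_TRACE_DIR and
 * DEFAULT_UST_TRACE_UID_PATH.
 */
#if 0 /* illustrative only */
static int build_uid_subpath(char *buf, size_t len, uid_t uid,
		uint32_t bits_per_long)
{
	return snprintf(buf, len,
			DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
			uid, bits_per_long);
}
#endif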
/*
 * Return the size taken by one more packet per stream.
 */
uint64_t ust_app_get_size_one_more_packet_per_stream(
		const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
{
	uint64_t tot_size = 0;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	assert(usess);

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;

			rcu_read_lock();
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
					/*
					 * Don't take the channel into account if we
					 * already grabbed all its packets.
					 */
					continue;
				}
				tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
			}
			rcu_read_unlock();
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		rcu_read_lock();
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct lttng_ht_iter chan_iter;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
					/*
					 * Don't take the channel into account if we
					 * already grabbed all its packets.
					 */
					continue;
				}
				tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
			}
		}
		rcu_read_unlock();
		break;
	}
	default:
		assert(0);
		break;
	}

	return tot_size;
}
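/*
 * Note: this helper supports snapshot sizing (e.g. computing how many packets
 * per stream fit in a snapshot's maximum size): granting one more packet to
 * every stream of a channel costs subbuf_size bytes per stream, and channels
 * whose sub-buffer count is already covered by cur_nr_packets contribute
 * nothing.
 */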
int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
		struct cds_list_head *buffer_reg_uid_list,
		struct consumer_output *consumer, uint64_t uchan_id,
		int overwrite, uint64_t *discarded, uint64_t *lost)
{
	int ret;
	uint64_t consumer_chan_key;

	*discarded = 0;
	*lost = 0;

	ret = buffer_reg_uid_consumer_channel_key(
			buffer_reg_uid_list, uchan_id, &consumer_chan_key);
	if (ret < 0) {
		/* Not found. */
		ret = 0;
		goto end;
	}

	if (overwrite) {
		ret = consumer_get_lost_packets(ust_session_id,
				consumer_chan_key, consumer, lost);
	} else {
		ret = consumer_get_discarded_events(ust_session_id,
				consumer_chan_key, consumer, discarded);
	}

end:
	return ret;
}
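/*
 * Note: only one of the two counters is meaningful per channel mode, hence
 * the 'overwrite' switch above: overwrite-mode ring buffers report lost
 * (overwritten) packets, while discard-mode ring buffers report discarded
 * events.
 */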
int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan,
		struct consumer_output *consumer, int overwrite,
		uint64_t *discarded, uint64_t *lost)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	*discarded = 0;
	*lost = 0;

	rcu_read_lock();
	/*
	 * Iterate over all registered applications. Sum the counters of every
	 * application containing the requested session and channel.
	 */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;

		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session is found for the app, the channel must be there. */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		if (overwrite) {
			uint64_t _lost;

			ret = consumer_get_lost_packets(usess->id, ua_chan->key,
					consumer, &_lost);
			if (ret < 0) {
				break;
			}
			(*lost) += _lost;
		} else {
			uint64_t _discarded;

			ret = consumer_get_discarded_events(usess->id,
					ua_chan->key, consumer, &_discarded);
			if (ret < 0) {
				break;
			}
			(*discarded) += _discarded;
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Regenerate the statedump for a given application.
 */
int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Regenerating the metadata for ust app pid %d", app->pid);

	rcu_read_lock();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_unlock;
	}

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
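/*
 * Note: ustctl_regenerate_statedump() asks the application, over its command
 * socket (guarded by app->sock_lock), to re-emit its statedump events into
 * the current trace. ua_sess->deleted is checked under the session lock so
 * that no request is sent to a session that is being torn down.
 */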
/*
 * Regenerate the statedump for each app in the session.
 */
int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Regenerating the metadata for all UST apps");

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			continue;
		}

		ret = ust_app_regenerate_statedump(usess, app);
		if (ret < 0) {
			/* Continue to the next app even on error. */
			continue;
		}
	}

	rcu_read_unlock();

	return 0;
}
/*
 * Rotate all the channels of a session.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ltt_ust_session *usess = session->ust_session;

	assert(usess);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata. */
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error;
			}

			/* Rotate the data channels. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				ret = consumer_rotate_channel(socket,
						buf_reg_chan->consumer_key,
						usess->uid, usess->gid,
						usess->consumer,
						/* is_metadata_channel */ false);
				if (ret < 0) {
					cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
					goto error;
				}
			}

			/*
			 * The metadata channel might not be present.
			 *
			 * Consumer stream allocation can be done
			 * asynchronously and can fail on intermediary
			 * operations (i.e. add context) and lead to data
			 * channels created with no metadata channel.
			 */
			if (!reg->registry->reg.ust->metadata_key) {
				/* Skip since no metadata is present. */
				continue;
			}

			(void) push_metadata(reg->registry->reg.ust, usess->consumer);

			ret = consumer_rotate_channel(socket,
					reg->registry->reg.ust->metadata_key,
					usess->uid, usess->gid,
					usess->consumer,
					/* is_metadata_channel */ true);
			if (ret < 0) {
				cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			/* Rotate the data channels. */
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_rotate_channel(socket,
						ua_chan->key,
						lttng_credentials_get_uid(&ua_sess->effective_credentials),
						lttng_credentials_get_gid(&ua_sess->effective_credentials),
						ua_sess->consumer,
						/* is_metadata_channel */ false);
				/* Per-PID buffer and application going away. */
				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
					continue;
				}
				if (ret < 0) {
					cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
					goto error;
				}
			}

			/* Rotate the metadata channel. */
			(void) push_metadata(registry, usess->consumer);
			ret = consumer_rotate_channel(socket,
					registry->metadata_key,
					lttng_credentials_get_uid(&ua_sess->effective_credentials),
					lttng_credentials_get_gid(&ua_sess->effective_credentials),
					ua_sess->consumer,
					/* is_metadata_channel */ true);
			/* Per-PID buffer and application going away. */
			if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
				continue;
			}
			if (ret < 0) {
				cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

	cmd_ret = LTTNG_OK;

error:
	rcu_read_unlock();
	return cmd_ret;
}
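/*
 * Note on ordering: the metadata cache is pushed to the consumer
 * (push_metadata) before the metadata channel is rotated, so that the trace
 * chunk being closed contains a complete metadata stream.
 */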
enum lttng_error_code ust_app_create_channel_subdirectories(
		const struct ltt_ust_session *usess)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	enum lttng_trace_chunk_status chunk_status;
	char *pathname_index;
	int fmt_ret;

	assert(usess->current_trace_chunk);
	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			fmt_ret = asprintf(&pathname_index,
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
					reg->uid, reg->bits_per_long);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}

			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
					usess->current_trace_chunk,
					pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;

		/*
		 * Create the toplevel ust/ directory in case no apps are running.
		 */
		chunk_status = lttng_trace_chunk_create_subdirectory(
				usess->current_trace_chunk,
				DEFAULT_UST_TRACE_DIR);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ret = LTTNG_ERR_CREATE_DIR_FAIL;
			goto error;
		}

		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			fmt_ret = asprintf(&pathname_index,
					DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
					ua_sess->path);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
					usess->current_trace_chunk,
					pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	default:
		abort();
	}

	ret = LTTNG_OK;
error:
	rcu_read_unlock();
	return ret;
}
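/*
 * Note: directories are created through the trace chunk
 * (lttng_trace_chunk_create_subdirectory) rather than with mkdir() directly,
 * so that paths are resolved relative to the chunk's output directory;
 * creating the index subdirectory implicitly creates every missing parent,
 * including the channel directory itself.
 */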
/*
 * Clear all the channels of a session.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ltt_ust_session *usess = session->ust_session;

	assert(usess);

	rcu_read_lock();

	if (usess->active) {
		ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
		cmd_ret = LTTNG_ERR_FATAL;
		goto end;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata. */
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error_socket;
			}

			/* Clear the data channels. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				ret = consumer_clear_channel(socket,
						buf_reg_chan->consumer_key);
				if (ret < 0) {
					goto error;
				}
			}

			(void) push_metadata(reg->registry->reg.ust, usess->consumer);

			/*
			 * Clear the metadata channel.
			 * Metadata channel is not cleared per se but we still need to
			 * perform a rotation operation on it behind the scene.
			 */
			ret = consumer_clear_channel(socket,
					reg->registry->reg.ust->metadata_key);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error_socket;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			/* Clear the data channels. */
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_clear_channel(socket, ua_chan->key);
				if (ret < 0) {
					/* Per-PID buffer and application going away. */
					if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
						continue;
					}
					goto error;
				}
			}

			(void) push_metadata(registry, usess->consumer);

			/*
			 * Clear the metadata channel.
			 * Metadata channel is not cleared per se but we still need to
			 * perform a rotation operation on it behind the scene.
			 */
			ret = consumer_clear_channel(socket, registry->metadata_key);
			if (ret < 0) {
				/* Per-PID buffer and application going away. */
				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
					continue;
				}
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

	cmd_ret = LTTNG_OK;
	goto end;

error:
	switch (-ret) {
	case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
		cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
		break;
	default:
		cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
		break;
	}

error_socket:
end:
	rcu_read_unlock();
	return cmd_ret;
}
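/*
 * Note: a "clear" never truncates the metadata channel; as the comments above
 * state, the consumer only performs a rotation on it behind the scene so that
 * the metadata stream remains a superset of what was previously produced,
 * a property that live clients and the relay daemon depend on (see the
 * comment below).
 */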
/*
 * This function skips the metadata channel as the begin/end timestamps of a
 * metadata packet are useless.
 *
 * Moreover, opening a packet after a "clear" will cause problems for live
 * sessions as it will introduce padding that was not part of the first trace
 * chunk. The relay daemon expects the content of the metadata stream of
 * successive metadata trace chunks to be strict supersets of one another.
 *
 * For example, flushing a packet at the beginning of the metadata stream of
 * a trace chunk resulting from a "clear" session command will cause the
 * size of the metadata stream of the new trace chunk to not match the size of
 * the metadata stream of the original chunk. This will confuse the relay
 * daemon as the same "offset" in a metadata stream will no longer point
 * to the same content.
 */
enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ltt_ust_session *usess = session->ust_session;

	assert(usess);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry (
				reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			if (!socket) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, buf_reg_chan, node.node) {
				const int open_ret =
						consumer_open_channel_packets(
								socket,
								buf_reg_chan->consumer_key);

				if (open_ret < 0) {
					ret = LTTNG_ERR_UNK;
					goto error;
				}
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;

		cds_lfht_for_each_entry (
				ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(
					app->bits_per_long, usess->consumer);
			if (!socket) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht,
					&chan_iter.iter, ua_chan, node.node) {
				const int open_ret =
						consumer_open_channel_packets(
								socket,
								ua_chan->key);

				if (open_ret < 0) {
					/*
					 * Per-PID buffer and application going
					 * away.
					 */
					if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
						continue;
					}

					ret = LTTNG_ERR_UNK;
					goto error;
				}
			}
		}
		break;
	}
	default:
		abort();
		break;
	}

error:
	rcu_read_unlock();
	return ret;
}