/*
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <sys/types.h>

#include <urcu/compiler.h>

#include <common/bytecode/bytecode.h>
#include <common/compat/errno.h>
#include <common/common.h>
#include <common/hashtable/utils.h>
#include <lttng/event-rule/event-rule.h>
#include <lttng/event-rule/event-rule-internal.h>
#include <lttng/event-rule/tracepoint.h>
#include <lttng/condition/condition.h>
#include <lttng/condition/on-event-internal.h>
#include <lttng/condition/on-event.h>
#include <lttng/trigger/trigger-internal.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "condition-internal.h"
#include "health-sessiond.h"
#include "ust-consumer.h"
#include "lttng-ust-ctl.h"
#include "lttng-ust-error.h"
#include "lttng-sessiond.h"
#include "notification-thread-commands.h"
#include "event-notifier-error-accounting.h"
struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;

int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key(void)
    pthread_mutex_lock(&next_channel_key_lock);
    ret = ++_next_channel_key;
    pthread_mutex_unlock(&next_channel_key_lock);

/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id(void)
    pthread_mutex_lock(&next_session_id_lock);
    ret = ++_next_session_id;
    pthread_mutex_unlock(&next_session_id_lock);
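/*
 * Both counters above start at zero and are pre-incremented, so the first
 * channel key / session id handed out is 1; a value of 0 can therefore be
 * treated as "unset" (as done for registry->metadata_key further down).
 */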
static void copy_channel_attr_to_ustctl(
        struct ustctl_consumer_channel_attr *attr,
        struct lttng_ust_abi_channel_attr *uattr)
    /* Copy channel attributes since the layout is different. */
    attr->subbuf_size = uattr->subbuf_size;
    attr->num_subbuf = uattr->num_subbuf;
    attr->overwrite = uattr->overwrite;
    attr->switch_timer_interval = uattr->switch_timer_interval;
    attr->read_timer_interval = uattr->read_timer_interval;
    attr->output = uattr->output;
    attr->blocking_timeout = uattr->u.s.blocking_timeout;
/*
 * Match function for the hash table lookup.
 *
 * It matches a UST app event based on the event name, the filter bytecode,
 * the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
    struct ust_app_event *event;
    const struct ust_app_ht_key *key;
    int ev_loglevel_value;

    event = caa_container_of(node, struct ust_app_event, node.node);
    ev_loglevel_value = event->attr.loglevel;

    /* Match the 4 elements of the key: name, filter, loglevel, exclusions. */

    if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {

    /* Event loglevel. */
    if (ev_loglevel_value != key->loglevel_type) {
        if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL
                && key->loglevel_type == 0 &&
                ev_loglevel_value == -1) {
            /*
             * Match is accepted. This is because on event creation, the
             * loglevel is set to -1 if the event loglevel type is ALL so 0 and
             * -1 are accepted for this loglevel type since 0 is the one set by
             * the API when receiving an enable event.
             */

    /* One of the filters is NULL, fail. */
    if ((key->filter && !event->filter) || (!key->filter && event->filter)) {

    if (key->filter && event->filter) {
        /* Both filters exist, check length followed by the bytecode. */
        if (event->filter->len != key->filter->len ||
                memcmp(event->filter->data, key->filter->data,
                    event->filter->len) != 0) {

    /* One of the exclusions is NULL, fail. */
    if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {

    if (key->exclusion && event->exclusion) {
        /* Both exclusions exist, check count followed by the names. */
        if (event->exclusion->count != key->exclusion->count ||
                memcmp(event->exclusion->names, key->exclusion->names,
                    event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
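/*
 * Per the liburcu cds_lfht convention, this match function returns non-zero
 * when the node matches the key and 0 otherwise.
 */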
/*
 * Unique add of a UST app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
        struct ust_app_event *event)
    struct cds_lfht_node *node_ptr;
    struct ust_app_ht_key key;

    assert(ua_chan->events);

    ht = ua_chan->events;
    key.name = event->attr.name;
    key.filter = event->filter;
    key.loglevel_type = event->attr.loglevel;
    key.exclusion = event->exclusion;

    node_ptr = cds_lfht_add_unique(ht->ht,
            ht->hash_fct(event->node.key, lttng_ht_seed),
            ht_match_ust_app_event, &key, &event->node.node);
    assert(node_ptr == &event->node.node);
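/*
 * cds_lfht_add_unique() returns the existing node when a duplicate is found,
 * so the assert above guarantees that no two events sharing the same
 * name/filter/loglevel/exclusions are ever published in the channel's ht.
 */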
/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
    struct ust_app_notify_sock_obj *obj =
            caa_container_of(head, struct ust_app_notify_sock_obj, head);

    /* Must have a valid fd here. */
    assert(obj->fd >= 0);

    ret = close(obj->fd);
        ERR("close notify sock %d RCU", obj->fd);
    lttng_fd_put(LTTNG_FD_APPS, 1);
/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exist before calling this function or else
 * it assert()s if not found. RCU read side lock must be acquired.
 */
static struct ust_registry_session *get_session_registry(
        struct ust_app_session *ua_sess)
    struct ust_registry_session *registry = NULL;

    switch (ua_sess->buffer_type) {
    case LTTNG_BUFFER_PER_PID:
        struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
        registry = reg_pid->registry->reg.ust;
    case LTTNG_BUFFER_PER_UID:
        struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
                ua_sess->tracing_id, ua_sess->bits_per_long,
                lttng_credentials_get_uid(&ua_sess->real_credentials));
        registry = reg_uid->registry->reg.ust;
/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
        struct ust_app *app)
    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_release_object(sock, ua_ctx->obj);
    pthread_mutex_unlock(&app->sock_lock);
    if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
        ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
                sock, ua_ctx->obj->handle, ret);
/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
        struct ust_app *app)
    free(ua_event->filter);
    if (ua_event->exclusion != NULL)
        free(ua_event->exclusion);
    if (ua_event->obj != NULL) {
        pthread_mutex_lock(&app->sock_lock);
        ret = ustctl_release_object(sock, ua_event->obj);
        pthread_mutex_unlock(&app->sock_lock);
        if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("UST app sock %d release event obj failed with ret %d",
                    sock, ret);
/*
 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
 * through a call_rcu().
 */
static void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
    struct ust_app_event_notifier_rule *obj = caa_container_of(
            head, struct ust_app_event_notifier_rule, rcu_head);
/*
 * Delete ust app event notifier rule safely.
 */
static void delete_ust_app_event_notifier_rule(int sock,
        struct ust_app_event_notifier_rule *ua_event_notifier_rule,
        struct ust_app *app)
    assert(ua_event_notifier_rule);

    if (ua_event_notifier_rule->exclusion != NULL) {
        free(ua_event_notifier_rule->exclusion);

    if (ua_event_notifier_rule->obj != NULL) {
        pthread_mutex_lock(&app->sock_lock);
        ret = ustctl_release_object(sock, ua_event_notifier_rule->obj);
        pthread_mutex_unlock(&app->sock_lock);
        if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("Failed to release event notifier object: app = '%s' (ppid %d), ret = %d",
                    app->name, (int) app->ppid, ret);

        free(ua_event_notifier_rule->obj);

    lttng_trigger_put(ua_event_notifier_rule->trigger);
    call_rcu(&ua_event_notifier_rule->rcu_head,
            free_ust_app_event_notifier_rule_rcu);
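/*
 * Ownership note: the rule holds its own reference on the trigger (taken in
 * alloc_ust_app_event_notifier_rule() below); that reference is dropped here
 * before the structure is reclaimed through call_rcu().
 */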
/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
        struct ust_app *app)
    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_release_object(sock, stream->obj);
    pthread_mutex_unlock(&app->sock_lock);
    if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
        ERR("UST app sock %d release stream obj failed with ret %d",
                sock, ret);
    lttng_fd_put(LTTNG_FD_APPS, 2);
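/*
 * The "2" above reflects the two file descriptors accounted for each stream
 * (presumably the shared-memory fd and the wakeup fd received from the
 * consumer), mirroring the single fd put done per channel object elsewhere.
 */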
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
        struct ust_app *app)
    (void) release_ust_app_stream(sock, stream, app);
/*
 * We need to execute ht_destroy outside of the RCU read-side critical
 * section and outside of the call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than changing the semantics of
 * the many callers of delete_ust_app_session().
 */
void delete_ust_app_channel_rcu(struct rcu_head *head)
    struct ust_app_channel *ua_chan =
            caa_container_of(head, struct ust_app_channel, rcu_head);

    ht_cleanup_push(ua_chan->ctx);
    ht_cleanup_push(ua_chan->events);
/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
    uint64_t discarded = 0, lost = 0;
    struct ltt_session *session;
    struct ltt_ust_channel *uchan;

    if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {

    session = session_find_by_id(ua_chan->session->tracing_id);
    if (!session || !session->ust_session) {
        /*
         * Not finding the session is not an error because there are
         * multiple ways the channels can be torn down.
         *
         * 1) The session daemon can initiate the destruction of the
         *    ust app session after receiving a destroy command or
         *    during its shutdown/teardown.
         * 2) The application, since we are in per-pid tracing, is
         *    unregistering and tearing down its ust app session.
         *
         * Both paths are protected by the session list lock which
         * ensures that the accounting of lost packets and discarded
         * events is done exactly once. The session is then unpublished
         * from the session list, resulting in this condition.
         */

    if (ua_chan->attr.overwrite) {
        consumer_get_lost_packets(ua_chan->session->tracing_id,
                ua_chan->key, session->ust_session->consumer,
                &lost);
    } else {
        consumer_get_discarded_events(ua_chan->session->tracing_id,
                ua_chan->key, session->ust_session->consumer,
                &discarded);
    }

    uchan = trace_ust_find_channel_by_name(
            session->ust_session->domain_global.channels,
            ua_chan->name);
        ERR("Missing UST channel to store discarded counters");

    uchan->per_pid_closed_app_discarded += discarded;
    uchan->per_pid_closed_app_lost += lost;

    session_put(session);
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
        struct ust_app *app)
    struct lttng_ht_iter iter;
    struct ust_app_event *ua_event;
    struct ust_app_ctx *ua_ctx;
    struct ust_app_stream *stream, *stmp;
    struct ust_registry_session *registry;

    DBG3("UST app deleting channel %s", ua_chan->name);

    cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
        cds_list_del(&stream->list);
        delete_ust_app_stream(sock, stream, app);

    cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
        cds_list_del(&ua_ctx->list);
        ret = lttng_ht_del(ua_chan->ctx, &iter);
        delete_ust_app_ctx(sock, ua_ctx, app);

    cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
            node.node) {
        ret = lttng_ht_del(ua_chan->events, &iter);
        delete_ust_app_event(sock, ua_event, app);

    if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
        /* Wipe and free registry from session registry. */
        registry = get_session_registry(ua_chan->session);
            ust_registry_channel_del_free(registry, ua_chan->key,
        /*
         * A negative socket can be used by the caller when
         * cleaning-up a ua_chan in an error path. Skip the
         * accounting in this case.
         */
            save_per_pid_lost_discarded_counters(ua_chan);

    if (ua_chan->obj != NULL) {
        /* Remove channel from application UST object descriptor. */
        iter.iter.node = &ua_chan->ust_objd_node.node;
        ret = lttng_ht_del(app->ust_objd, &iter);
        pthread_mutex_lock(&app->sock_lock);
        ret = ustctl_release_object(sock, ua_chan->obj);
        pthread_mutex_unlock(&app->sock_lock);
        if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("UST app sock %d release channel obj failed with ret %d",
                    sock, ret);
        lttng_fd_put(LTTNG_FD_APPS, 1);

    call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
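/*
 * Teardown order above: streams first, then contexts and events, then the
 * per-PID registry bookkeeping, and only at the end is the channel object
 * released on the application side and the structure reclaimed through
 * call_rcu(), so concurrent RCU readers never dereference freed memory.
 */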
int ust_app_register_done(struct ust_app *app)
    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_register_done(app->sock);
    pthread_mutex_unlock(&app->sock_lock);

int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data *data)
    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_release_object(sock, data);
    pthread_mutex_unlock(&app->sock_lock);
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
        struct consumer_socket *socket, int send_zero_data)
    char *metadata_str = NULL;
    size_t len, offset, new_metadata_len_sent;
    uint64_t metadata_key, metadata_version;

    metadata_key = registry->metadata_key;
        /*
         * Means that no metadata was assigned to the session. This can
         * happen if no start has been done previously.
         */

    offset = registry->metadata_len_sent;
    len = registry->metadata_len - registry->metadata_len_sent;
    new_metadata_len_sent = registry->metadata_len;
    metadata_version = registry->metadata_version;
        DBG3("No metadata to push for metadata key %" PRIu64,
                registry->metadata_key);
        if (send_zero_data) {
            DBG("No metadata to push");

    /* Allocate only what we have to send. */
    metadata_str = zmalloc(len);
        PERROR("zmalloc ust app metadata string");

    /* Copy what we haven't sent out. */
    memcpy(metadata_str, registry->metadata + offset, len);

    pthread_mutex_unlock(&registry->lock);
    /*
     * We need to unlock the registry while we push metadata to
     * break a circular dependency between the consumerd metadata
     * lock and the sessiond registry lock. Indeed, pushing metadata
     * to the consumerd waits until it has been pushed all the way to
     * the relayd, but doing so requires grabbing the metadata lock. If
     * a concurrent metadata request is being performed by
     * consumerd, this can try to grab the registry lock on the
     * sessiond while holding the metadata lock on the consumer
     * daemon. Those push and pull schemes are performed on two
     * different bidirectional communication sockets.
     */
    ret = consumer_push_metadata(socket, metadata_key,
            metadata_str, len, offset, metadata_version);
    pthread_mutex_lock(&registry->lock);
        /*
         * There is an acceptable race here between the registry
         * metadata key assignment and the creation on the
         * consumer. The session daemon can concurrently push
         * metadata for this registry while it is being created on the
         * consumer since the metadata key of the registry is
         * assigned *before* it is set up, to avoid the consumer
         * asking for metadata that could possibly not be found
         * in the session daemon.
         *
         * The metadata will get pushed either by the session
         * being stopped or the consumer requesting metadata if
         * that race is triggered.
         */
        if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
            ERR("Error pushing metadata to consumer");

        /*
         * Metadata may have been concurrently pushed, since
         * we're not holding the registry lock while pushing to
         * consumer. This is handled by the fact that we send
         * the metadata content, size, and the offset at which
         * that metadata belongs. This may arrive out of order
         * on the consumer side, and the consumer supports
         * overlapping fragments, which must be contiguous
         * starting from offset 0. We keep the largest
         * metadata_len_sent value of the concurrent pushes.
         */
        registry->metadata_len_sent =
                max_t(size_t, registry->metadata_len_sent,
                        new_metadata_len_sent);

        /*
         * On error, flag the registry that the metadata is
         * closed. We were unable to push anything and this
         * means that either the consumer is not responding or
         * the metadata cache has been destroyed on the
         * consumer.
         */
        registry->metadata_closed = 1;
/*
 * For a given application and session, push metadata to consumer.
 * Either sock or consumer is required: if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
        struct consumer_output *consumer)
    struct consumer_socket *socket;

    pthread_mutex_lock(&registry->lock);
    if (registry->metadata_closed) {

    /* Get consumer socket to use to push the metadata. */
    socket = consumer_find_socket_by_bitness(registry->bits_per_long,
            consumer);

    ret = ust_app_push_metadata(registry, socket, 0);

    pthread_mutex_unlock(&registry->lock);

    pthread_mutex_unlock(&registry->lock);
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd, because
 * doing so causes inter-process deadlocks between consumerd and sessiond with
 * the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
        struct consumer_output *consumer)
    struct consumer_socket *socket;
    uint64_t metadata_key;
    bool registry_was_already_closed;

    pthread_mutex_lock(&registry->lock);
    metadata_key = registry->metadata_key;
    registry_was_already_closed = registry->metadata_closed;
    if (metadata_key != 0) {
        /*
         * Metadata closed. Even on error this means that the consumer
         * is not responding or not found so either way a second close
         * should NOT be emitted for this registry.
         */
        registry->metadata_closed = 1;

    pthread_mutex_unlock(&registry->lock);

    if (metadata_key == 0 || registry_was_already_closed) {

    /* Get consumer socket to use to push the metadata. */
    socket = consumer_find_socket_by_bitness(registry->bits_per_long,
            consumer);

    ret = consumer_close_metadata(socket, metadata_key);
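/*
 * Note the pattern above: the key and the "already closed" flag are
 * snapshotted, and the closed flag is set, while holding the registry lock,
 * but the actual close command is sent to the consumerd only after the lock
 * is released, in line with the deadlock warning in the function header.
 */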
/*
 * We need to execute ht_destroy outside of the RCU read-side critical
 * section and outside of the call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than changing the semantics of
 * the many callers of delete_ust_app_session().
 */
void delete_ust_app_session_rcu(struct rcu_head *head)
    struct ust_app_session *ua_sess =
            caa_container_of(head, struct ust_app_session, rcu_head);

    ht_cleanup_push(ua_sess->channels);
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
        struct ust_app *app)
    struct lttng_ht_iter iter;
    struct ust_app_channel *ua_chan;
    struct ust_registry_session *registry;

    pthread_mutex_lock(&ua_sess->lock);

    assert(!ua_sess->deleted);
    ua_sess->deleted = true;

    registry = get_session_registry(ua_sess);
    /* Registry can be null on error path during initialization. */
        /* Push metadata for application before freeing the application. */
        (void) push_metadata(registry, ua_sess->consumer);

        /*
         * Don't ask to close metadata for global per UID buffers. Close
         * metadata only on destroy trace session in this case. Also, the
         * previous push metadata could have flagged the metadata registry to
         * close so don't send a close command if closed.
         */
        if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
            /* And ask to close it for this session registry. */
            (void) close_metadata(registry, ua_sess->consumer);

    cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
            node.node) {
        ret = lttng_ht_del(ua_sess->channels, &iter);
        delete_ust_app_channel(sock, ua_chan, app);

    /* In case of per PID, the registry is kept in the session. */
    if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
        struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
            /*
             * Registry can be null on error path during
             * initialization.
             */
            buffer_reg_pid_remove(reg_pid);
            buffer_reg_pid_destroy(reg_pid);

    if (ua_sess->handle != -1) {
        pthread_mutex_lock(&app->sock_lock);
        ret = ustctl_release_handle(sock, ua_sess->handle);
        pthread_mutex_unlock(&app->sock_lock);
        if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("UST app sock %d release session handle failed with ret %d",
                    sock, ret);

        /* Remove session from application UST object descriptor. */
        iter.iter.node = &ua_sess->ust_objd_node.node;
        ret = lttng_ht_del(app->ust_sessions_objd, &iter);

    pthread_mutex_unlock(&ua_sess->lock);

    consumer_output_put(ua_sess->consumer);

    call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
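/*
 * The ua_sess->deleted flag set above (under ua_sess->lock) is what other
 * code paths are expected to check before using a session that may be
 * concurrently torn down; the structure itself is only freed after an RCU
 * grace period via delete_ust_app_session_rcu().
 */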
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
void delete_ust_app(struct ust_app *app)
    struct ust_app_session *ua_sess, *tmp_ua_sess;
    struct lttng_ht_iter iter;
    struct ust_app_event_notifier_rule *event_notifier_rule;
    bool event_notifier_write_fd_is_open;

    /*
     * The session list lock must be held during this function to guarantee
     * the existence of ua_sess.
     */

    /* Delete ust app sessions info */
    cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
        /* Free every object in the session and the session. */
        delete_ust_app_session(sock, ua_sess, app);

    /* Remove the event notifier rules associated with this app. */
    cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
            &iter.iter, event_notifier_rule, node.node) {
        ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
        delete_ust_app_event_notifier_rule(
                app->sock, event_notifier_rule, app);

    ht_cleanup_push(app->sessions);
    ht_cleanup_push(app->ust_sessions_objd);
    ht_cleanup_push(app->ust_objd);
    ht_cleanup_push(app->token_to_event_notifier_rule_ht);

    /*
     * This could be NULL if the event notifier setup failed (e.g. the app
     * was killed or the tracer does not support this feature).
     */
    if (app->event_notifier_group.object) {
        enum lttng_error_code ret_code;
        enum event_notifier_error_accounting_status status;

        const int event_notifier_read_fd = lttng_pipe_get_readfd(
                app->event_notifier_group.event_pipe);

        ret_code = notification_thread_command_remove_tracer_event_source(
                the_notification_thread_handle,
                event_notifier_read_fd);
        if (ret_code != LTTNG_OK) {
            ERR("Failed to remove application tracer event source from notification thread");

        status = event_notifier_error_accounting_unregister_app(app);
        if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
            ERR("Error unregistering app from event notifier error accounting");

        ustctl_release_object(sock, app->event_notifier_group.object);
        free(app->event_notifier_group.object);

    event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
            app->event_notifier_group.event_pipe);
    lttng_pipe_destroy(app->event_notifier_group.event_pipe);
    /*
     * Release the file descriptors reserved for the event notifier pipe.
     * The app could be destroyed before the write end of the pipe could be
     * passed to the application (and closed). In that case, both file
     * descriptors must be released.
     */
    lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);

    /*
     * Wait until we have deleted the application from the sock hash table
     * before closing this socket, otherwise an application could re-use the
     * socket ID and race with the teardown, using the same hash table entry.
     *
     * It's OK to leave the close in call_rcu. We want it to stay unique for
     * all RCU readers that could run concurrently with unregister app,
     * therefore we _need_ to only close that socket after a grace period. So
     * it should stay in this RCU callback.
     *
     * This close() is a very important step of the synchronization model so
     * every modification to this function must be carefully reviewed.
     */
    lttng_fd_put(LTTNG_FD_APPS, 1);

    DBG2("UST app pid %d deleted", app->pid);

    session_unlock_list();
/*
 * URCU intermediate call to delete a UST app.
 */
void delete_ust_app_rcu(struct rcu_head *head)
    struct lttng_ht_node_ulong *node =
            caa_container_of(head, struct lttng_ht_node_ulong, head);
    struct ust_app *app =
            caa_container_of(node, struct ust_app, pid_n);

    DBG3("Call RCU deleting app PID %d", app->pid);
    delete_ust_app(app);
/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 *
 * The session list lock must be held by the caller.
 */
static void destroy_app_session(struct ust_app *app,
        struct ust_app_session *ua_sess)
    struct lttng_ht_iter iter;

    iter.iter.node = &ua_sess->node.node;
    ret = lttng_ht_del(app->sessions, &iter);
        /* Already scheduled for teardown. */

    /* Once deleted, free the data structure. */
    delete_ust_app_session(app->sock, ua_sess, app);
/*
 * Alloc new UST app session.
 */
struct ust_app_session *alloc_ust_app_session(void)
    struct ust_app_session *ua_sess;

    /* Init most of the default values by allocating and zeroing. */
    ua_sess = zmalloc(sizeof(struct ust_app_session));
    if (ua_sess == NULL) {

    ua_sess->handle = -1;
    ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
    ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
    pthread_mutex_init(&ua_sess->lock, NULL);
/*
 * Alloc new UST app channel.
 */
struct ust_app_channel *alloc_ust_app_channel(const char *name,
        struct ust_app_session *ua_sess,
        struct lttng_ust_abi_channel_attr *attr)
    struct ust_app_channel *ua_chan;

    /* Init most of the default values by allocating and zeroing. */
    ua_chan = zmalloc(sizeof(struct ust_app_channel));
    if (ua_chan == NULL) {

    /* Setup channel name */
    strncpy(ua_chan->name, name, sizeof(ua_chan->name));
    ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

    ua_chan->enabled = 1;
    ua_chan->handle = -1;
    ua_chan->session = ua_sess;
    ua_chan->key = get_next_channel_key();
    ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
    ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
    lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

    CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
    CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

    /* Copy attributes */
    /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
    ua_chan->attr.subbuf_size = attr->subbuf_size;
    ua_chan->attr.num_subbuf = attr->num_subbuf;
    ua_chan->attr.overwrite = attr->overwrite;
    ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
    ua_chan->attr.read_timer_interval = attr->read_timer_interval;
    ua_chan->attr.output = attr->output;
    ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;

    /* By default, the channel is a per cpu channel. */
    ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;

    DBG3("UST app channel %s allocated", ua_chan->name);
/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
    struct ust_app_stream *stream = NULL;

    stream = zmalloc(sizeof(*stream));
    if (stream == NULL) {
        PERROR("zmalloc ust app stream");

    /* Zero could be a valid value for a handle so flag it to -1. */
    stream->handle = -1;
/*
 * Alloc new UST app event.
 */
struct ust_app_event *alloc_ust_app_event(char *name,
        struct lttng_ust_abi_event *attr)
    struct ust_app_event *ua_event;

    /* Init most of the default values by allocating and zeroing. */
    ua_event = zmalloc(sizeof(struct ust_app_event));
    if (ua_event == NULL) {
        PERROR("Failed to allocate ust_app_event structure");

    ua_event->enabled = 1;
    strncpy(ua_event->name, name, sizeof(ua_event->name));
    ua_event->name[sizeof(ua_event->name) - 1] = '\0';
    lttng_ht_node_init_str(&ua_event->node, ua_event->name);

    /* Copy attributes */
    memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));

    DBG3("UST app event %s allocated", ua_event->name);
/*
 * Allocate a new UST app event notifier rule.
 */
static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
        struct lttng_trigger *trigger)
    enum lttng_event_rule_generate_exclusions_status
            generate_exclusion_status;
    struct ust_app_event_notifier_rule *ua_event_notifier_rule;
    struct lttng_condition *condition = NULL;
    const struct lttng_event_rule *event_rule = NULL;

    ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
    if (ua_event_notifier_rule == NULL) {
        PERROR("Failed to allocate ust_app_event_notifier_rule structure");

    ua_event_notifier_rule->enabled = 1;
    ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
    lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
            ua_event_notifier_rule->token);

    condition = lttng_trigger_get_condition(trigger);
    assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_ON_EVENT);
    assert(LTTNG_CONDITION_STATUS_OK == lttng_condition_on_event_get_rule(condition, &event_rule));

    ua_event_notifier_rule->error_counter_index =
            lttng_condition_on_event_get_error_counter_index(condition);
    /* Acquire the event notifier's reference to the trigger. */
    lttng_trigger_get(trigger);

    ua_event_notifier_rule->trigger = trigger;
    ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
    generate_exclusion_status = lttng_event_rule_generate_exclusions(
            event_rule, &ua_event_notifier_rule->exclusion);
    switch (generate_exclusion_status) {
    case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
    case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
        /* Error occurred. */
        ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
        goto error_put_trigger;

    DBG3("UST app event notifier rule allocated: token = %" PRIu64,
            ua_event_notifier_rule->token);

    return ua_event_notifier_rule;

error_put_trigger:
    lttng_trigger_put(trigger);
    free(ua_event_notifier_rule);
/*
 * Alloc new UST app context.
 */
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
    struct ust_app_ctx *ua_ctx;

    ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
    if (ua_ctx == NULL) {

    CDS_INIT_LIST_HEAD(&ua_ctx->list);

    memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
    if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
        char *provider_name = NULL, *ctx_name = NULL;

        provider_name = strdup(uctx->u.app_ctx.provider_name);
        ctx_name = strdup(uctx->u.app_ctx.ctx_name);
        if (!provider_name || !ctx_name) {
            free(provider_name);

        ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
        ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;

    DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
/*
 * Create a liblttng-ust filter bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
        const struct lttng_bytecode *orig_f)
    struct lttng_ust_abi_filter_bytecode *filter = NULL;

    /* Copy filter bytecode. */
    filter = zmalloc(sizeof(*filter) + orig_f->len);
        PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);

    assert(sizeof(struct lttng_bytecode) ==
            sizeof(struct lttng_ust_abi_filter_bytecode));
    memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);

/*
 * Create a liblttng-ust capture bytecode from given bytecode.
 *
 * Return allocated capture bytecode or NULL on error.
 */
static struct lttng_ust_abi_capture_bytecode *
create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
    struct lttng_ust_abi_capture_bytecode *capture = NULL;

    /* Copy capture bytecode. */
    capture = zmalloc(sizeof(*capture) + orig_f->len);
        PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);

    assert(sizeof(struct lttng_bytecode) ==
            sizeof(struct lttng_ust_abi_capture_bytecode));
    memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
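/*
 * Both helpers above rely on the internal lttng_bytecode layout matching the
 * UST ABI structures exactly (enforced by the asserts), which is why a single
 * flat memcpy of the header plus `len` bytes of bytecode is sufficient.
 */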
/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
    struct lttng_ht_node_ulong *node;
    struct lttng_ht_iter iter;

    lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
    node = lttng_ht_iter_get_node_ulong(&iter);
        DBG2("UST app find by sock %d not found", sock);

    return caa_container_of(node, struct ust_app, sock_n);

/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
    struct lttng_ht_node_ulong *node;
    struct lttng_ht_iter iter;

    lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
            &iter);
    node = lttng_ht_iter_get_node_ulong(&iter);
        DBG2("UST app find by notify sock %d not found", sock);

    return caa_container_of(node, struct ust_app, notify_sock_n);
/*
 * Lookup for an ust app event based on event name, filter bytecode and the
 * event loglevel.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
        const char *name, const struct lttng_bytecode *filter,
        const struct lttng_event_exclusion *exclusion)
    struct lttng_ht_iter iter;
    struct lttng_ht_node_str *node;
    struct ust_app_event *event = NULL;
    struct ust_app_ht_key key;

    /* Setup key for event lookup. */
    key.filter = filter;
    key.loglevel_type = loglevel_value;
    /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
    key.exclusion = exclusion;

    /* Lookup using the event name as hash and a custom match fct. */
    cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
            ht_match_ust_app_event, &key, &iter.iter);
    node = lttng_ht_iter_get_node_str(&iter);

    event = caa_container_of(node, struct ust_app_event, node);
/*
 * Look-up an event notifier rule based on its token id.
 *
 * Must be called with the RCU read lock held.
 * Return an ust_app_event_notifier_rule object or NULL on error.
 */
static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
        struct lttng_ht *ht, uint64_t token)
    struct lttng_ht_iter iter;
    struct lttng_ht_node_u64 *node;
    struct ust_app_event_notifier_rule *event_notifier_rule = NULL;

    lttng_ht_lookup(ht, &token, &iter);
    node = lttng_ht_iter_get_node_u64(&iter);
        DBG2("UST app event notifier rule token not found: token = %" PRIu64,
                token);

    event_notifier_rule = caa_container_of(
            node, struct ust_app_event_notifier_rule, node);

    return event_notifier_rule;
/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
int create_ust_channel_context(struct ust_app_channel *ua_chan,
        struct ust_app_ctx *ua_ctx, struct ust_app *app)
    health_code_update();

    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
            ua_chan->obj, &ua_ctx->obj);
    pthread_mutex_unlock(&app->sock_lock);
        if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("UST app create channel context failed for app (pid: %d) "
                    "with ret %d", app->pid, ret);
        } else {
            /*
             * This is normal behavior, an application can die during the
             * creation process. Don't report an error so the execution can
             * continue normally.
             */
            DBG3("UST app add context failed. Application is dead.");
        }

    ua_ctx->handle = ua_ctx->obj->handle;

    DBG2("UST app context handle %d created successfully for channel %s",
            ua_ctx->handle, ua_chan->name);

    health_code_update();
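/*
 * The error handling pattern above (and in the helpers that follow) treats
 * -EPIPE and -LTTNG_UST_ERR_EXITING as the application having exited, which
 * is only logged at DBG level; every other negative value is reported as a
 * real error.
 */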
/*
 * Set the filter on the tracer.
 */
static int set_ust_object_filter(struct ust_app *app,
        const struct lttng_bytecode *bytecode,
        struct lttng_ust_abi_object_data *ust_object)
    struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;

    health_code_update();

    ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
    if (!ust_bytecode) {
        ret = -LTTNG_ERR_NOMEM;

    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_set_filter(app->sock, ust_bytecode,
            ust_object);
    pthread_mutex_unlock(&app->sock_lock);
        if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("UST app set object filter failed: object = %p of app pid = %d, ret = %d",
                    ust_object, app->pid, ret);
        } else {
            /*
             * This is normal behavior, an application can die during the
             * creation process. Don't report an error so the execution can
             * continue normally.
             */
            DBG3("Failed to set UST app object filter. Application is dead.");
        }

    DBG2("UST filter successfully set: object = %p", ust_object);

    health_code_update();
/*
 * Set a capture bytecode for the passed object.
 * The sequence number enforces the ordering at runtime and on reception of
 * the captured payloads.
 */
static int set_ust_capture(struct ust_app *app,
        const struct lttng_bytecode *bytecode,
        unsigned int capture_seqnum,
        struct lttng_ust_abi_object_data *ust_object)
    struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;

    health_code_update();

    ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
    if (!ust_bytecode) {
        ret = -LTTNG_ERR_NOMEM;

    /*
     * Set the sequence number to ensure the capture of fields is ordered.
     */
    ust_bytecode->seqnum = capture_seqnum;

    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_set_capture(app->sock, ust_bytecode,
            ust_object);
    pthread_mutex_unlock(&app->sock_lock);
        if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("UST app set object capture failed: object = %p of app pid = %d, ret = %d",
                    ust_object, app->pid, ret);
        } else {
            /*
             * This is normal behavior, an application can die during the
             * creation process. Don't report an error so the execution can
             * continue normally.
             */
            DBG3("Failed to set UST app object capture. Application is dead.");
        }

    DBG2("UST capture successfully set: object = %p", ust_object);

    health_code_update();
struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
        const struct lttng_event_exclusion *exclusion)
    struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
    size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
            LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;

    ust_exclusion = zmalloc(exclusion_alloc_size);
    if (!ust_exclusion) {

    assert(sizeof(struct lttng_event_exclusion) ==
            sizeof(struct lttng_ust_abi_event_exclusion));
    memcpy(ust_exclusion, exclusion, exclusion_alloc_size);

    return ust_exclusion;
/*
 * Set event exclusions on the tracer.
 */
static int set_ust_object_exclusions(struct ust_app *app,
        const struct lttng_event_exclusion *exclusions,
        struct lttng_ust_abi_object_data *ust_object)
    struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;

    assert(exclusions && exclusions->count > 0);

    health_code_update();

    ust_exclusions = create_ust_exclusion_from_exclusion(
            exclusions);
    if (!ust_exclusions) {
        ret = -LTTNG_ERR_NOMEM;

    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
    pthread_mutex_unlock(&app->sock_lock);
        if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("Failed to set UST app exclusions for object %p of app (pid: %d) "
                    "with ret %d", ust_object, app->pid, ret);
        } else {
            /*
             * This is normal behavior, an application can die during the
             * creation process. Don't report an error so the execution can
             * continue normally.
             */
            DBG3("Failed to set UST app object exclusions. Application is dead.");
        }

    DBG2("UST exclusions set successfully for object %p", ust_object);

    health_code_update();
    free(ust_exclusions);
/*
 * Disable the specified event on the UST tracer for the UST session.
 */
static int disable_ust_object(struct ust_app *app,
        struct lttng_ust_abi_object_data *object)
    health_code_update();

    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_disable(app->sock, object);
    pthread_mutex_unlock(&app->sock_lock);
        if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("Failed to disable UST app object %p app (pid: %d) with ret %d",
                    object, app->pid, ret);
        } else {
            /*
             * This is normal behavior, an application can die during the
             * creation process. Don't report an error so the execution can
             * continue normally.
             */
            DBG3("Failed to disable UST app object. Application is dead.");
        }

    DBG2("UST app object %p disabled successfully for app (pid: %d)",
            object, app->pid);

    health_code_update();
/*
 * Disable the specified channel on the UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
        struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
    health_code_update();

    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_disable(app->sock, ua_chan->obj);
    pthread_mutex_unlock(&app->sock_lock);
        if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("UST app channel %s disable failed for app (pid: %d) "
                    "and session handle %d with ret %d",
                    ua_chan->name, app->pid, ua_sess->handle, ret);
        } else {
            /*
             * This is normal behavior, an application can die during the
             * creation process. Don't report an error so the execution can
             * continue normally.
             */
            DBG3("UST app disable channel failed. Application is dead.");
        }

    DBG2("UST app channel %s disabled successfully for app (pid: %d)",
            ua_chan->name, app->pid);

    health_code_update();
/*
 * Enable the specified channel on the UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
        struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
    health_code_update();

    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_enable(app->sock, ua_chan->obj);
    pthread_mutex_unlock(&app->sock_lock);
        if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("UST app channel %s enable failed for app (pid: %d) "
                    "and session handle %d with ret %d",
                    ua_chan->name, app->pid, ua_sess->handle, ret);
        } else {
            /*
             * This is normal behavior, an application can die during the
             * creation process. Don't report an error so the execution can
             * continue normally.
             */
            DBG3("UST app enable channel failed. Application is dead.");
        }

    ua_chan->enabled = 1;

    DBG2("UST app channel %s enabled successfully for app (pid: %d)",
            ua_chan->name, app->pid);

    health_code_update();
/*
 * Enable the specified event on the UST tracer for the UST session.
 */
static int enable_ust_object(
        struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
    health_code_update();

    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_enable(app->sock, ust_object);
    pthread_mutex_unlock(&app->sock_lock);
        if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
                    ust_object, app->pid, ret);
        } else {
            /*
             * This is normal behavior, an application can die during the
             * creation process. Don't report an error so the execution can
             * continue normally.
             */
            DBG3("Failed to enable UST app object. Application is dead.");
        }

    DBG2("UST app object %p enabled successfully for app (pid: %d)",
            ust_object, app->pid);

    health_code_update();
/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
        struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
    struct ust_app_stream *stream, *stmp;

    health_code_update();

    DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
            app->sock);

    /* Send channel to the application. */
    ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
    if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
        ret = -ENOTCONN; /* Caused by app exiting. */
    } else if (ret < 0) {

    health_code_update();

    /* Send all streams to application. */
    cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
        ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
        if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
            ret = -ENOTCONN; /* Caused by app exiting. */
        } else if (ret < 0) {

        /* We don't need the stream anymore once sent to the tracer. */
        cds_list_del(&stream->list);
        delete_ust_app_stream(-1, stream, app);

    /* Flag the channel that it is sent to the application. */
    ua_chan->is_sent = 1;

    health_code_update();
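/*
 * Note the delete_ust_app_stream(-1, ...) call above: once a stream has been
 * sent to the application, only the sessiond-side bookkeeping needs tearing
 * down, so a negative socket is passed and no release command is sent to the
 * application.
 */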
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 */
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
        struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
    health_code_update();

    /* Create UST event on tracer */
    pthread_mutex_lock(&app->sock_lock);
    ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
            &ua_event->obj);
    pthread_mutex_unlock(&app->sock_lock);
        if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
            ERR("Error ustctl create event %s for app pid: %d with ret %d",
                    ua_event->attr.name, app->pid, ret);
        } else {
            /*
             * This is normal behavior, an application can die during the
             * creation process. Don't report an error so the execution can
             * continue normally.
             */
            DBG3("UST app create event failed. Application is dead.");
        }

    ua_event->handle = ua_event->obj->handle;

    DBG2("UST app event %s created successfully for pid:%d object: %p",
            ua_event->attr.name, app->pid, ua_event->obj);

    health_code_update();

    /* Set filter if one is present. */
    if (ua_event->filter) {
        ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);

    /* Set exclusions for the event */
    if (ua_event->exclusion) {
        ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);

    /* Events are created disabled on the tracer; enable it if requested. */
    if (ua_event->enabled) {
        /*
         * We now need to explicitly enable the event, since it
         * is now disabled at creation.
         */
        ret = enable_ust_object(app, ua_event->obj);
            /*
             * If we hit an EPERM, something is wrong with our enable call. If
             * we get an EEXIST, there is a problem on the tracer side since we
             * just created it.
             */
            case -LTTNG_UST_ERR_PERM:
                /* Code flow problem */
            case -LTTNG_UST_ERR_EXIST:
                /* It's OK for our use case. */

    health_code_update();
static int init_ust_event_notifier_from_event_rule(
        const struct lttng_event_rule *rule,
        struct lttng_ust_abi_event_notifier *event_notifier)
    enum lttng_event_rule_status status;
    enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
    int loglevel = -1, ret = 0;
    const char *pattern;

    /* For now, only LTTNG_EVENT_RULE_TYPE_TRACEPOINT is supported. */
    assert(lttng_event_rule_get_type(rule) ==
            LTTNG_EVENT_RULE_TYPE_TRACEPOINT);

    memset(event_notifier, 0, sizeof(*event_notifier));

    if (lttng_event_rule_targets_agent_domain(rule)) {
        /*
         * Special event for agents.
         * The actual meat of the event is in the filter that will be
         * attached later on.
         * Set the default values for the agent event.
         */
        pattern = event_get_default_agent_ust_name(
                lttng_event_rule_get_domain_type(rule));
        ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
        const struct lttng_log_level_rule *log_level_rule;

        status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
        if (status != LTTNG_EVENT_RULE_STATUS_OK) {
            /* At this point, this is a fatal error. */

        status = lttng_event_rule_tracepoint_get_log_level_rule(
                rule, &log_level_rule);
        if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
            ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
        } else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
            enum lttng_log_level_rule_status llr_status;

            switch (lttng_log_level_rule_get_type(log_level_rule)) {
            case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
                ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
                llr_status = lttng_log_level_rule_exactly_get_level(
                        log_level_rule, &loglevel);
            case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
                ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
                llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
                        log_level_rule, &loglevel);

            assert(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
            /* At this point this is a fatal error. */

    event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
    ret = lttng_strncpy(event_notifier->event.name, pattern,
            LTTNG_UST_ABI_SYM_NAME_LEN - 1);
        ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
                pattern);

    event_notifier->event.loglevel_type = ust_loglevel_type;
    event_notifier->event.loglevel = loglevel;
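/*
 * Summary of the mapping above: no log level rule -> LOGLEVEL_ALL,
 * "exactly" -> LOGLEVEL_SINGLE, "at least as severe as" -> LOGLEVEL_RANGE,
 * with the numerical level stored in event_notifier->event.loglevel.
 */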
2093 * Create the specified event notifier against the user space tracer of a
 * given application.
 */
static int create_ust_event_notifier(struct ust_app *app,
		struct ust_app_event_notifier_rule *ua_event_notifier_rule)
{
	int ret = 0;
	enum lttng_condition_status condition_status;
	const struct lttng_condition *condition = NULL;
	struct lttng_ust_abi_event_notifier event_notifier;
	const struct lttng_event_rule *event_rule = NULL;
	unsigned int capture_bytecode_count = 0, i;
	enum lttng_condition_status cond_status;

	health_code_update();
	assert(app->event_notifier_group.object);

	condition = lttng_trigger_get_const_condition(
			ua_event_notifier_rule->trigger);
	assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_ON_EVENT);

	condition_status = lttng_condition_on_event_get_rule(
			condition, &event_rule);
	assert(condition_status == LTTNG_CONDITION_STATUS_OK);

	assert(lttng_event_rule_get_type(event_rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);

	init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
	event_notifier.event.token = ua_event_notifier_rule->token;
	event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;

	/* Create UST event notifier against the tracer. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event_notifier(app->sock, &event_notifier,
			app->event_notifier_group.object,
			&ua_event_notifier_rule->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event notifier: name = '%s', app = '%s' (ppid: %d), ret = %d",
					event_notifier.event.name, app->name,
					app->ppid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die
			 * during the creation process. Don't report an error so
			 * the execution can continue normally.
			 */
			ret = 0;
			DBG3("UST app create event notifier failed (application is dead): app = '%s' (ppid = %d)",
					app->name, app->ppid);
		}

		goto error;
	}

	ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;

	DBG2("UST app event notifier %s created successfully: app = '%s' (ppid: %d), object: %p",
			event_notifier.event.name, app->name, app->ppid,
			ua_event_notifier_rule->obj);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event_notifier_rule->filter) {
		ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
				ua_event_notifier_rule->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event. */
	if (ua_event_notifier_rule->exclusion) {
		ret = set_ust_object_exclusions(app,
				ua_event_notifier_rule->exclusion,
				ua_event_notifier_rule->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set the capture bytecodes. */
	cond_status = lttng_condition_on_event_get_capture_descriptor_count(
			condition, &capture_bytecode_count);
	assert(cond_status == LTTNG_CONDITION_STATUS_OK);

	for (i = 0; i < capture_bytecode_count; i++) {
		const struct lttng_bytecode *capture_bytecode =
				lttng_condition_on_event_get_capture_bytecode_at_index(
						condition, i);

		ret = set_ust_capture(app, capture_bytecode, i,
				ua_event_notifier_rule->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/*
	 * We now need to explicitly enable the event, since it
	 * is disabled at creation.
	 */
	ret = enable_ust_object(app, ua_event_notifier_rule->obj);
	if (ret < 0) {
		/*
		 * If we hit an EPERM, something is wrong with our enable call.
		 * If we get an EEXIST, there is a problem on the tracer side
		 * since we just created it.
		 */
		switch (ret) {
		case -LTTNG_UST_ERR_PERM:
			/* Code flow problem. */
			abort();
		case -LTTNG_UST_ERR_EXIST:
			/* It's OK for our use case. */
			ret = 0;
			break;
		default:
			break;
		}

		goto error;
	}

	ua_event_notifier_rule->enabled = true;

error:
	health_code_update();
	return ret;
}
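/*
 * Note on ordering: the tracer creates event notifiers in the disabled
 * state, so the filter, exclusions and capture bytecodes above are all
 * attached before the final enable_ust_object() call makes the notifier
 * active.
 */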
/*
 * Copy data between an UST app event and a LTT event.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = lttng_bytecode_copy(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
				LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}
/*
 * Copy data between an UST app channel and a LTT channel.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;

	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */
	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
/*
 * Copy data between a UST app session and a regular LTT session.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct tm *timeinfo;
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	timeinfo = localtime(&app->registration_time);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH,
				lttng_credentials_get_uid(&ua_sess->real_credentials),
				app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		PERROR("asprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
			sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
			sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
					app->name, app->pid, datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_UID_PATH,
					app->uid, app->bits_per_long);
			break;
		default:
			assert(0);
			goto error;
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			assert(0);
			goto error;
		}

		strncat(ua_sess->shm_path, tmp_shm_path,
				sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}
	return;

error:
	consumer_output_put(ua_sess->consumer);
}
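/*
 * Note: for per-PID buffers the trace and shm paths embed the application
 * name, pid and registration datetime, while per-UID paths are derived from
 * the credentials and the application bitness.
 */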
/*
 * Lookup session wrapper.
 */
static
void __lookup_session_by_app(const struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
/*
 * Return ust app session from the app session hashtable using the UST session
 * id.
 */
static struct ust_app_session *lookup_session_by_app(
		const struct ltt_ust_session *usess, struct ust_app *app)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		goto error;
	}

	return caa_container_of(node, struct ust_app_session, node);

error:
	return NULL;
}
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major, app->version.minor,
			reg_pid->root_shm_path, reg_pid->shm_path,
			lttng_credentials_get_uid(&ua_sess->effective_credentials),
			lttng_credentials_get_gid(&ua_sess->effective_credentials),
			ua_sess->tracing_id,
			app->uid);
	if (ret < 0) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * initialized.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_uid->root_shm_path,
			reg_uid->shm_path, usess->uid, usess->gid,
			ua_sess->tracing_id, app->uid);
	if (ret < 0) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * initialized.
		 */
		buffer_reg_uid_destroy(reg_uid, NULL);
		goto error;
	}

	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");
end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session();
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	if (ua_sess->handle == -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd,
				&ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
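/*
 * Note: a ua_sess handle of -1 means the session exists locally but has not
 * yet been created on the tracer side; the ustctl_create_session() call above
 * is what assigns the tracer-side handle.
 */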
/*
 * Match function for a hash table lookup of ust_app_ctx.
 *
 * It matches an ust app context based on the context type and, in the case
 * of perf counters, their name.
 */
static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_ctx *ctx;
	const struct lttng_ust_context_attr *key;

	assert(node);
	assert(_key);

	ctx = caa_container_of(node, struct ust_app_ctx, node.node);
	key = _key;

	/* Context type */
	if (ctx->ctx.ctx != key->ctx) {
		goto no_match;
	}

	switch (key->ctx) {
	case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
		if (strncmp(key->u.perf_counter.name,
				ctx->ctx.u.perf_counter.name,
				sizeof(key->u.perf_counter.name))) {
			goto no_match;
		}
		break;
	case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
		if (strcmp(key->u.app_ctx.provider_name,
				ctx->ctx.u.app_ctx.provider_name) ||
				strcmp(key->u.app_ctx.ctx_name,
					ctx->ctx.u.app_ctx.ctx_name)) {
			goto no_match;
		}
		break;
	default:
		break;
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}
/*
 * Lookup for an ust app context from an lttng_ust_context.
 *
 * Must be called while holding RCU read side lock.
 * Return an ust_app_ctx object or NULL on error.
 */
static
struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
		struct lttng_ust_context_attr *uctx)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct ust_app_ctx *app_ctx = NULL;

	assert(uctx);
	assert(ht);

	/* Lookup using the lttng_ust_context_type and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
			ht_match_ust_app_ctx, uctx, &iter.iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (!node) {
		goto end;
	}

	app_ctx = caa_container_of(node, struct ust_app_ctx, node);

end:
	return app_ctx;
}
/*
 * Create a context for the channel on the tracer.
 *
 * Called with UST app session lock held and a RCU read side lock.
 */
static
int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
		struct lttng_ust_context_attr *uctx,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app adding context to channel %s", ua_chan->name);

	ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
	if (ua_ctx) {
		ret = -EEXIST;
		goto error;
	}

	ua_ctx = alloc_ust_app_ctx(uctx);
	if (ua_ctx == NULL) {
		/* malloc failed */
		ret = -ENOMEM;
		goto error;
	}

	lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
	lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
	cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);

	ret = create_ust_channel_context(ua_chan, ua_ctx, app);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
/*
 * Enable on the tracer side a ust app event for the session and channel.
 *
 * Called with UST app session lock held.
 */
static
int enable_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_event *ua_event, struct ust_app *app)
{
	int ret;

	ret = enable_ust_object(app, ua_event->obj);
	if (ret < 0) {
		goto error;
	}

	ua_event->enabled = 1;

error:
	return ret;
}
/*
 * Disable on the tracer side a ust app event for the session and channel.
 */
static int disable_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_event *ua_event, struct ust_app *app)
{
	int ret;

	ret = disable_ust_object(app, ua_event->obj);
	if (ret < 0) {
		goto error;
	}

	ua_event->enabled = 0;

error:
	return ret;
}
/*
 * Lookup ust app channel for session and disable it on the tracer side.
 */
static
int disable_ust_app_channel(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app *app)
{
	int ret;

	ret = disable_ust_channel(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	ua_chan->enabled = 0;

error:
	return ret;
}
/*
 * Lookup ust app channel for session and enable it on the tracer side. This
 * MUST be called with a RCU read side lock acquired.
 */
static int enable_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node == NULL) {
		DBG2("Unable to find channel %s in ust session id %" PRIu64,
				uchan->name, ua_sess->tracing_id);
		goto error;
	}

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	ret = enable_ust_channel(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * Called with UST app session lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry,
		uint64_t trace_archive_id)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry, usess->current_trace_chunk);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	health_code_update();
	return 0;

error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	return ret;
}
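/*
 * File descriptor accounting: one fd is reserved for the channel itself and
 * DEFAULT_UST_STREAM_FD_NUM (2) per expected stream, mirroring what the
 * consumer will hand back. Both reservations are released on the error paths
 * above.
 */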
/*
 * Duplicate the ust data object of the ust app stream and save it in the
 * buffer registry stream.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
		struct ust_app_stream *stream)
{
	int ret;

	assert(reg_stream);
	assert(stream);

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 2);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate stream");
		goto error;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&stream->obj,
			reg_stream->obj.ust);
	if (ret < 0) {
		ERR("Duplicate stream obj from %p to %p failed with ret %d",
				reg_stream->obj.ust, stream->obj, ret);
		lttng_fd_put(LTTNG_FD_APPS, 2);
		goto error;
	}
	stream->handle = stream->obj->handle;

error:
	return ret;
}
/*
 * Duplicate the ust data object of the ust app channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(buf_reg_chan);
	assert(ua_chan);

	/* Need two fds for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				buf_reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
/*
 * For a given channel buffer registry, setup all streams of the given ust
 * application channel.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
		struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_stream *stream, *stmp;

	assert(buf_reg_chan);
	assert(ua_chan);

	DBG2("UST app setup buffer registry stream");

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		struct buffer_reg_stream *reg_stream;

		ret = buffer_reg_stream_create(&reg_stream);
		if (ret < 0) {
			goto error;
		}

		/*
		 * Keep original pointer and nullify it in the stream so the delete
		 * stream call does not release the object.
		 */
		reg_stream->obj.ust = stream->obj;
		stream->obj = NULL;
		buffer_reg_stream_add(reg_stream, buf_reg_chan);

		/* We don't need the streams anymore. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}

error:
	return ret;
}
/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important, the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *buf_reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(buf_reg_chan);
	buf_reg_chan->consumer_key = ua_chan->key;
	buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
	buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, buf_reg_chan);

	if (regp) {
		*regp = buf_reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
/*
 * Setup buffer registry channel for the given session registry and application
 * channel object. If regp pointer is valid, it's set with the created object.
 *
 * Return 0 on success else a negative value.
 */
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
		struct ust_app *app)
{
	int ret;

	assert(reg_sess);
	assert(buf_reg_chan);
	assert(ua_chan);
	assert(ua_chan->obj);

	DBG2("UST app setup buffer registry channel for %s", ua_chan->name);

	/* Setup all streams for the registry. */
	ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
	if (ret < 0) {
		goto error;
	}

	buf_reg_chan->obj.ust = ua_chan->obj;
	ua_chan->obj = NULL;

	return 0;

error:
	buffer_reg_channel_remove(reg_sess, buf_reg_chan);
	buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
	return ret;
}
/*
 * Send buffer registry channel to the application.
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(buf_reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	ret = duplicate_channel_object(buf_reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN;	/* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream, app);
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			}
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
error:
	return ret;
}
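/*
 * Note: -EPIPE and -LTTNG_UST_ERR_EXITING from the send paths above are
 * normalized to -ENOTCONN so callers can tell "application exited" apart
 * from real errors.
 */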
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * This MUST be called with a RCU read side lock acquired.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *buf_reg_chan;
	struct ltt_session *session = NULL;
	enum lttng_error_code notification_ret;
	struct ust_registry_channel *ust_reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be find, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (buf_reg_chan) {
		goto send_channel;
	}

	/* Create the buffer registry channel object. */
	ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Create the buffers on the consumer side. This call populates the
	 * ust app channel object with all streams and data object.
	 */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, reg_uid->registry->reg.ust,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

		/*
		 * Let's remove the previously created buffer registry channel so
		 * it's not visible anymore in the session registry.
		 */
		ust_registry_channel_del_free(reg_uid->registry->reg.ust,
				ua_chan->tracing_channel_id, false);
		buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
		buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
		goto error;
	}

	/*
	 * Setup the streams and add it to the session registry.
	 */
	ret = setup_buffer_reg_channel(reg_uid->registry,
			ua_chan, buf_reg_chan, app);
	if (ret < 0) {
		ERR("Error setting up UST channel \"%s\"", ua_chan->name);
		goto error;
	}

	/* Notify the notification subsystem of the channel's creation. */
	pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
	ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
			ua_chan->tracing_channel_id);
	assert(ust_reg_chan);
	ust_reg_chan->consumer_key = ua_chan->key;
	ust_reg_chan = NULL;
	pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);

	notification_ret = notification_thread_command_add_channel(
			the_notification_thread_handle, session->name,
			lttng_credentials_get_uid(
					&ua_sess->effective_credentials),
			lttng_credentials_get_gid(
					&ua_sess->effective_credentials),
			ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (notification_ret != LTTNG_OK) {
		ret = - (int) notification_ret;
		ERR("Failed to add channel to notification thread");
		goto error;
	}

send_channel:
	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	if (session) {
		session_put(session);
	}
	return ret;
}
/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Called with UST app session lock held.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;
	enum lttng_error_code cmd_ret;
	struct ltt_session *session = NULL;
	uint64_t chan_reg_key;
	struct ust_registry_channel *ust_reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	/* The UST app session lock is held, registry shall not be null. */
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);
		goto error_remove_from_registry;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error_remove_from_registry;
	}

	chan_reg_key = ua_chan->key;
	pthread_mutex_lock(&registry->lock);
	ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
	assert(ust_reg_chan);
	ust_reg_chan->consumer_key = ua_chan->key;
	pthread_mutex_unlock(&registry->lock);

	cmd_ret = notification_thread_command_add_channel(
			the_notification_thread_handle, session->name,
			lttng_credentials_get_uid(
					&ua_sess->effective_credentials),
			lttng_credentials_get_gid(
					&ua_sess->effective_credentials),
			ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (cmd_ret != LTTNG_OK) {
		ret = - (int) cmd_ret;
		ERR("Failed to add channel to notification thread");
		goto error_remove_from_registry;
	}

error_remove_from_registry:
	if (ret) {
		ust_registry_channel_del_free(registry, ua_chan->key, false);
	}
error:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
	return ret;
}
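/*
 * Note: unlike the per-UID path, the channel registry entry added here is
 * keyed on the application channel key (ua_chan->key) rather than on the
 * tracing channel id, since each application owns its own buffers.
 */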
/*
 * From an already allocated ust app channel, create the channel buffers if
 * needed and send them to the application. This MUST be called with a RCU read
 * side lock acquired.
 *
 * Called with UST app session lock held.
 *
 * Return 0 on success or else a negative value. Returns -ENOTCONN if
 * the application exited concurrently.
 */
static int ust_app_channel_send(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(app);
	assert(usess);
	assert(usess->active);
	assert(ua_sess);
	assert(ua_chan);

	/* Handle buffer type before sending the channel to the application. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
		break;
	}
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	/* Initialize ust objd object using the received handle and add it. */
	lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
	lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);

	/* If channel is not enabled, disable it on the tracer */
	if (!ua_chan->enabled) {
		ret = disable_ust_channel(app, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Create UST app channel and return it through ua_chanp if not NULL.
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan,
		enum lttng_ust_abi_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	return ret;
}
/*
 * Create UST app event and create it on the tracer side.
 *
 * Must be called with the RCU read side lock held.
 * Called with ust app session mutex held.
 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only failure mode of alloc_ust_app_event(). */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/*
		 * Not found previously means that it does not exist on the
		 * tracer. If the application reports that the event existed,
		 * it means there is a bug in the sessiond or lttng-ust
		 * (or corruption, etc.)
		 */
		if (ret == -LTTNG_UST_ERR_EXIST) {
			ERR("Tracer for application reported that an event being created already existed: "
					"event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
					uevent->attr.name,
					app->pid, app->ppid, app->uid,
					app->gid);
		}
		goto error;
	}

	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event completed: app = '%s' (ppid: %d)",
			app->name, app->ppid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event, app);
	return ret;
}
/*
 * Create UST app event notifier rule and create it on the tracer side.
 *
 * Must be called with the RCU read side lock held.
 * Called with ust app session mutex held.
 */
static
int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;

	ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
	if (ua_event_notifier_rule == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	/* Create it on the tracer side. */
	ret = create_ust_event_notifier(app, ua_event_notifier_rule);
	if (ret < 0) {
		/*
		 * Not found previously means that it does not exist on the
		 * tracer. If the application reports that the event existed,
		 * it means there is a bug in the sessiond or lttng-ust
		 * (or corruption, etc.)
		 */
		if (ret == -LTTNG_UST_ERR_EXIST) {
			ERR("Tracer for application reported that an event notifier being created already exists: "
					"token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
					lttng_trigger_get_tracer_token(trigger),
					app->pid, app->ppid, app->uid,
					app->gid);
		}
		goto error;
	}

	lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
			&ua_event_notifier_rule->node);

	DBG2("UST app create token event rule completed: app = '%s' (ppid: %d), token = %" PRIu64,
			app->name, app->ppid, lttng_trigger_get_tracer_token(trigger));

end:
	return ret;

error:
	/* The RCU read side lock is already being held by the caller. */
	delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
	return ret;
}
/*
 * Create UST metadata and open it on the tracer side.
 *
 * Called with UST app session lock held and RCU read side lock.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;
	struct ltt_session *session = NULL;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	/* The UST app session lock is held, registry shall not be null. */
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * not having returned yet.
	 */
	registry->metadata_key = metadata->key;

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept there. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry, session->current_trace_chunk);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	if (session) {
		session_put(session);
	}
	return ret;
}
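/*
 * Note: the local 'metadata' channel object is only a carrier used to ask
 * the consumer for the metadata channel; it is deleted on all paths and only
 * registry->metadata_key survives to identify the metadata afterwards.
 */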
/*
 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
 * acquired before calling this function.
 */
struct ust_app *ust_app_find_by_pid(pid_t pid)
{
	struct ust_app *app = NULL;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app not found with pid %d", pid);
		goto error;
	}

	DBG2("Found UST app by pid %d", pid);

	app = caa_container_of(node, struct ust_app, pid_n);

error:
	return app;
}
/*
 * Allocate and init an UST app object using the registration information and
 * the command socket. This is called when the command socket connects to the
 * session daemon.
 *
 * The object is returned on success or else NULL.
 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	int ret;
	struct ust_app *lta = NULL;
	struct lttng_pipe *event_notifier_event_source_pipe = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	if ((msg->bits_per_long == 64 &&
				(uatomic_read(&the_ust_consumerd64_fd) ==
					-EINVAL)) ||
			(msg->bits_per_long == 32 &&
				(uatomic_read(&the_ust_consumerd32_fd) ==
					-EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	/*
	 * Reserve the two file descriptors of the event source pipe. The write
	 * end will be closed once it is passed to the application, at which
	 * point a single 'put' will be performed.
	 */
	ret = lttng_fd_get(LTTNG_FD_APPS, 2);
	if (ret) {
		ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s' (ppid: %d)",
				msg->name, (int) msg->ppid);
		goto error;
	}

	event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!event_notifier_event_source_pipe) {
		PERROR("Failed to open application event source pipe: '%s' (ppid = %d)",
				msg->name, msg->ppid);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error_free_pipe;
	}

	lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;

	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->notify_sock = -1;
	lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	pthread_mutex_init(&lta->sock_lock, NULL);
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);
	return lta;

error_free_pipe:
	lttng_pipe_destroy(event_notifier_event_source_pipe);
	lttng_fd_put(LTTNG_FD_APPS, 2);
error:
	return NULL;
}
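/*
 * Note on fd accounting: two fds are reserved for the event source pipe at
 * registration; the write end is closed (and one fd released) once the pipe
 * has been passed to the application in ust_app_setup_event_notifier_group().
 */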
/*
 * For a given application object, add it to every hash table.
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	app->registration_time = time(NULL);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
/*
 * Set the application version into the object.
 *
 * Return 0 on success else a negative value either an errno code or a
 * LTTng-UST error code.
 */
int ust_app_version(struct ust_app *app)
{
	int ret;

	assert(app);

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_tracer_version(app->sock, &app->version);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("UST app %d version failed with ret %d", app->sock, ret);
		} else {
			DBG3("UST app %d version failed. Application is dead", app->sock);
		}
	}

	return ret;
}
/*
 * Setup the base event notifier group.
 *
 * Return 0 on success else a negative value either an errno code or a
 * LTTng-UST error code.
 */
int ust_app_setup_event_notifier_group(struct ust_app *app)
{
	int ret;
	int event_pipe_write_fd;
	struct lttng_ust_abi_object_data *event_notifier_group = NULL;
	enum lttng_error_code lttng_ret;
	enum event_notifier_error_accounting_status event_notifier_error_accounting_status;

	assert(app);

	/* Get the write side of the pipe. */
	event_pipe_write_fd = lttng_pipe_get_writefd(
			app->event_notifier_group.event_pipe);

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event_notifier_group(app->sock,
			event_pipe_write_fd, &event_notifier_group);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("Failed to create application event notifier group: ret = %d, app socket fd = %d, event_pipe_write_fd = %d",
					ret, app->sock, event_pipe_write_fd);
		} else {
			DBG("Failed to create application event notifier group (application is dead): app socket fd = %d",
					app->sock);
		}

		goto error;
	}

	ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
	if (ret) {
		ERR("Failed to close write end of the application's event source pipe: app = '%s' (ppid = %d)",
				app->name, app->ppid);
		goto error;
	}

	/*
	 * Release the file descriptor that was reserved for the write-end of
	 * the pipe.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);

	lttng_ret = notification_thread_command_add_tracer_event_source(
			the_notification_thread_handle,
			lttng_pipe_get_readfd(
					app->event_notifier_group.event_pipe),
			LTTNG_DOMAIN_UST);
	if (lttng_ret != LTTNG_OK) {
		ERR("Failed to add tracer event source to notification thread");
		ret = - (int) lttng_ret;
		goto error;
	}

	/* Assign handle only when the complete setup is valid. */
	app->event_notifier_group.object = event_notifier_group;

	event_notifier_error_accounting_status =
			event_notifier_error_accounting_register_app(app);
	if (event_notifier_error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
		ERR("Failed to setup event notifier error accounting for app");
		ret = -1;
		goto error;
	}

	return ret;

error:
	ustctl_release_object(app->sock, app->event_notifier_group.object);
	free(app->event_notifier_group.object);
	app->event_notifier_group.object = NULL;
	return ret;
}
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			(void) ust_app_flush_app_session(lta, ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);

		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Remove application from PID hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the
	 * apps_notify_thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Free memory */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
}
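/*
 * Note: metadata is pushed (and, for per-PID buffers, streams flushed) while
 * the app is still reachable in the hash tables; the actual ust_app object
 * is reclaimed later through call_rcu() once readers are done.
 */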
/*
 * Fill events array with all events name of all registered apps.
 */
int ust_app_list_events(struct lttng_event **events)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event *tmp_event;

	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app events");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_abi_tracepoint_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		pthread_mutex_lock(&app->sock_lock);
		handle = ustctl_tracepoint_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list events getting handle failed for app pid %d",
						app->pid);
			}
			pthread_mutex_unlock(&app->sock_lock);
			continue;
		}

		while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
					&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				int release_ret;

				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list get failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list get failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Continue normal execution.
					 */
					break;
				}
				free(tmp_event);
				release_ret = ustctl_release_handle(app->sock, handle);
				if (release_ret < 0 &&
						release_ret != -LTTNG_UST_ERR_EXITING &&
						release_ret != -EPIPE) {
					ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
				}
				pthread_mutex_unlock(&app->sock_lock);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event *new_tmp_event;
				size_t new_nbmem;

				new_nbmem = nbmem << 1;
				DBG2("Reallocating event list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
						new_nbmem * sizeof(struct lttng_event));
				if (new_tmp_event == NULL) {
					int release_ret;

					PERROR("realloc ust app events");
					free(tmp_event);
					ret = -ENOMEM;
					release_ret = ustctl_release_handle(app->sock, handle);
					if (release_ret < 0 &&
							release_ret != -LTTNG_UST_ERR_EXITING &&
							release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
					}
					pthread_mutex_unlock(&app->sock_lock);
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
						(new_nbmem - nbmem) * sizeof(struct lttng_event));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}
			memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
			tmp_event[count].loglevel = uiter.loglevel;
			tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
			tmp_event[count].pid = app->pid;
			tmp_event[count].enabled = -1;
			count++;
		}

		ret = ustctl_release_handle(app->sock, handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
		}
	}

	ret = count;
	*events = tmp_event;

	DBG2("UST app list events done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
/*
 * Fill fields array with all event fields of all registered apps.
 */
int ust_app_list_event_fields(struct lttng_event_field **fields)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event_field *tmp_event;

	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app event fields");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_abi_field_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		pthread_mutex_lock(&app->sock_lock);
		handle = ustctl_tracepoint_field_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list field getting handle failed for app pid %d",
						app->pid);
			}
			pthread_mutex_unlock(&app->sock_lock);
			continue;
		}

		while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
					&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				int release_ret;

				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list field failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list field failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Reset list and count for next app.
					 */
					break;
				}
				free(tmp_event);
				release_ret = ustctl_release_handle(app->sock, handle);
				pthread_mutex_unlock(&app->sock_lock);
				if (release_ret < 0 &&
						release_ret != -LTTNG_UST_ERR_EXITING &&
						release_ret != -EPIPE) {
					ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
				}
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event_field *new_tmp_event;
				size_t new_nbmem;

				new_nbmem = nbmem << 1;
				DBG2("Reallocating event field list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
						new_nbmem * sizeof(struct lttng_event_field));
				if (new_tmp_event == NULL) {
					int release_ret;

					PERROR("realloc ust app event fields");
					free(tmp_event);
					ret = -ENOMEM;
					release_ret = ustctl_release_handle(app->sock, handle);
					pthread_mutex_unlock(&app->sock_lock);
					if (release_ret < 0 &&
							release_ret != -LTTNG_UST_ERR_EXITING &&
							release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
					}
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
						(new_nbmem - nbmem) * sizeof(struct lttng_event_field));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}

			memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
			/* Mapping between these enums matches 1 to 1. */
			tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
			tmp_event[count].nowrite = uiter.nowrite;

			memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
			tmp_event[count].event.loglevel = uiter.loglevel;
			tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
			tmp_event[count].event.pid = app->pid;
			tmp_event[count].event.enabled = -1;
			count++;
		}

		ret = ustctl_release_handle(app->sock, handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 &&
				ret != -LTTNG_UST_ERR_EXITING &&
				ret != -EPIPE) {
			ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
		}
	}

	ret = count;
	*fields = tmp_event;

	DBG2("UST app list event fields done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
/*
 * Free and clean all traceable apps of the global list.
 *
 * Should _NOT_ be called with RCU read-side lock held.
 */
void ust_app_clean_list(void)
{
	int ret;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	DBG2("UST app cleaning registered apps hash table");

	/* Cleanup notify socket hash table */
	if (ust_app_ht_by_notify_sock) {
		cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
				notify_sock_n.node) {
			/*
			 * Assert that all notifiers are gone as all triggers
			 * are unregistered prior to this clean-up.
			 */
			assert(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
			ust_app_notify_sock_unregister(app->notify_sock);
		}
	}

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = lttng_ht_del(ust_app_ht, &iter);
		call_rcu(&app->pid_n.head, delete_ust_app_rcu);
	}

	/* Cleanup socket hash table */
	if (ust_app_ht_by_sock) {
		cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
				sock_n.node) {
			ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
		}
	}

	/* Destroy is done only when the ht is empty */
	ht_cleanup_push(ust_app_ht);
	if (ust_app_ht_by_sock) {
		ht_cleanup_push(ust_app_ht_by_sock);
	}
	if (ust_app_ht_by_notify_sock) {
		ht_cleanup_push(ust_app_ht_by_notify_sock);
	}
}
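
/*
 * The three global app hash tables (by PID, by command socket and by notify
 * socket) are expected to be allocated once, before any application can
 * register, and are only torn down through ust_app_clean_list() above.
 */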
/*
 * Init UST app hash table.
 */
int ust_app_ht_alloc(void)
{
	ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!ust_app_ht) {
		return -1;
	}
	ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!ust_app_ht_by_sock) {
		return -1;
	}
	ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!ust_app_ht_by_notify_sock) {
		return -1;
	}
	return 0;
}
/*
 * For a specific UST session, disable the channel for all registered apps.
 */
int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	assert(usess->active);
	DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
			uchan->name, usess->id);

	/* For every registered application */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;

		if (!app->compatible) {
			/* TODO: In time, notify the caller that this is a version error. */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session is found for the app, the channel must be there */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		/* The channel must not be already disabled */
		assert(ua_chan->enabled == 1);

		/* Disable channel onto application */
		ret = disable_ust_app_channel(ua_sess, ua_chan, app);
		/* XXX: We might want to report this error at some point... */
	}
}
/*
 * For a specific UST session, enable the channel for all registered apps.
 */
int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess;

	assert(usess->active);
	DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
			uchan->name, usess->id);

	/* For every registered application */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/* TODO: In time, notify the caller that this is a version error. */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Enable channel onto application */
		ret = enable_ust_app_channel(ua_sess, uchan, app);
		/* XXX: We might want to report this error at some point... */
	}
}
/*
 * Disable an event in a channel and for a specific session.
 */
int ust_app_disable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	assert(usess->active);
	DBG("UST app disabling event %s for all apps in channel "
			"%s for session id %" PRIu64,
			uevent->attr.name, uchan->name, usess->id);

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/* TODO: In time, notify the caller that this is a version error. */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d. "
					"Skipping", uchan->name, usess->id, app->pid);
			continue;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel,
				uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("Event %s not found in channel %s for app pid %d. "
					"Skipping", uevent->attr.name, uchan->name, app->pid);
			continue;
		}

		ret = disable_ust_app_event(ua_sess, ua_event, app);
		/* XXX: Report error someday... */
	}
}
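
/*
 * Create (or, for the metadata channel, copy the attributes of) a channel for
 * the given application session from the session-level channel definition and
 * send it to the application.
 */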
/* The ua_sess lock must be held by the caller. */
static int ust_app_channel_create(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		struct ust_app_channel **_ua_chan)
{
	int ret = 0;
	struct ust_app_channel *ua_chan = NULL;

	ASSERT_LOCKED(ua_sess->lock);

	if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
			sizeof(uchan->name))) {
		copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
				&uchan->attr);
	} else {
		struct ltt_ust_context *uctx = NULL;

		/*
		 * Create channel onto application and synchronize its
		 * configuration.
		 */
		ret = ust_app_channel_allocate(ua_sess, uchan,
				LTTNG_UST_ABI_CHAN_PER_CPU, usess,
				&ua_chan);
		ret = ust_app_channel_send(app, usess, ua_sess, ua_chan);

		cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
			ret = create_ust_app_channel_context(ua_chan,
					&uctx->ctx, app);
		}
	}

	/*
	 * The application's socket is not valid. Either a bad socket
	 * or a timeout on it. We can't inform the caller that for a
	 * specific app, the session failed so let's continue here.
	 */
	ret = 0;	/* Not an error. */

	if (ret == 0 && _ua_chan) {
		/*
		 * Only return the application's channel on success. Note
		 * that the channel can still be part of the application's
		 * channel hashtable on error.
		 */
		*_ua_chan = ua_chan;
	}
}
/*
 * Enable event for a specific session and channel on the tracer.
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	assert(usess->active);
	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps and enabled on the
	 * tracer.
	 */

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/* TODO: In time, notify the caller that this is a version error. */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/*
		 * It is possible that the channel cannot be found if
		 * the channel/event creation occurs concurrently with
		 * an application exit.
		 */
		if (!ua_chan_node) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d. "
					"Skipping app", uevent->attr.name, app->pid);
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}

		pthread_mutex_unlock(&ua_sess->lock);
	}
}
/*
 * For a specific existing UST session and UST channel, creates the event for
 * all registered apps.
 */
int ust_app_create_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	assert(usess->active);
	DBG("UST app creating event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/* TODO: In time, notify the caller that this is a version error. */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret != -LTTNG_UST_ERR_EXIST) {
			/* Possible value at this point: -ENOMEM. If so, we stop! */
			break;
		}
		DBG2("UST app event %s already exist on app PID %d",
				uevent->attr.name, app->pid);
	}
}
/*
 * Start tracing for a specific UST session and app.
 *
 * Called with UST app session lock held.
 */
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	if (ua_sess->enabled) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	health_code_update();

skip_setup:
	/* This starts the UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
		}
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;
	ua_sess->enabled = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();
}
/*
 * Stop tracing for a specific UST session and app.
 */
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end_no_session;
	}

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
		}
		goto error_rcu_unlock;
	}

	health_code_update();
	ua_sess->enabled = 0;

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);
	/* The UST app session is held; the registry shall not be null. */
	assert(registry);

	/* Push metadata for application before freeing the application. */
	(void) push_metadata(registry, ua_sess->consumer);

	pthread_mutex_unlock(&ua_sess->lock);

end_no_session:
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();
	return -1;
}
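
/*
 * Flush the buffers of every channel of a single application session through
 * the consumer socket matching the application's bitness. Per-UID buffers are
 * handled at the session level instead (see ust_app_flush_session() below).
 */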
int ust_app_flush_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret, retval = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Flushing app session buffers for ust app pid %d", app->pid);

	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_deleted;
	}

	health_code_update();

	/* Flushing buffers */
	socket = consumer_find_socket_by_bitness(app->bits_per_long,
			ua_sess->consumer);

	/* Flush buffers and push metadata. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
				node.node) {
			health_code_update();
			ret = consumer_flush_channel(socket, ua_chan->key);
			if (ret) {
				ERR("Error flushing consumer channel");
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		break;
	}

	health_code_update();

end_deleted:
	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	health_code_update();
	return retval;
}
/*
 * Flush buffers for all applications for a specific UST session.
 * Called with UST session lock held.
 */
int ust_app_flush_session(struct ltt_ust_session *usess)
{
	DBG("Flushing session buffers for all ust apps");

	/* Flush buffers and push metadata. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		struct lttng_ht_iter iter;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata. */
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			/* Push metadata. */
			(void) push_metadata(ust_session_reg, usess->consumer);
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_flush_app_session(app, ua_sess);
		}
		break;
	}
	}

	health_code_update();
}
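
/*
 * Clear the quiescent state of every per-PID channel of a single application
 * session on the consumer side.
 */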
int ust_app_clear_quiescent_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Clearing stream quiescent state for ust app pid %d", app->pid);

	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_unlock;
	}

	health_code_update();

	socket = consumer_find_socket_by_bitness(app->bits_per_long,
			ua_sess->consumer);
	if (!socket) {
		ERR("Failed to find consumer (%" PRIu32 ") socket",
				app->bits_per_long);
		goto end_unlock;
	}

	/* Clear quiescent state. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
				ua_chan, node.node) {
			health_code_update();
			ret = consumer_clear_quiescent_channel(socket,
					ua_chan->key);
			if (ret) {
				ERR("Error clearing quiescent state for consumer channel");
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		break;
	}

	health_code_update();

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	health_code_update();
	return ret;
}
/*
 * Clear quiescent state in each stream for all applications for a
 * specific UST session.
 * Called with UST session lock held.
 */
int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
{
	DBG("Clearing stream quiescent state for all ust apps");

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct lttng_ht_iter iter;
		struct buffer_reg_uid *reg;

		/*
		 * Clear quiescent for all per UID buffers associated to
		 * that session.
		 */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct consumer_socket *socket;
			struct buffer_reg_channel *buf_reg_chan;

			/* Get associated consumer socket. */
			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			if (!socket) {
				/*
				 * Ignore request if no consumer is found for
				 * the session.
				 */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, buf_reg_chan, node.node) {
				/*
				 * The following call will print error values so
				 * the return code is of little importance
				 * because whatever happens, we have to try them
				 * all.
				 */
				(void) consumer_clear_quiescent_channel(socket,
						buf_reg_chan->consumer_key);
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_clear_quiescent_app_session(app,
					ua_sess);
		}
		break;
	}
	}

	health_code_update();
}
/*
 * Destroy a specific UST session in apps.
 */
static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	struct ust_app_session *ua_sess;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG("Destroy tracing for ust app pid %d", app->pid);

	if (!app->compatible) {
		goto end;
	}

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		/* Session is being or is deleted. */
		goto end;
	}
	ua_sess = caa_container_of(node, struct ust_app_session, node);

	health_code_update();
	destroy_app_session(app, ua_sess);

	health_code_update();

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	health_code_update();
}
/*
 * Start tracing for the UST session.
 */
int ust_app_start_trace_all(struct ltt_ust_session *usess)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Starting all UST traces");

	/*
	 * Even though the start trace might fail, flag this session active so
	 * other applications coming in are started by default.
	 */
	usess->active = 1;

	/*
	 * In a start-stop-start use-case, we need to clear the quiescent state
	 * of each channel set by the prior stop command, thus ensuring that a
	 * following stop or destroy is sure to grab a timestamp_end near those
	 * operations, even if the packet is empty.
	 */
	(void) ust_app_clear_quiescent_session(usess);

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ust_app_global_update(usess, app);
	}
}
/*
 * Stop tracing for the UST session.
 * Called with UST session lock held.
 */
int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Stopping all UST traces");

	/*
	 * Even though the stop trace might fail, flag this session inactive so
	 * other applications coming in are not started by default.
	 */
	usess->active = 0;

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = ust_app_stop_trace(usess, app);
		/* Continue to next apps even on error */
	}

	(void) ust_app_flush_session(usess);
}
/*
 * Destroy app UST session.
 */
int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Destroy all UST traces");

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = destroy_trace(usess, app);
		/* Continue to next apps even on error */
	}
}
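
/*
 * Look up the application channel matching the given session-level channel;
 * if it does not exist yet, create it through ust_app_channel_create().
 */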
/* The ua_sess lock must be held by the caller. */
static int find_or_create_ust_app_channel(
		struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app,
		struct ltt_ust_channel *uchan,
		struct ust_app_channel **ua_chan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;

	lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node) {
		*ua_chan = caa_container_of(ua_chan_node,
				struct ust_app_channel, node);
		goto end;
	}

	ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
end:
	return ret;
}
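
/*
 * Make sure the application's view of the given event matches the session's
 * definition: create it if missing, otherwise align its enabled state.
 */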
static int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
		struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event = NULL;

	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (!ua_event) {
		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
	} else {
		if (ua_event->enabled != uevent->enabled) {
			ret = uevent->enabled ?
				enable_ust_app_event(ua_sess, ua_event, app) :
				disable_ust_app_event(ua_sess, ua_event, app);
		}
	}
}
/* Called with RCU read-side lock held. */
static void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
{
	int ret = 0;
	enum lttng_error_code ret_code;
	enum lttng_trigger_status t_status;
	struct lttng_ht_iter app_trigger_iter;
	struct lttng_triggers *triggers = NULL;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	unsigned int count, i;

	/*
	 * Currently, registering or unregistering a trigger with an
	 * event rule condition causes a full synchronization of the event
	 * notifiers.
	 *
	 * The first step attempts to add an event notifier for all registered
	 * triggers that apply to the user space tracers. Then, the
	 * application's event notifier rules are all checked against the list
	 * of registered triggers. Any event notifier that doesn't have a
	 * matching trigger can be assumed to have been disabled.
	 *
	 * All of this is inefficient, but is put in place to get the feature
	 * rolling as it is simpler at this moment. It will be optimized Soon™
	 * to allow the state of enabled event notifiers to be synchronized in
	 * a piece-wise way.
	 */

	/* Get all triggers using uid 0 (root) */
	ret_code = notification_thread_command_list_triggers(
			the_notification_thread_handle, 0, &triggers);
	if (ret_code != LTTNG_OK) {
		goto end;
	}

	t_status = lttng_triggers_get_count(triggers, &count);
	if (t_status != LTTNG_TRIGGER_STATUS_OK) {
		goto end;
	}

	for (i = 0; i < count; i++) {
		struct lttng_condition *condition;
		struct lttng_event_rule *event_rule;
		struct lttng_trigger *trigger;
		const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
		enum lttng_condition_status condition_status;
		uint64_t token;

		trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);

		token = lttng_trigger_get_tracer_token(trigger);
		condition = lttng_trigger_get_condition(trigger);

		if (lttng_condition_get_type(condition) != LTTNG_CONDITION_TYPE_ON_EVENT) {
			/* Does not apply */
			continue;
		}

		condition_status = lttng_condition_on_event_borrow_rule_mutable(condition, &event_rule);
		assert(condition_status == LTTNG_CONDITION_STATUS_OK);

		if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
			/* Skip kernel related triggers. */
			continue;
		}

		/*
		 * Find or create the associated token event rule. The caller
		 * holds the RCU read lock, so this is safe to call without
		 * explicitly acquiring it here.
		 */
		looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
				app->token_to_event_notifier_rule_ht, token);
		if (!looked_up_event_notifier_rule) {
			ret = create_ust_app_event_notifier_rule(trigger, app);
		}
	}

	/* Remove all unknown event sources from the app. */
	cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
			&app_trigger_iter.iter, event_notifier_rule,
			node.node) {
		const uint64_t app_token = event_notifier_rule->token;
		bool found = false;

		/*
		 * Check if the app event trigger still exists on the
		 * notification side.
		 */
		for (i = 0; i < count; i++) {
			uint64_t notification_thread_token;
			const struct lttng_trigger *trigger =
					lttng_triggers_get_at_index(triggers, i);

			notification_thread_token =
					lttng_trigger_get_tracer_token(trigger);

			if (notification_thread_token == app_token) {
				found = true;
				break;
			}
		}

		if (found) {
			continue;
		}

		/*
		 * This trigger was unregistered, disable it on the tracer's
		 * side.
		 */
		ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
				&app_trigger_iter);

		/* Callee logs errors. */
		(void) disable_ust_object(app, event_notifier_rule->obj);

		delete_ust_app_event_notifier_rule(
				app->sock, event_notifier_rule, app);
	}

end:
	lttng_triggers_destroy(triggers);
}
/*
 * RCU read lock must be held by the caller.
 */
static void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret = 0;
	struct cds_lfht_iter uchan_iter;
	struct ltt_ust_channel *uchan;

	cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
			uchan, node.node) {
		struct ust_app_channel *ua_chan;
		struct cds_lfht_iter uevent_iter;
		struct ltt_ust_event *uevent;

		/*
		 * Search for a matching ust_app_channel. If none is found,
		 * create it. Creating the channel will cause the ua_chan
		 * structure to be allocated, the channel buffers to be
		 * allocated (if necessary) and sent to the application, and
		 * all enabled contexts will be added to the channel.
		 */
		ret = find_or_create_ust_app_channel(usess, ua_sess,
				app, uchan, &ua_chan);
		if (ret) {
			/* Tracer is probably gone or ENOMEM. */
			goto end;
		}

		if (!ua_chan) {
			/* ua_chan will be NULL for the metadata channel */
			continue;
		}

		cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
				node.node) {
			ret = ust_app_channel_synchronize_event(ua_chan,
					uevent, ua_sess, app);
		}

		if (ua_chan->enabled != uchan->enabled) {
			ret = uchan->enabled ?
				enable_ust_app_channel(ua_sess, uchan, app) :
				disable_ust_app_channel(ua_sess, ua_chan, app);
		}
	}
end:
	return;
}
/*
 * The caller must ensure that the application is compatible and is tracked
 * by the process attribute trackers.
 */
static void ust_app_synchronize(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess = NULL;

	/*
	 * The application's configuration should only be synchronized for
	 * active sessions.
	 */
	assert(usess->active);

	ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}

	pthread_mutex_lock(&ua_sess->lock);
	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	ust_app_synchronize_all_channels(usess, ua_sess, app);

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 *
	 * The metadata channel must be created after the data channels as the
	 * consumer daemon assumes this ordering. When interacting with a relay
	 * daemon, the consumer will use this assumption to send the
	 * "STREAMS_SENT" message to the relay daemon.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer);

	pthread_mutex_unlock(&ua_sess->lock);
	/* Everything went well at this point. */
	return;

end:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
}
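
/*
 * Tear down the application session associated with the given UST session,
 * if any.
 */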
static void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
{
	struct ust_app_session *ua_sess;

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		return;
	}

	destroy_app_session(app, ua_sess);
}
/*
 * Add channels/events from UST global domain to registered apps at sock.
 *
 * Called with session lock held.
 * Called with RCU read-side lock held.
 */
void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
{
	assert(usess->active);

	DBG2("UST app global update for app sock %d for session id %" PRIu64,
			app->sock, usess->id);

	if (!app->compatible) {
		return;
	}
	if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
				usess, app->pid) &&
			trace_ust_id_tracker_lookup(
				LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
				usess, app->uid) &&
			trace_ust_id_tracker_lookup(
				LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
				usess, app->gid)) {
		/*
		 * Synchronize the application's internal tracing configuration
		 * and start tracing.
		 */
		ust_app_synchronize(usess, app);
		ust_app_start_trace(usess, app);
	} else {
		ust_app_global_destroy(usess, app);
	}
}
/*
 * Add all event notifiers to an application.
 *
 * Called with session lock held.
 * Called with RCU read-side lock held.
 */
void ust_app_global_update_event_notifier_rules(struct ust_app *app)
{
	DBG2("UST application global event notifier rules update: app = '%s' (ppid: %d)",
			app->name, app->ppid);

	if (!app->compatible) {
		return;
	}

	if (app->event_notifier_group.object == NULL) {
		WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s' (ppid: %d)",
				app->name, app->ppid);
		return;
	}

	ust_app_synchronize_event_notifier_rules(app);
}
/*
 * Called with session lock held.
 */
void ust_app_global_update_all(struct ltt_ust_session *usess)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ust_app_global_update(usess, app);
	}
}
void ust_app_global_update_all_event_notifier_rules(void)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ust_app_global_update_event_notifier_rules(app);
	}
}
/*
 * Add context to a specific channel for global UST domain.
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	assert(usess->active);

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/* TODO: In time, notify the caller that this is a version error. */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);

next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}
}
/*
 * Receive registration and populate the given msg structure.
 *
 * On success return 0 else a negative value returned by the ustctl call.
 */
int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
{
	int ret;
	uint32_t pid, ppid, uid, gid;

	ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
			&pid, &ppid, &uid, &gid,
			&msg->bits_per_long,
			&msg->uint8_t_alignment,
			&msg->uint16_t_alignment,
			&msg->uint32_t_alignment,
			&msg->uint64_t_alignment,
			&msg->long_alignment,
			&msg->byte_order, msg->name);
	if (ret < 0) {
		switch (-ret) {
		case LTTNG_UST_ERR_EXITING:
			DBG3("UST app recv reg message failed. Application died");
			break;
		case LTTNG_UST_ERR_UNSUP_MAJOR:
			ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
					msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
					LTTNG_UST_ABI_MINOR_VERSION);
			break;
		default:
			ERR("UST app recv reg message failed with ret %d", ret);
			break;
		}
	}
	msg->pid = (pid_t) pid;
	msg->ppid = (pid_t) ppid;
	msg->uid = (uid_t) uid;
	msg->gid = (gid_t) gid;
}
/*
 * Return a ust app session object using the application object and the
 * session object descriptor as a key. If not found, NULL is returned.
 * A RCU read side lock MUST be acquired when calling this function.
 */
static struct ust_app_session *find_session_by_objd(struct ust_app *app,
		int objd)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess = NULL;

	lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app session find by objd %d not found", objd);
		goto error;
	}

	ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
error:
	return ua_sess;
}
/*
 * Return a ust app channel object using the application object and the channel
 * object descriptor as a key. If not found, NULL is returned. A RCU read side
 * lock MUST be acquired before calling this function.
 */
static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
		int objd)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan = NULL;

	lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app channel find by objd %d not found", objd);
		goto error;
	}

	ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
error:
	return ua_chan;
}
/*
 * Reply to a register channel notification from an application on the notify
 * socket. The channel metadata is also created.
 *
 * The session UST registry lock is acquired in this function.
 *
 * On success 0 is returned else a negative value.
 */
static int reply_ust_register_channel(int sock, int cobjd,
		size_t nr_fields, struct ustctl_field *fields)
{
	int ret, ret_code = 0;
	uint32_t chan_id;
	uint64_t chan_reg_key;
	enum ustctl_channel_header type;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	struct ust_registry_channel *ust_reg_chan;

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being torn down. Abort event notify",
				sock);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being torn down. Abort event notify");
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	/* Get right session registry depending on the session buffer type. */
	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down. Abort event notify");
		goto error_rcu_unlock;
	}

	/* Depending on the buffer type, a different channel key is used. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
	assert(ust_reg_chan);

	if (!ust_reg_chan->register_done) {
		/*
		 * TODO: eventually use the registry event count for
		 * this channel to better guess header type for per-pid
		 * buffers.
		 */
		type = USTCTL_CHANNEL_HEADER_LARGE;
		ust_reg_chan->nr_ctx_fields = nr_fields;
		ust_reg_chan->ctx_fields = fields;
		ust_reg_chan->header_type = type;
	} else {
		/* Get current already assigned values. */
		type = ust_reg_chan->header_type;
	}
	/* Channel id is set during the object creation. */
	chan_id = ust_reg_chan->chan_id;

	/* Append to metadata */
	if (!ust_reg_chan->metadata_dumped) {
		ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
		if (ret_code) {
			ERR("Error appending channel metadata (errno = %d)", ret_code);
		}
	}

	DBG3("UST app replying to register channel key %" PRIu64
			" with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
			ret_code);

	ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply channel failed with ret %d", ret);
		} else {
			DBG3("UST app reply channel failed. Application died");
		}
	}

	/* This channel registry registration is completed. */
	ust_reg_chan->register_done = 1;

	pthread_mutex_unlock(&registry->lock);
}
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields,
		int loglevel_value, char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being torn down. Abort event notify",
				sock);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being torn down. Abort event notify");
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down. Abort event notify");
		goto error_rcu_unlock;
	}

	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/written after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields,
			loglevel_value, model_emf_uri, ua_sess->buffer_type,
			&event_id, app);
	model_emf_uri = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get closed on error hence cleaning up everything by itself.
		 */
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

	pthread_mutex_unlock(&registry->lock);

error_rcu_unlock:
	free(model_emf_uri);
	return ret;
}
/*
 * Add enum to the UST session registry. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired within this function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_enum_ust_registry(int sock, int sobjd, char *name,
		struct ustctl_enum_entry *entries, size_t nr_entries)
{
	int ret = 0, ret_code;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	uint64_t enum_id = -1ULL;

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		/* Return an error since this is not an error */
		DBG("Application socket %d is being torn down. Aborting enum registration",
				sock);
		goto error_rcu_unlock;
	}

	/* Lookup session by UST object descriptor. */
	ua_sess = find_session_by_objd(app, sobjd);
	if (!ua_sess) {
		/* Return an error since this is not an error */
		DBG("Application session is being torn down (session not found). Aborting enum registration.");
		goto error_rcu_unlock;
	}

	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down (registry not found). Aborting enum registration.");
		goto error_rcu_unlock;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, the callee acquires the ownership of
	 * entries. The variable entries MUST NOT be read/written after
	 * this point.
	 */
	ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
			entries, nr_entries, &enum_id);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply enum failed with ret %d", ret);
		} else {
			DBG3("UST app reply enum failed. Application died");
		}
		/*
		 * No need to wipe the create enum since the application socket will
		 * get closed on error hence cleaning up everything by itself.
		 */
	}

	DBG3("UST registry enum %s added successfully or already found", name);

	pthread_mutex_unlock(&registry->lock);
}
/*
 * Handle application notification through the given notify socket.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel_value;
		char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
				&loglevel_value, &sig, &nr_fields, &fields,
				&model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownership of these variables and transfers them
		 * to this function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel_value, model_emf_uri);
		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership is transferred to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean them up.
		 */
		ret = reply_ust_register_channel(sock, cobjd, nr_fields,
				fields);
		break;
	}
	case USTCTL_NOTIFY_CMD_ENUM:
	{
		int sobjd;
		char name[LTTNG_UST_ABI_SYM_NAME_LEN];
		size_t nr_entries;
		struct ustctl_enum_entry *entries;

		DBG2("UST app ustctl register enum received");

		ret = ustctl_recv_register_enum(sock, &sobjd, name,
				&entries, &nr_entries);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv enum failed with ret %d", ret);
			} else {
				DBG3("UST app recv enum failed. Application died");
			}
			goto error;
		}

		/* Callee assumes ownership of entries */
		ret = add_enum_ust_registry(sock, sobjd, name,
				entries, nr_entries);
		break;
	}
	default:
		/* Should NEVER happen. */
		break;
	}
}
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independently from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and if it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	call_rcu(&obj->head, close_notify_sock_rcu);
}
/*
 * Destroy a ust app data structure and free its memory.
 */
void ust_app_destroy(struct ust_app *app)
{
	call_rcu(&app->pid_n.head, delete_ust_app_rcu);
}
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
 */
enum lttng_error_code ust_app_snapshot_record(
		const struct ltt_ust_session *usess,
		const struct consumer_output *output, int wait,
		uint64_t nb_packets_per_stream)
{
	int ret = 0;
	enum lttng_error_code status = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char *trace_path = NULL;

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;
			char pathname[PATH_MAX];
			size_t consumer_path_offset = 0;

			if (!reg->registry->reg.ust->metadata_key) {
				/* Skip since no metadata is present */
				continue;
			}

			/* Get consumer socket to use to push the metadata. */
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}

			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			/* Free path allowed on previous iteration. */
			free(trace_path);
			trace_path = setup_channel_trace_path(usess->consumer, pathname,
					&consumer_path_offset);
			if (!trace_path) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			/* Add the UST default trace dir to path. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				status = consumer_snapshot_channel(socket,
						buf_reg_chan->consumer_key,
						output, 0, usess->uid,
						usess->gid, &trace_path[consumer_path_offset], wait,
						nb_packets_per_stream);
				if (status != LTTNG_OK) {
					goto error;
				}
			}
			status = consumer_snapshot_channel(socket,
					reg->registry->reg.ust->metadata_key, output, 1,
					usess->uid, usess->gid, &trace_path[consumer_path_offset],
					wait, 0);
			if (status != LTTNG_OK) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;
			char pathname[PATH_MAX];
			size_t consumer_path_offset = 0;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					usess->consumer);
			if (!socket) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
					ua_sess->path);
			if (ret < 0) {
				status = LTTNG_ERR_INVALID;
				PERROR("snprintf snapshot path");
				goto error;
			}
			/* Free path allowed on previous iteration. */
			free(trace_path);
			trace_path = setup_channel_trace_path(usess->consumer, pathname,
					&consumer_path_offset);
			if (!trace_path) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				status = consumer_snapshot_channel(socket,
						ua_chan->key, output, 0,
						lttng_credentials_get_uid(&ua_sess->effective_credentials),
						lttng_credentials_get_gid(&ua_sess->effective_credentials),
						&trace_path[consumer_path_offset], wait,
						nb_packets_per_stream);
				switch (status) {
				case LTTNG_OK:
					break;
				case LTTNG_ERR_CHAN_NOT_FOUND:
					continue;
				default:
					goto error;
				}
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}
			status = consumer_snapshot_channel(socket,
					registry->metadata_key, output, 1,
					lttng_credentials_get_uid(&ua_sess->effective_credentials),
					lttng_credentials_get_gid(&ua_sess->effective_credentials),
					&trace_path[consumer_path_offset], wait, 0);
			switch (status) {
			case LTTNG_OK:
				break;
			case LTTNG_ERR_CHAN_NOT_FOUND:
				continue;
			default:
				goto error;
			}
		}
		break;
	}
	}
/*
 * Return the size taken by one more packet per stream.
 */
uint64_t ust_app_get_size_one_more_packet_per_stream(
		const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
{
	uint64_t tot_size = 0;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	assert(usess);

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;

			rcu_read_lock();
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
					/*
					 * Don't take channel into account if we
					 * already grab all its packets.
					 */
					continue;
				}
				tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
			}
			rcu_read_unlock();
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		rcu_read_lock();
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct lttng_ht_iter chan_iter;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
					/*
					 * Don't take channel into account if we
					 * already grab all its packets.
					 */
					continue;
				}
				tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
			}
		}
		rcu_read_unlock();
		break;
	}
	default:
		assert(0);
		break;
	}

	return tot_size;
}
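
/*
 * Minimal usage sketch (hypothetical helper, not part of the original call
 * sites): derive how many extra packets per stream fit within a size budget
 * by repeatedly asking for the cost of "one more" packet. The helper name
 * and the max_size parameter are assumptions made for illustration.
 */
static uint64_t example_nb_extra_packets_per_stream(
		const struct ltt_ust_session *usess, uint64_t max_size)
{
	uint64_t nb_packets = 0;
	uint64_t total = 0;

	for (;;) {
		const uint64_t one_more =
				ust_app_get_size_one_more_packet_per_stream(
						usess, nb_packets);

		/* Stop when no channel contributes or the budget is exceeded. */
		if (one_more == 0 || total + one_more > max_size) {
			break;
		}
		total += one_more;
		nb_packets++;
	}

	return nb_packets;
}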
int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
		struct cds_list_head *buffer_reg_uid_list,
		struct consumer_output *consumer, uint64_t uchan_id,
		int overwrite, uint64_t *discarded, uint64_t *lost)
{
	int ret;
	uint64_t consumer_chan_key;

	*discarded = 0;
	*lost = 0;

	ret = buffer_reg_uid_consumer_channel_key(
			buffer_reg_uid_list, uchan_id, &consumer_chan_key);
	if (ret < 0) {
		/* Not found. */
		ret = 0;
		goto end;
	}

	if (overwrite) {
		ret = consumer_get_lost_packets(ust_session_id,
				consumer_chan_key, consumer, lost);
	} else {
		ret = consumer_get_discarded_events(ust_session_id,
				consumer_chan_key, consumer, discarded);
	}

end:
	return ret;
}
int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan,
		struct consumer_output *consumer, int overwrite,
		uint64_t *discarded, uint64_t *lost)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	*discarded = 0;
	*lost = 0;

	rcu_read_lock();
	/*
	 * Iterate over every registered application. Sum counters for
	 * all applications containing the requested session and channel.
	 */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;

		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session is found for the app, the channel must be there */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		if (overwrite) {
			uint64_t _lost;

			ret = consumer_get_lost_packets(usess->id, ua_chan->key,
					consumer, &_lost);
			if (ret < 0) {
				break;
			}
			(*lost) += _lost;
		} else {
			uint64_t _discarded;

			ret = consumer_get_discarded_events(usess->id,
					ua_chan->key, consumer, &_discarded);
			if (ret < 0) {
				break;
			}
			(*discarded) += _discarded;
		}
	}

	rcu_read_unlock();
	return ret;
}
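
/*
 * Illustrative dispatch sketch (hypothetical helper, not part of the original
 * file): pick the per-UID or per-PID statistics getter based on the session
 * buffer type. The use of uchan->id as the per-UID channel identifier is an
 * assumption made for this example.
 */
static int example_get_channel_runtime_stats(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct consumer_output *consumer,
		int overwrite, uint64_t *discarded, uint64_t *lost)
{
	if (usess->buffer_type == LTTNG_BUFFER_PER_UID) {
		return ust_app_uid_get_channel_runtime_stats(usess->id,
				&usess->buffer_reg_uid_list, consumer,
				uchan->id, overwrite, discarded, lost);
	}

	return ust_app_pid_get_channel_runtime_stats(usess, uchan, consumer,
			overwrite, discarded, lost);
}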
static
int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Regenerating the metadata for ust app pid %d", app->pid);

	rcu_read_lock();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_unlock;
	}

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
/*
 * Regenerate the statedump for each app in the session.
 */
int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Regenerating the metadata for all UST apps");

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			continue;
		}

		ret = ust_app_regenerate_statedump(usess, app);
		if (ret < 0) {
			/* Continue to the next app even on error */
			continue;
		}
	}

	rcu_read_unlock();

	return 0;
}
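
/*
 * Minimal caller sketch (illustrative; assumes the session has already been
 * validated by the caller): trigger a statedump regeneration for the UST
 * domain of a session, if one exists. The helper name is hypothetical.
 */
static int example_regenerate_ust_statedump(struct ltt_session *session)
{
	int ret = 0;

	if (session->ust_session) {
		ret = ust_app_regenerate_statedump_all(session->ust_session);
	}

	return ret;
}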
/*
 * Rotate all the channels of a session.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ltt_ust_session *usess = session->ust_session;

	assert(usess);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error;
			}

			/* Rotate the data channels. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				ret = consumer_rotate_channel(socket,
						buf_reg_chan->consumer_key,
						usess->uid, usess->gid,
						usess->consumer,
						/* is_metadata_channel */ false);
				if (ret < 0) {
					cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
					goto error;
				}
			}

			/*
			 * The metadata channel might not be present.
			 *
			 * Consumer stream allocation can be done
			 * asynchronously and can fail on intermediary
			 * operations (i.e. add context) and lead to data
			 * channels created with no metadata channel.
			 */
			if (!reg->registry->reg.ust->metadata_key) {
				/* Skip since no metadata is present. */
				continue;
			}

			(void) push_metadata(reg->registry->reg.ust, usess->consumer);

			ret = consumer_rotate_channel(socket,
					reg->registry->reg.ust->metadata_key,
					usess->uid, usess->gid,
					usess->consumer,
					/* is_metadata_channel */ true);
			if (ret < 0) {
				cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			/* Rotate the data channels. */
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_rotate_channel(socket,
						ua_chan->key,
						lttng_credentials_get_uid(&ua_sess->effective_credentials),
						lttng_credentials_get_gid(&ua_sess->effective_credentials),
						ua_sess->consumer,
						/* is_metadata_channel */ false);
				if (ret < 0) {
					/* Per-PID buffer and application going away. */
					if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
						continue;
					cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
					goto error;
				}
			}

			/* Rotate the metadata channel. */
			(void) push_metadata(registry, usess->consumer);
			ret = consumer_rotate_channel(socket,
					registry->metadata_key,
					lttng_credentials_get_uid(&ua_sess->effective_credentials),
					lttng_credentials_get_gid(&ua_sess->effective_credentials),
					ua_sess->consumer,
					/* is_metadata_channel */ true);
			if (ret < 0) {
				/* Per-PID buffer and application going away. */
				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
					continue;
				cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

	cmd_ret = LTTNG_OK;

error:
	rcu_read_unlock();
	return cmd_ret;
}
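
/*
 * Minimal caller sketch (illustrative; assumes the caller already holds the
 * session lock, as rotation command handlers do): propagate the UST domain
 * rotation status for a session that may or may not have a UST session
 * attached. The helper name is hypothetical.
 */
static enum lttng_error_code example_rotate_ust_domain(
		struct ltt_session *session)
{
	enum lttng_error_code cmd_ret = LTTNG_OK;

	if (session->ust_session) {
		cmd_ret = ust_app_rotate_session(session);
	}

	return cmd_ret;
}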
enum lttng_error_code ust_app_create_channel_subdirectories(
		const struct ltt_ust_session *usess)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	enum lttng_trace_chunk_status chunk_status;
	char *pathname_index;
	int fmt_ret;

	assert(usess->current_trace_chunk);
	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			fmt_ret = asprintf(&pathname_index,
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
					reg->uid, reg->bits_per_long);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}

			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
					usess->current_trace_chunk,
					pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;

		/*
		 * Create the toplevel ust/ directory in case no apps are running.
		 */
		chunk_status = lttng_trace_chunk_create_subdirectory(
				usess->current_trace_chunk,
				DEFAULT_UST_TRACE_DIR);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ret = LTTNG_ERR_CREATE_DIR_FAIL;
			goto error;
		}

		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			fmt_ret = asprintf(&pathname_index,
					DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
					ua_sess->path);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}

			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
					usess->current_trace_chunk,
					pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	default:
		abort();
	}

	ret = LTTNG_OK;
error:
	rcu_read_unlock();
	return ret;
}
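
/*
 * Worked example (illustrative; the exact strings come from defaults.h): with
 * DEFAULT_UST_TRACE_DIR set to "ust" and DEFAULT_INDEX_DIR set to "index",
 * the per-UID format above yields a path of the form
 * "ust/uid/<uid>/<bitness>-bit/index", e.g. "ust/uid/1000/64-bit/index" for
 * uid 1000 and a 64-bit registry. Creating that index subdirectory in the
 * trace chunk implicitly creates the channel directories leading up to it.
 */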
/*
 * Clear all the channels of a session.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ltt_ust_session *usess = session->ust_session;

	assert(usess);

	rcu_read_lock();

	if (usess->active) {
		ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
		cmd_ret = LTTNG_ERR_FATAL;
		goto end;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error_socket;
			}

			/* Clear the data channels. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				ret = consumer_clear_channel(socket,
						buf_reg_chan->consumer_key);
				if (ret < 0) {
					goto error;
				}
			}

			(void) push_metadata(reg->registry->reg.ust, usess->consumer);

			/*
			 * Clear the metadata channel.
			 * Metadata channel is not cleared per se but we still need to
			 * perform a rotation operation on it behind the scene.
			 */
			ret = consumer_clear_channel(socket,
					reg->registry->reg.ust->metadata_key);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error_socket;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			/* Clear the data channels. */
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_clear_channel(socket, ua_chan->key);
				if (ret < 0) {
					/* Per-PID buffer and application going away. */
					if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
						continue;
					}
					goto error;
				}
			}

			(void) push_metadata(registry, usess->consumer);

			/*
			 * Clear the metadata channel.
			 * Metadata channel is not cleared per se but we still need to
			 * perform rotation operation on it behind the scene.
			 */
			ret = consumer_clear_channel(socket, registry->metadata_key);
			if (ret < 0) {
				/* Per-PID buffer and application going away. */
				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
					continue;
				}
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

	cmd_ret = LTTNG_OK;
	goto end;

error:
	switch (-ret) {
	case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
		cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
		break;
	default:
		cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
		break;
	}

error_socket:
end:
	rcu_read_unlock();
	return cmd_ret;
}
/*
 * This function skips the metadata channel as the begin/end timestamps of a
 * metadata packet are useless.
 *
 * Moreover, opening a packet after a "clear" will cause problems for live
 * sessions as it will introduce padding that was not part of the first trace
 * chunk. The relay daemon expects the content of the metadata stream of
 * successive metadata trace chunks to be strict supersets of one another.
 *
 * For example, flushing a packet at the beginning of the metadata stream of
 * a trace chunk resulting from a "clear" session command will cause the
 * size of the metadata stream of the new trace chunk to not match the size of
 * the metadata stream of the original chunk. This will confuse the relay
 * daemon as the same "offset" in a metadata stream will no longer point
 * to the same content.
 */
enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ltt_ust_session *usess = session->ust_session;

	assert(usess);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry (
				reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			if (!socket) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, buf_reg_chan, node.node) {
				const int open_ret =
						consumer_open_channel_packets(
							socket,
							buf_reg_chan->consumer_key);

				if (open_ret < 0) {
					ret = LTTNG_ERR_UNK;
					goto error;
				}
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;

		cds_lfht_for_each_entry (
				ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(
					app->bits_per_long, usess->consumer);
			if (!socket) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht,
					&chan_iter.iter, ua_chan, node.node) {
				const int open_ret =
						consumer_open_channel_packets(
							socket, ua_chan->key);

				if (open_ret < 0) {
					/*
					 * Per-PID buffer and application going
					 * away.
					 */
					if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
						continue;
					}

					ret = LTTNG_ERR_UNK;