/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include "buffer-registry.hpp"
#include "condition-internal.hpp"
#include "event-notifier-error-accounting.hpp"
#include "fd-limit.hpp"
#include "health-sessiond.hpp"
#include "lttng-sessiond.hpp"
#include "lttng-ust-ctl.hpp"
#include "lttng-ust-error.hpp"
#include "notification-thread-commands.hpp"
#include "session.hpp"
#include "ust-app.hpp"
#include "ust-consumer.hpp"
#include "ust-field-convert.hpp"

#include <common/bytecode/bytecode.hpp>
#include <common/common.hpp>
#include <common/compat/errno.hpp>
#include <common/exception.hpp>
#include <common/format.hpp>
#include <common/hashtable/utils.hpp>
#include <common/make-unique.hpp>
#include <common/pthread-lock.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/urcu.hpp>

#include <lttng/condition/condition.h>
#include <lttng/condition/event-rule-matches-internal.hpp>
#include <lttng/condition/event-rule-matches.h>
#include <lttng/event-rule/event-rule-internal.hpp>
#include <lttng/event-rule/event-rule.h>
#include <lttng/event-rule/user-tracepoint.h>
#include <lttng/trigger/trigger-internal.hpp>

#include <sys/types.h>
#include <urcu/compiler.h>
namespace lsu = lttng::sessiond::ust;
namespace lst = lttng::sessiond::trace;

struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;

static int ust_app_flush_app_session(ust_app& app, ust_app_session& ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exist before calling this function or else
 * it LTTNG_ASSERT()s if not found. RCU read side lock must be acquired.
 */
static lsu::registry_session *get_session_registry(const struct ust_app_session *ua_sess)
{
	lsu::registry_session *registry = nullptr;

	LTTNG_ASSERT(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}

		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid =
			buffer_reg_uid_find(ua_sess->tracing_id,
					    ua_sess->bits_per_long,
					    lttng_credentials_get_uid(&ua_sess->real_credentials));
		if (!reg_uid) {
			goto error;
		}

		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		break;
	}

error:
	return registry;
}

lsu::registry_session::locked_ptr get_locked_session_registry(const struct ust_app_session *ua_sess)
{
	auto session = get_session_registry(ua_sess);

	if (session) {
		pthread_mutex_lock(&session->_lock);
	}

	return lsu::registry_session::locked_ptr{ session };
}
/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key()
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}

/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id()
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}
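
/*
 * Usage sketch (added commentary, not in the original source): both
 * generators trade atomics for a dedicated mutex, so concurrent callers
 * each observe a unique, strictly increasing value, e.g.:
 *
 *   ua_chan->key = get_next_channel_key();
 *   ua_sess->id = get_next_session_id();
 */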
static void copy_channel_attr_to_ustctl(struct lttng_ust_ctl_consumer_channel_attr *attr,
					struct lttng_ust_abi_channel_attr *uattr)
{
	/* Copy event attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = (lttng_ust_abi_output) uattr->output;
	attr->blocking_timeout = uattr->u.s.blocking_timeout;
}
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes: the event name,
 * the filter bytecode, the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;

	LTTNG_ASSERT(node);
	LTTNG_ASSERT(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = (ust_app_ht_key *) _key;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (!loglevels_match(event->attr.loglevel_type,
			     event->attr.loglevel,
			     key->loglevel_type,
			     key->loglevel_value,
			     LTTNG_UST_ABI_LOGLEVEL_ALL)) {
		goto no_match;
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
		    memcmp(event->filter->data, key->filter->data, event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
		    memcmp(event->exclusion->names,
			   key->exclusion->names,
			   event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}
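
/*
 * Added commentary (not in the original source): the same ust_app_ht_key
 * drives both insertion and lookup. add_unique_ust_app_event() below builds
 * the key from an event about to be inserted, while find_ust_app_event()
 * builds it from the caller-supplied name/filter/loglevel/exclusion tuple;
 * both paths hash on the event name only and rely on this match function to
 * disambiguate hash collisions.
 */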
/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan, struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	LTTNG_ASSERT(ua_chan);
	LTTNG_ASSERT(ua_chan->events);
	LTTNG_ASSERT(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = (lttng_ust_abi_loglevel_type) event->attr.loglevel_type;
	key.loglevel_value = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
				       ht->hash_fct(event->node.key, lttng_ht_seed),
				       ht_match_ust_app_event,
				       &key,
				       &event->node.node);
	LTTNG_ASSERT(node_ptr == &event->node.node);
}
/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		lttng::utils::container_of(head, &ust_app_notify_sock_obj::head);

	/* Must have a valid fd here. */
	LTTNG_ASSERT(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}
/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	LTTNG_ASSERT(ua_ctx);
	ASSERT_RCU_READ_LOCKED();

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release ctx failed. Application is dead: pid = %d, sock = %d",
				     app->pid,
				     app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release ctx failed. Communication time out: pid = %d, sock = %d",
				     app->pid,
				     app->sock);
			} else {
				ERR("UST app release ctx obj handle %d failed with ret %d: pid = %d, sock = %d",
				    ua_ctx->obj->handle,
				    ret,
				    app->pid,
				    app->sock);
			}
		}
		free(ua_ctx->obj);
	}

	if (ua_ctx->ctx.ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
		free(ua_ctx->ctx.u.app_ctx.provider_name);
		free(ua_ctx->ctx.u.app_ctx.ctx_name);
	}

	free(ua_ctx);
}
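
/*
 * Added commentary (not in the original source): the error triage above is
 * this file's recurring pattern. A release returning -EPIPE or
 * -LTTNG_UST_ERR_EXITING means the traced application already exited, which
 * is expected during teardown and only logged at debug level; -EAGAIN is a
 * communication timeout; anything else is a genuine protocol error. The
 * delete and release helpers below repeat the same triage.
 */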
/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static void delete_ust_app_event(int sock, struct ust_app_event *ua_event, struct ust_app *app)
{
	int ret;

	LTTNG_ASSERT(ua_event);
	ASSERT_RCU_READ_LOCKED();

	free(ua_event->filter);
	if (ua_event->exclusion != nullptr)
		free(ua_event->exclusion);
	if (ua_event->obj != nullptr) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_event->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release event failed. Application is dead: pid = %d, sock = %d",
				     app->pid,
				     app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release event failed. Communication time out: pid = %d, sock = %d",
				     app->pid,
				     app->sock);
			} else {
				ERR("UST app release event obj failed with ret %d: pid = %d, sock = %d",
				    ret,
				    app->pid,
				    app->sock);
			}
		}
		free(ua_event->obj);
	}

	free(ua_event);
}
/*
 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
 * through a call_rcu().
 */
static void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
{
	struct ust_app_event_notifier_rule *obj =
		lttng::utils::container_of(head, &ust_app_event_notifier_rule::rcu_head);

	free(obj);
}
/*
 * Delete ust app event notifier rule safely.
 */
static void delete_ust_app_event_notifier_rule(
	int sock, struct ust_app_event_notifier_rule *ua_event_notifier_rule, struct ust_app *app)
{
	int ret;

	LTTNG_ASSERT(ua_event_notifier_rule);

	if (ua_event_notifier_rule->exclusion != nullptr) {
		free(ua_event_notifier_rule->exclusion);
	}

	if (ua_event_notifier_rule->obj != nullptr) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_event_notifier_rule->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release event notifier failed. Application is dead: pid = %d, sock = %d",
				     app->pid,
				     app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release event notifier failed. Communication time out: pid = %d, sock = %d",
				     app->pid,
				     app->sock);
			} else {
				ERR("UST app release event notifier failed with ret %d: pid = %d, sock = %d",
				    ret,
				    app->pid,
				    app->sock);
			}
		}

		free(ua_event_notifier_rule->obj);
	}

	lttng_trigger_put(ua_event_notifier_rule->trigger);
	call_rcu(&ua_event_notifier_rule->rcu_head, free_ust_app_event_notifier_rule_rcu);
}
/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream, struct ust_app *app)
{
	int ret = 0;

	LTTNG_ASSERT(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release stream failed. Application is dead: pid = %d, sock = %d",
				     app->pid,
				     app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release stream failed. Communication time out: pid = %d, sock = %d",
				     app->pid,
				     app->sock);
			} else {
				ERR("UST app release stream obj failed with ret %d: pid = %d, sock = %d",
				    ret,
				    app->pid,
				    app->sock);
			}
		}
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static void delete_ust_app_stream(int sock, struct ust_app_stream *stream, struct ust_app *app)
{
	LTTNG_ASSERT(stream);
	ASSERT_RCU_READ_LOCKED();

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
static void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		lttng::utils::container_of(head, &ust_app_channel::rcu_head);

	lttng_ht_destroy(ua_chan->ctx);
	lttng_ht_destroy(ua_chan->events);
	free(ua_chan);
}
/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
		return;
	}

	lttng::urcu::read_lock_guard read_lock;
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
					  ua_chan->key,
					  session->ust_session->consumer,
					  &lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
					      ua_chan->key,
					      session->ust_session->consumer,
					      &discarded);
	}
	uchan = trace_ust_find_channel_by_name(session->ust_session->domain_global.channels,
					       ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	if (session) {
		session_put(session);
	}
}
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static void delete_ust_app_channel(int sock,
				   struct ust_app_channel *ua_chan,
				   struct ust_app *app,
				   const lsu::registry_session::locked_ptr& locked_registry)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;

	LTTNG_ASSERT(ua_chan);
	ASSERT_RCU_READ_LOCKED();

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe (stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry (ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		LTTNG_ASSERT(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry (ua_chan->events->ht, &iter.iter, ua_event, node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		LTTNG_ASSERT(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		if (locked_registry) {
			try {
				locked_registry->remove_channel(ua_chan->key, sock >= 0);
			} catch (const std::exception& ex) {
				DBG("Could not find channel for removal: %s", ex.what());
			}
		}

		/*
		 * A negative socket can be used by the caller when
		 * cleaning-up a ua_chan in an error path. Skip the
		 * accounting in this case.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != nullptr) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		LTTNG_ASSERT(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app channel %s release failed. Application is dead: pid = %d, sock = %d",
				     ua_chan->name,
				     app->pid,
				     app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app channel %s release failed. Communication time out: pid = %d, sock = %d",
				     ua_chan->name,
				     app->pid,
				     app->sock);
			} else {
				ERR("UST app channel %s release failed with ret %d: pid = %d, sock = %d",
				    ua_chan->name,
				    ret,
				    app->pid,
				    app->sock);
			}
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}
int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
	ret = lttng_ust_ctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(const lsu::registry_session::locked_ptr& locked_registry,
			      struct consumer_socket *socket,
			      int send_zero_data)
{
	int ret;
	char *metadata_str = nullptr;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	LTTNG_ASSERT(locked_registry);
	LTTNG_ASSERT(socket);
	ASSERT_RCU_READ_LOCKED();

	metadata_key = locked_registry->_metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	offset = locked_registry->_metadata_len_sent;
	len = locked_registry->_metadata_len - locked_registry->_metadata_len_sent;
	new_metadata_len_sent = locked_registry->_metadata_len;
	metadata_version = locked_registry->_metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
		     locked_registry->_metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = calloc<char>(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, locked_registry->_metadata + offset, len);

push_data:
	pthread_mutex_unlock(&locked_registry->_lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(
		socket, metadata_key, metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&locked_registry->_lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		locked_registry->_metadata_len_sent =
			std::max(locked_registry->_metadata_len_sent, new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		locked_registry->_metadata_closed = true;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
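
/*
 * Added commentary (not in the original source): the unlock/push/relock
 * dance above is the key deadlock-avoidance point of this file. A minimal
 * caller sketch, assuming a locked registry obtained from
 * get_locked_session_registry():
 *
 *   auto locked_registry = get_locked_session_registry(ua_sess);
 *
 *   if (locked_registry) {
 *           (void) ust_app_push_metadata(locked_registry, socket, 0);
 *   }
 *
 * The registry lock is dropped only for the duration of
 * consumer_push_metadata() and re-acquired before the sent-length
 * bookkeeping is updated.
 */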
/*
 * For a given application and session, push metadata to consumer.
 * Either sock or consumer is required: if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(const lsu::registry_session::locked_ptr& locked_registry,
			 struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	LTTNG_ASSERT(locked_registry);
	LTTNG_ASSERT(consumer);
	ASSERT_RCU_READ_LOCKED();

	if (locked_registry->_metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(locked_registry->abi.bits_per_long, consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(locked_registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}

	return 0;

error:
	return ret_val;
}
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd, because
 * doing so causes inter-process deadlocks between consumerd and sessiond with
 * the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(uint64_t metadata_key,
			  unsigned int consumer_bitness,
			  struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;
	lttng::urcu::read_lock_guard read_lock_guard;

	LTTNG_ASSERT(consumer);

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(consumer_bitness, consumer);
	if (!socket) {
		ret = -1;
		goto end;
	}

	ret = consumer_close_metadata(socket, metadata_key);
	if (ret < 0) {
		goto end;
	}

end:
	return ret;
}
static void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		lttng::utils::container_of(head, &ust_app_session::rcu_head);

	lttng_ht_destroy(ua_sess->channels);
	free(ua_sess);
}
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static void delete_ust_app_session(int sock, struct ust_app_session *ua_sess, struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;

	LTTNG_ASSERT(ua_sess);
	ASSERT_RCU_READ_LOCKED();

	pthread_mutex_lock(&ua_sess->lock);

	LTTNG_ASSERT(!ua_sess->deleted);
	ua_sess->deleted = true;

	auto locked_registry = get_locked_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (locked_registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(locked_registry, ua_sess->consumer);
	}

	cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		LTTNG_ASSERT(!ret);
		delete_ust_app_channel(sock, ua_chan, app, locked_registry);
	}

	if (locked_registry) {
		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flagged the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			const auto metadata_key = locked_registry->_metadata_key;
			const auto consumer_bitness = locked_registry->abi.bits_per_long;

			if (!locked_registry->_metadata_closed && metadata_key != 0) {
				locked_registry->_metadata_closed = true;
			}

			/* Release lock before communication, see comments in close_metadata(). */
			locked_registry.reset();
			(void) close_metadata(metadata_key, consumer_bitness, ua_sess->consumer);
		}
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);

		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release session handle failed. Application is dead: pid = %d, sock = %d",
				     app->pid,
				     app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release session handle failed. Communication time out: pid = %d, sock = %d",
				     app->pid,
				     app->sock);
			} else {
				ERR("UST app release session handle failed with ret %d: pid = %d, sock = %d",
				    ret,
				    app->pid,
				    app->sock);
			}
		}

		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		LTTNG_ASSERT(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 */
static void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;
	struct lttng_ht_iter iter;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	bool event_notifier_write_fd_is_open;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe (ua_sess, tmp_ua_sess, &app->teardown_head, teardown_node) {
		/* Free every object in the session and the session. */
		lttng::urcu::read_lock_guard read_lock;
		delete_ust_app_session(sock, ua_sess, app);
	}

	/* Remove the event notifier rules associated with this app. */
	{
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
					 &iter.iter,
					 event_notifier_rule,
					 node.node) {
			ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
			LTTNG_ASSERT(!ret);

			delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
		}
	}

	lttng_ht_destroy(app->sessions);
	lttng_ht_destroy(app->ust_sessions_objd);
	lttng_ht_destroy(app->ust_objd);
	lttng_ht_destroy(app->token_to_event_notifier_rule_ht);

	/*
	 * This could be NULL if the event notifier setup failed (e.g the app
	 * was killed or the tracer does not support this feature).
	 */
	if (app->event_notifier_group.object) {
		enum lttng_error_code ret_code;
		enum event_notifier_error_accounting_status status;

		const int event_notifier_read_fd =
			lttng_pipe_get_readfd(app->event_notifier_group.event_pipe);

		ret_code = notification_thread_command_remove_tracer_event_source(
			the_notification_thread_handle, event_notifier_read_fd);
		if (ret_code != LTTNG_OK) {
			ERR("Failed to remove application tracer event source from notification thread");
		}

		status = event_notifier_error_accounting_unregister_app(app);
		if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
			ERR("Error unregistering app from event notifier error accounting");
		}

		lttng_ust_ctl_release_object(sock, app->event_notifier_group.object);
		free(app->event_notifier_group.object);
	}

	event_notifier_write_fd_is_open =
		lttng_pipe_is_write_open(app->event_notifier_group.event_pipe);
	lttng_pipe_destroy(app->event_notifier_group.event_pipe);
	/*
	 * Release the file descriptors reserved for the event notifier pipe.
	 * The app could be destroyed before the write end of the pipe could be
	 * passed to the application (and closed). In that case, both file
	 * descriptors must be released.
	 */
	lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}
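
/*
 * Added commentary (not in the original source): delete_ust_app() is only
 * reached through the RCU callback below, e.g.:
 *
 *   call_rcu(&app->pid_n.head, delete_ust_app_rcu);
 *
 * which guarantees that no RCU reader can still observe the application by
 * the time its socket is closed and its memory is freed.
 */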
/*
 * URCU intermediate call to delete an UST app.
 */
static void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		lttng::utils::container_of(head, &lttng_ht_node_ulong::head);
	struct ust_app *app = lttng::utils::container_of(node, &ust_app::pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}
/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 *
 * The session list lock must be held by the caller.
 */
static void destroy_app_session(struct ust_app *app, struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	LTTNG_ASSERT(app);
	LTTNG_ASSERT(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}
/*
 * Alloc new UST app session.
 */
static struct ust_app_session *alloc_ust_app_session()
{
	struct ust_app_session *ua_sess;

	/* Init most of the default value by allocating and zeroing */
	ua_sess = zmalloc<ust_app_session>();
	if (ua_sess == nullptr) {
		PERROR("malloc");
		goto error_free;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, nullptr);

	return ua_sess;

error_free:
	return nullptr;
}
/*
 * Alloc new UST app channel.
 */
static struct ust_app_channel *alloc_ust_app_channel(const char *name,
						     struct ust_app_session *ua_sess,
						     struct lttng_ust_abi_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default value by allocating and zeroing */
	ua_chan = zmalloc<ust_app_channel>();
	if (ua_chan == nullptr) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = true;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to lttng_ust_ctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = (lttng_ust_abi_output) attr->output;
		ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return nullptr;
}
/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream()
{
	struct ust_app_stream *stream = nullptr;

	stream = zmalloc<ust_app_stream>();
	if (stream == nullptr) {
		PERROR("zmalloc ust app stream");
		goto error;
	}

	/* Zero could be a valid value for a handle so flag it to -1. */
	stream->handle = -1;

error:
	return stream;
}
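
/*
 * Usage sketch (added commentary, not in the original source): a caller
 * would typically chain the freshly allocated stream into a channel's
 * stream list before handing it to the application:
 *
 *   struct ust_app_stream *stream = ust_app_alloc_stream();
 *
 *   if (stream) {
 *           cds_list_add_tail(&stream->list, &ua_chan->streams.head);
 *   }
 */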
/*
 * Alloc new UST app event.
 */
static struct ust_app_event *alloc_ust_app_event(char *name, struct lttng_ust_abi_event *attr)
{
	struct ust_app_event *ua_event;

	/* Init most of the default value by allocating and zeroing */
	ua_event = zmalloc<ust_app_event>();
	if (ua_event == nullptr) {
		PERROR("Failed to allocate ust_app_event structure");
		goto error;
	}

	ua_event->enabled = true;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return nullptr;
}
/*
 * Allocate a new UST app event notifier rule.
 */
static struct ust_app_event_notifier_rule *
alloc_ust_app_event_notifier_rule(struct lttng_trigger *trigger)
{
	enum lttng_event_rule_generate_exclusions_status generate_exclusion_status;
	enum lttng_condition_status cond_status;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;
	struct lttng_condition *condition = nullptr;
	const struct lttng_event_rule *event_rule = nullptr;

	ua_event_notifier_rule = zmalloc<ust_app_event_notifier_rule>();
	if (ua_event_notifier_rule == nullptr) {
		PERROR("Failed to allocate ust_app_event_notifier_rule structure");
		goto error;
	}

	ua_event_notifier_rule->enabled = true;
	ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
	lttng_ht_node_init_u64(&ua_event_notifier_rule->node, ua_event_notifier_rule->token);

	condition = lttng_trigger_get_condition(trigger);
	LTTNG_ASSERT(condition);
	LTTNG_ASSERT(lttng_condition_get_type(condition) ==
		     LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);

	cond_status = lttng_condition_event_rule_matches_get_rule(condition, &event_rule);
	LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
	LTTNG_ASSERT(event_rule);

	ua_event_notifier_rule->error_counter_index =
		lttng_condition_event_rule_matches_get_error_counter_index(condition);
	/* Acquire the event notifier's reference to the trigger. */
	lttng_trigger_get(trigger);

	ua_event_notifier_rule->trigger = trigger;
	ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
	generate_exclusion_status = lttng_event_rule_generate_exclusions(
		event_rule, &ua_event_notifier_rule->exclusion);
	switch (generate_exclusion_status) {
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
		break;
	default:
		/* Error occurred. */
		ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
		goto error_put_trigger;
	}

	DBG3("UST app event notifier rule allocated: token = %" PRIu64,
	     ua_event_notifier_rule->token);

	return ua_event_notifier_rule;

error_put_trigger:
	lttng_trigger_put(trigger);
error:
	free(ua_event_notifier_rule);
	return nullptr;
}
/*
 * Alloc new UST app context.
 */
static struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc<ust_app_ctx>();
	if (ua_ctx == nullptr) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
			char *provider_name = nullptr, *ctx_name = nullptr;

			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;

error:
	free(ua_ctx);
	return nullptr;
}
/*
 * Create a liblttng-ust filter bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_abi_filter_bytecode *
create_ust_filter_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
	struct lttng_ust_abi_filter_bytecode *filter = nullptr;

	/* Copy filter bytecode. */
	filter = zmalloc<lttng_ust_abi_filter_bytecode>(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32,
		       orig_f->len);
		goto error;
	}

	LTTNG_ASSERT(sizeof(struct lttng_bytecode) == sizeof(struct lttng_ust_abi_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
error:
	return filter;
}
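
/*
 * Added commentary (not in the original source): the LTTNG_ASSERT above is
 * what makes the single memcpy() of header plus bytecode safe; the sessiond
 * lttng_bytecode header and the UST ABI filter bytecode header are expected
 * to share the same layout. create_ust_capture_bytecode_from_bytecode()
 * below relies on the same property for capture bytecodes.
 */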
/*
 * Create a liblttng-ust capture bytecode from given bytecode.
 *
 * Return allocated capture bytecode or NULL on error.
 */
static struct lttng_ust_abi_capture_bytecode *
create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
	struct lttng_ust_abi_capture_bytecode *capture = nullptr;

	/* Copy capture bytecode. */
	capture = zmalloc<lttng_ust_abi_capture_bytecode>(sizeof(*capture) + orig_f->len);
	if (!capture) {
		PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32,
		       orig_f->len);
		goto error;
	}

	LTTNG_ASSERT(sizeof(struct lttng_bytecode) ==
		     sizeof(struct lttng_ust_abi_capture_bytecode));
	memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
error:
	return capture;
}
/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	ASSERT_RCU_READ_LOCKED();

	lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == nullptr) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return lttng::utils::container_of(node, &ust_app::sock_n);

error:
	return nullptr;
}
/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	ASSERT_RCU_READ_LOCKED();

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *) ((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == nullptr) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return lttng::utils::container_of(node, &ust_app::notify_sock_n);

error:
	return nullptr;
}
/*
 * Lookup for an ust app event based on event name, filter bytecode and the
 * event loglevel.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
						const char *name,
						const struct lttng_bytecode *filter,
						lttng_ust_abi_loglevel_type loglevel_type,
						int loglevel_value,
						const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = nullptr;
	struct ust_app_ht_key key;

	LTTNG_ASSERT(name);
	LTTNG_ASSERT(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = loglevel_type;
	key.loglevel_value = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht,
			ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event,
			&key,
			&iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == nullptr) {
		goto end;
	}

	event = lttng::utils::container_of(node, &ust_app_event::node);

end:
	return event;
}
/*
 * Look-up an event notifier rule based on its token id.
 *
 * Must be called with the RCU read lock held.
 * Return an ust_app_event_notifier_rule object or NULL on error.
 */
static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(struct lttng_ht *ht,
									     uint64_t token)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct ust_app_event_notifier_rule *event_notifier_rule = nullptr;

	LTTNG_ASSERT(ht);
	ASSERT_RCU_READ_LOCKED();

	lttng_ht_lookup(ht, &token, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == nullptr) {
		DBG2("UST app event notifier rule token not found: token = %" PRIu64, token);
		goto end;
	}

	event_notifier_rule = lttng::utils::container_of(node, &ust_app_event_notifier_rule::node);
end:
	return event_notifier_rule;
}
/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static int create_ust_channel_context(struct ust_app_channel *ua_chan,
				      struct ust_app_ctx *ua_ctx,
				      struct ust_app *app)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx, ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = 0;
			DBG3("UST app create channel context failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			ret = 0;
			WARN("UST app create channel context failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app create channel context failed with ret %d: pid = %d, sock = %d",
			    ret,
			    app->pid,
			    app->sock);
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
	     ua_ctx->handle,
	     ua_chan->name);

error:
	health_code_update();
	return ret;
}
/*
 * Set the filter on the tracer.
 */
static int set_ust_object_filter(struct ust_app *app,
				 const struct lttng_bytecode *bytecode,
				 struct lttng_ust_abi_object_data *ust_object)
{
	int ret;
	struct lttng_ust_abi_filter_bytecode *ust_bytecode = nullptr;

	health_code_update();

	ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = 0;
			DBG3("UST app set filter failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			ret = 0;
			WARN("UST app set filter failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app set filter failed with ret %d: pid = %d, sock = %d, object = %p",
			    ret,
			    app->pid,
			    app->sock,
			    ust_object);
		}
		goto error;
	}

	DBG2("UST filter successfully set: object = %p", ust_object);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}
/*
 * Set a capture bytecode for the passed object.
 * The sequence number enforces the ordering at runtime and on reception of
 * the captured payloads.
 */
static int set_ust_capture(struct ust_app *app,
			   const struct lttng_bytecode *bytecode,
			   unsigned int capture_seqnum,
			   struct lttng_ust_abi_object_data *ust_object)
{
	int ret;
	struct lttng_ust_abi_capture_bytecode *ust_bytecode = nullptr;

	health_code_update();

	ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}

	/*
	 * Set the sequence number to ensure the capture of fields is ordered.
	 */
	ust_bytecode->seqnum = capture_seqnum;

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = 0;
			DBG3("UST app set capture failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			ret = 0;
			DBG3("UST app set capture failed. Communication timeout: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app event set capture failed with ret %d: pid = %d, sock = %d",
			    ret,
			    app->pid,
			    app->sock);
		}
		goto error;
	}

	DBG2("UST capture successfully set: object = %p", ust_object);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}
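
/*
 * Added commentary (not in the original source): capture bytecodes are
 * ordered by their sequence number. create_ust_event_notifier() below passes
 * the capture descriptor index as capture_seqnum, so captured payloads can
 * be matched back to their descriptor on reception.
 */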
static struct lttng_ust_abi_event_exclusion *
create_ust_exclusion_from_exclusion(const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ust_abi_event_exclusion *ust_exclusion = nullptr;
	size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
		LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;

	ust_exclusion = zmalloc<lttng_ust_abi_event_exclusion>(exclusion_alloc_size);
	if (!ust_exclusion) {
		PERROR("malloc");
		goto end;
	}

	LTTNG_ASSERT(sizeof(struct lttng_event_exclusion) ==
		     sizeof(struct lttng_ust_abi_event_exclusion));
	memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
end:
	return ust_exclusion;
}
/*
 * Set event exclusions on the tracer.
 */
static int set_ust_object_exclusions(struct ust_app *app,
				     const struct lttng_event_exclusion *exclusions,
				     struct lttng_ust_abi_object_data *ust_object)
{
	int ret;
	struct lttng_ust_abi_event_exclusion *ust_exclusions = nullptr;

	LTTNG_ASSERT(exclusions && exclusions->count > 0);

	health_code_update();

	ust_exclusions = create_ust_exclusion_from_exclusion(exclusions);
	if (!ust_exclusions) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_set_exclusion(app->sock, ust_exclusions, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = 0;
			DBG3("UST app event exclusion failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			ret = 0;
			WARN("UST app event exclusion failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app event exclusions failed with ret %d: pid = %d, sock = %d, object = %p",
			    ret,
			    app->pid,
			    app->sock,
			    ust_object);
		}
		goto error;
	}

	DBG2("UST exclusions set successfully for object %p", ust_object);

error:
	health_code_update();
	free(ust_exclusions);
	return ret;
}
/*
 * Disable the specified event on the UST tracer for the UST session.
 */
static int disable_ust_object(struct ust_app *app, struct lttng_ust_abi_object_data *object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_disable(app->sock, object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = 0;
			DBG3("UST app disable object failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			ret = 0;
			WARN("UST app disable object failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app disable object failed with ret %d: pid = %d, sock = %d, object = %p",
			    ret,
			    app->pid,
			    app->sock,
			    object);
		}
		goto error;
	}

	DBG2("UST app object %p disabled successfully for app: pid = %d", object, app->pid);

error:
	health_code_update();
	return ret;
}
/*
 * Disable the specified channel on the UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
			       struct ust_app_session *ua_sess,
			       struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			ret = 0;
			WARN("UST app disable channel failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app channel %s disable failed, session handle %d, with ret %d: pid = %d, sock = %d",
			    ua_chan->name,
			    ua_sess->handle,
			    ret,
			    app->pid,
			    app->sock);
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app: pid = %d", ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}
/*
 * Enable the specified channel on the UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
			      struct ust_app_session *ua_sess,
			      struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = 0;
			DBG3("UST app channel %s enable failed. Application is dead: pid = %d, sock = %d",
			     ua_chan->name,
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			ret = 0;
			WARN("UST app channel %s enable failed. Communication time out: pid = %d, sock = %d",
			     ua_chan->name,
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app channel %s enable failed, session handle %d, with ret %d: pid = %d, sock = %d",
			    ua_chan->name,
			    ua_sess->handle,
			    ret,
			    app->pid,
			    app->sock);
		}
		goto error;
	}

	ua_chan->enabled = true;

	DBG2("UST app channel %s enabled successfully for app: pid = %d", ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}
/*
 * Enable the specified event on the UST tracer for the UST session.
 */
static int enable_ust_object(struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_enable(app->sock, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = 0;
			DBG3("UST app enable object failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			ret = 0;
			WARN("UST app enable object failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app enable object failed with ret %d: pid = %d, sock = %d, object = %p",
			    ret,
			    app->pid,
			    app->sock,
			    ust_object);
		}
		goto error;
	}

	DBG2("UST app object %p enabled successfully for app: pid = %d", ust_object, app->pid);

error:
	health_code_update();
	return ret;
}
/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
				   struct ust_app_session *ua_sess,
				   struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	LTTNG_ASSERT(app);
	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name, app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret == -EAGAIN) {
		/* Caused by timeout. */
		WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
		     app->pid,
		     ua_chan->name,
		     ua_sess->tracing_id);
		/* Treat this the same way as an application that is exiting. */
		ret = -ENOTCONN;
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe (stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN; /* Caused by app exiting. */
			goto error;
		} else if (ret == -EAGAIN) {
			/* Caused by timeout. */
			WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
			     app->pid,
			     stream->name,
			     ua_chan->name,
			     ua_sess->tracing_id);
			/*
			 * Treat this the same way as an application that is
			 * exiting.
			 */
			ret = -ENOTCONN;
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}

error:
	health_code_update();
	return ret;
}
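
/*
 * Added commentary (not in the original source): send_channel_pid_to_ust()
 * deliberately collapses -EPIPE, -LTTNG_UST_ERR_EXITING and -EAGAIN into
 * -ENOTCONN so callers only have to distinguish "application unreachable"
 * from genuine protocol errors. Streams are deleted as soon as they are
 * sent; from that point the tracer owns the stream objects.
 */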
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 */
static int create_ust_event(struct ust_app *app,
			    struct ust_app_channel *ua_chan,
			    struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj, &ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = 0;
			DBG3("UST app create event failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			ret = 0;
			WARN("UST app create event failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app create event '%s' failed with ret %d: pid = %d, sock = %d",
			    ua_event->attr.name,
			    ret,
			    app->pid,
			    app->sock);
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d object = %p",
	     ua_event->attr.name,
	     app->pid,
	     ua_event->obj);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Events are created disabled; enable it on the tracer if requested. */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_object(app, ua_event->obj);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				abort();
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
static int
init_ust_event_notifier_from_event_rule(const struct lttng_event_rule *rule,
					struct lttng_ust_abi_event_notifier *event_notifier)
{
	enum lttng_event_rule_status status;
	enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
	int loglevel = -1, ret = 0;
	const char *pattern;

	memset(event_notifier, 0, sizeof(*event_notifier));

	if (lttng_event_rule_targets_agent_domain(rule)) {
		/*
		 * Special event for agents
		 * The actual meat of the event is in the filter that will be
		 * attached later on.
		 * Set the default values for the agent event.
		 */
		pattern = event_get_default_agent_ust_name(lttng_event_rule_get_domain_type(rule));
		loglevel = 0;
		ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
	} else {
		const struct lttng_log_level_rule *log_level_rule;

		LTTNG_ASSERT(lttng_event_rule_get_type(rule) ==
			     LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT);

		status = lttng_event_rule_user_tracepoint_get_name_pattern(rule, &pattern);
		if (status != LTTNG_EVENT_RULE_STATUS_OK) {
			/* At this point, this is a fatal error. */
			abort();
		}

		status = lttng_event_rule_user_tracepoint_get_log_level_rule(rule, &log_level_rule);
		if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
			ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
		} else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
			enum lttng_log_level_rule_status llr_status;

			switch (lttng_log_level_rule_get_type(log_level_rule)) {
			case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
				ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
				llr_status = lttng_log_level_rule_exactly_get_level(log_level_rule,
										    &loglevel);
				break;
			case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
				ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
				llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
					log_level_rule, &loglevel);
				break;
			default:
				abort();
			}

			LTTNG_ASSERT(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
		} else {
			/* At this point this is a fatal error. */
			abort();
		}
	}

	event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
	ret = lttng_strncpy(
		event_notifier->event.name, pattern, sizeof(event_notifier->event.name));
	if (ret) {
		ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ", pattern);
		goto end;
	}

	event_notifier->event.loglevel_type = ust_loglevel_type;
	event_notifier->event.loglevel = loglevel;
end:
	return ret;
}
/*
 * Create the specified event notifier against the user space tracer of a
 * given application.
 */
static int create_ust_event_notifier(struct ust_app *app,
				     struct ust_app_event_notifier_rule *ua_event_notifier_rule)
{
	int ret = 0;
	enum lttng_condition_status condition_status;
	const struct lttng_condition *condition = nullptr;
	struct lttng_ust_abi_event_notifier event_notifier;
	const struct lttng_event_rule *event_rule = nullptr;
	unsigned int capture_bytecode_count = 0, i;
	enum lttng_condition_status cond_status;
	enum lttng_event_rule_type event_rule_type;

	health_code_update();
	LTTNG_ASSERT(app->event_notifier_group.object);

	condition = lttng_trigger_get_const_condition(ua_event_notifier_rule->trigger);
	LTTNG_ASSERT(condition);
	LTTNG_ASSERT(lttng_condition_get_type(condition) ==
		     LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);

	condition_status = lttng_condition_event_rule_matches_get_rule(condition, &event_rule);
	LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);

	LTTNG_ASSERT(event_rule);

	event_rule_type = lttng_event_rule_get_type(event_rule);
	LTTNG_ASSERT(event_rule_type == LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT ||
		     event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING ||
		     event_rule_type == LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING ||
		     event_rule_type == LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING);

	init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
	event_notifier.event.token = ua_event_notifier_rule->token;
	event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;

	/* Create UST event notifier against the tracer. */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_create_event_notifier(app->sock,
						  &event_notifier,
						  app->event_notifier_group.object,
						  &ua_event_notifier_rule->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = 0;
			DBG3("UST app create event notifier failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			ret = 0;
			WARN("UST app create event notifier failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app create event notifier '%s' failed with ret %d: pid = %d, sock = %d",
			    event_notifier.event.name,
			    ret,
			    app->pid,
			    app->sock);
		}
		goto error;
	}

	ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;

	DBG2("UST app event notifier %s created successfully: app = '%s': pid = %d, object = %p",
	     event_notifier.event.name,
	     app->name,
	     app->pid,
	     ua_event_notifier_rule->obj);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event_notifier_rule->filter) {
		ret = set_ust_object_filter(
			app, ua_event_notifier_rule->filter, ua_event_notifier_rule->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event. */
	if (ua_event_notifier_rule->exclusion) {
		ret = set_ust_object_exclusions(
			app, ua_event_notifier_rule->exclusion, ua_event_notifier_rule->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set the capture bytecodes. */
	cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
		condition, &capture_bytecode_count);
	LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);

	for (i = 0; i < capture_bytecode_count; i++) {
		const struct lttng_bytecode *capture_bytecode =
			lttng_condition_event_rule_matches_get_capture_bytecode_at_index(condition,
											 i);

		ret = set_ust_capture(app, capture_bytecode, i, ua_event_notifier_rule->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/*
	 * We now need to explicitly enable the event, since it
	 * is disabled at creation.
	 */
	ret = enable_ust_object(app, ua_event_notifier_rule->obj);
	if (ret < 0) {
		/*
		 * If we hit an EPERM, something is wrong with our enable call.
		 * If we get an EEXIST, there is a problem on the tracer side
		 * since we just created it.
		 */
		switch (ret) {
		case -LTTNG_UST_ERR_PERM:
			/* Code flow problem. */
			abort();
		case -LTTNG_UST_ERR_EXIST:
			/* It's OK for our use case. */
			ret = 0;
			break;
		default:
			break;
		}

		goto error;
	}

	ua_event_notifier_rule->enabled = true;

error:
	health_code_update();
	return ret;
}
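
/*
 * Added note: the notifier setup above is a fixed sequence; every step after
 * the initial create shares the same 'error' path and relies on the caller
 * (create_ust_app_event_notifier_rule) to tear the object down:
 *
 *   1. lttng_ust_ctl_create_event_notifier()  (under app->sock_lock)
 *   2. set_ust_object_filter()                (only if a filter is present)
 *   3. set_ust_object_exclusions()            (only if exclusions are present)
 *   4. set_ust_capture()                      (once per capture bytecode)
 *   5. enable_ust_object()                    (notifiers are created disabled)
 */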
/*
 * Copy data between an UST app event and a LTT event.
 */
static void shadow_copy_event(struct ust_app_event *ua_event, struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = lttng_bytecode_copy(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
			LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc<lttng_event_exclusion>(exclusion_alloc_size);
		if (ua_event->exclusion == nullptr) {
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion, exclusion_alloc_size);
		}
	}
}
/*
 * Copy data between an UST app channel and a LTT channel.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan, struct ltt_ust_channel *uchan)
{
	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
	ua_chan->attr.output = (lttng_ust_abi_output) uchan->attr.output;
	ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;

	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
/*
 * Copy data between a UST app session and a regular LTT session.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
				struct ltt_ust_session *usess,
				struct ust_app *app)
{
	struct tm *timeinfo;
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	timeinfo = localtime(&app->registration_time);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->abi.bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &usess->metadata_attr);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path,
			       sizeof(ua_sess->path),
			       DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
			       app->name,
			       app->pid,
			       datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path,
			       sizeof(ua_sess->path),
			       DEFAULT_UST_TRACE_UID_PATH,
			       lttng_credentials_get_uid(&ua_sess->real_credentials),
			       app->abi.bits_per_long);
		break;
	default:
		abort();
	}
	if (ret < 0) {
		PERROR("asprintf UST shadow copy session");
		abort();
	}

	strncpy(ua_sess->root_shm_path, usess->root_shm_path, sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path, sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path,
				       sizeof(tmp_shm_path),
				       "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
				       app->name,
				       app->pid,
				       datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path,
				       sizeof(tmp_shm_path),
				       "/" DEFAULT_UST_TRACE_UID_PATH,
				       app->uid,
				       app->abi.bits_per_long);
			break;
		default:
			abort();
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			abort();
		}

		strncat(ua_sess->shm_path,
			tmp_shm_path,
			sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}
	return;

error:
	consumer_output_put(ua_sess->consumer);
}
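
/*
 * Added example (illustrative values): for a per-PID session of an app named
 * "demo" with pid 1234 registered on 2024-01-02 03:04:05, the snprintf above
 * produces a path of the form
 * DEFAULT_UST_TRACE_PID_PATH "/demo-1234-20240102-030405", while the per-UID
 * layout only encodes the uid and the application bitness through
 * DEFAULT_UST_TRACE_UID_PATH.
 */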
/*
 * Lookup session wrapper.
 */
static void __lookup_session_by_app(const struct ltt_ust_session *usess,
				    struct ust_app *app,
				    struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}

/*
 * Return ust app session from the app session hashtable using the UST session
 * id.
 */
static struct ust_app_session *lookup_session_by_app(const struct ltt_ust_session *usess,
						     struct ust_app *app)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == nullptr) {
		goto error;
	}

	return lttng::utils::container_of(node, &ust_app_session::node);

error:
	return nullptr;
}
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
				struct ust_app *app,
				struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(app);

	lttng::urcu::read_lock_guard read_lock;

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(
			ua_sess->id, &reg_pid, ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		goto end;
	}

	/* Initialize registry. */
	reg_pid->registry->reg.ust = ust_registry_session_per_pid_create(
		app,
		app->abi,
		app->version.major,
		app->version.minor,
		reg_pid->root_shm_path,
		reg_pid->shm_path,
		lttng_credentials_get_uid(&ua_sess->effective_credentials),
		lttng_credentials_get_gid(&ua_sess->effective_credentials),
		ua_sess->tracing_id);
	if (!reg_pid->registry->reg.ust) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	return ret;
}
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
				struct ust_app_session *ua_sess,
				struct ust_app *app,
				struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(app);

	lttng::urcu::read_lock_guard read_lock;

	reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id,
					    app->abi.bits_per_long,
					    app->uid,
					    LTTNG_DOMAIN_UST,
					    &reg_uid,
					    ua_sess->root_shm_path,
					    ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		goto end;
	}

	/* Initialize registry. */
	reg_uid->registry->reg.ust = ust_registry_session_per_uid_create(app->abi,
									  app->version.major,
									  app->version.minor,
									  reg_uid->root_shm_path,
									  reg_uid->shm_path,
									  usess->uid,
									  usess->gid,
									  ua_sess->tracing_id,
									  app->uid);
	if (!reg_uid->registry->reg.ust) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_uid_destroy(reg_uid, nullptr);
		goto error;
	}

	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");
end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	return ret;
}
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN, the default code when lttng_ust_ctl_create_session fails.
 */
static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
					  struct ust_app *app,
					  struct ust_app_session **ua_sess_ptr,
					  int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(app);
	LTTNG_ASSERT(ua_sess_ptr);

	health_code_update();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == nullptr) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
		     app->pid,
		     usess->id);
		ua_sess = alloc_ust_app_session();
		if (ua_sess == nullptr) {
			/* Only malloc can fail, so something is really wrong. */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, nullptr);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, nullptr);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		abort();
	}

	health_code_update();

	if (ua_sess->handle == -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG("UST app creating session failed. Application is dead: pid = %d, sock = %d",
				    app->pid,
				    app->sock);
				ret = 0;
			} else if (ret == -EAGAIN) {
				DBG("UST app creating session failed. Communication time out: pid = %d, sock = %d",
				    app->pid,
				    app->sock);
				ret = 0;
			} else {
				ERR("UST app creating session failed with ret %d: pid = %d, sock = %d",
				    ret,
				    app->pid,
				    app->sock);
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error, so
				 * behave as if it will soon unregister or is unusable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node, ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd, &ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
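
/*
 * Added usage sketch (illustrative; assumes the caller holds the locks this
 * file requires for session manipulation):
 *
 *	struct ust_app_session *ua_sess;
 *	int created;
 *
 *	ret = find_or_create_ust_app_session(usess, app, &ua_sess, &created);
 *	if (ret < 0) {
 *		// -ENOMEM, or -ENOTCONN when the application is gone.
 *	}
 */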
/*
 * Match function for a hash table lookup of ust_app_ctx.
 *
 * It matches an ust app context based on the context type and, in the case
 * of perf counters, their name.
 */
static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_ctx *ctx;
	const struct lttng_ust_context_attr *key;

	LTTNG_ASSERT(node);
	LTTNG_ASSERT(_key);

	ctx = caa_container_of(node, struct ust_app_ctx, node.node);
	key = (lttng_ust_context_attr *) _key;

	/* Context type */
	if (ctx->ctx.ctx != key->ctx) {
		goto no_match;
	}

	switch (key->ctx) {
	case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
		/* Check perf counter name. */
		if (strncmp(key->u.perf_counter.name,
			    ctx->ctx.u.perf_counter.name,
			    sizeof(key->u.perf_counter.name)) != 0) {
			goto no_match;
		}
		break;
	case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
		if (strcmp(key->u.app_ctx.provider_name, ctx->ctx.u.app_ctx.provider_name) != 0 ||
		    strcmp(key->u.app_ctx.ctx_name, ctx->ctx.u.app_ctx.ctx_name) != 0) {
			goto no_match;
		}
		break;
	default:
		break;
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}
/*
 * Lookup for an ust app context from an lttng_ust_context.
 *
 * Must be called while holding RCU read side lock.
 * Return an ust_app_ctx object or NULL on error.
 */
static struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
						struct lttng_ust_context_attr *uctx)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct ust_app_ctx *app_ctx = nullptr;

	LTTNG_ASSERT(uctx);
	LTTNG_ASSERT(ht);
	ASSERT_RCU_READ_LOCKED();

	/* Lookup using the lttng_ust_context_type and a custom match fct. */
	cds_lfht_lookup(ht->ht,
			ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
			ht_match_ust_app_ctx,
			uctx,
			&iter.iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (!node) {
		goto end;
	}

	app_ctx = lttng::utils::container_of(node, &ust_app_ctx::node);

end:
	return app_ctx;
}
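
/*
 * Added note: this is the usual urcu custom-match lookup idiom. The hash is
 * computed from the context *type* alone, so every context of a given type
 * hashes to the same bucket and ht_match_ust_app_ctx() disambiguates perf
 * counter and app contexts by name.
 */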
/*
 * Create a context for the channel on the tracer.
 *
 * Called with UST app session lock held and a RCU read side lock.
 */
static int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
					  struct lttng_ust_context_attr *uctx,
					  struct ust_app *app)
{
	int ret = 0;
	struct ust_app_ctx *ua_ctx;

	ASSERT_RCU_READ_LOCKED();

	DBG2("UST app adding context to channel %s", ua_chan->name);

	ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
	if (ua_ctx) {
		ret = -EEXIST;
		goto error;
	}

	/* Allocate a new context. */
	ua_ctx = alloc_ust_app_ctx(uctx);
	if (ua_ctx == nullptr) {
		ret = -ENOMEM;
		goto error;
	}

	lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
	lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
	cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);

	ret = create_ust_channel_context(ua_chan, ua_ctx, app);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
/*
 * Enable on the tracer side a ust app event for the session and channel.
 *
 * Called with UST app session lock held.
 */
static int enable_ust_app_event(struct ust_app_event *ua_event, struct ust_app *app)
{
	int ret;

	ret = enable_ust_object(app, ua_event->obj);
	if (ret < 0) {
		goto error;
	}

	ua_event->enabled = true;

error:
	return ret;
}

/*
 * Disable on the tracer side a ust app event for the session and channel.
 */
static int disable_ust_app_event(struct ust_app_event *ua_event, struct ust_app *app)
{
	int ret;

	ret = disable_ust_object(app, ua_event->obj);
	if (ret < 0) {
		goto error;
	}

	ua_event->enabled = false;

error:
	return ret;
}

/*
 * Lookup ust app channel for session and disable it on the tracer side.
 */
static int disable_ust_app_channel(struct ust_app_session *ua_sess,
				   struct ust_app_channel *ua_chan,
				   struct ust_app *app)
{
	int ret;

	ret = disable_ust_channel(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	ua_chan->enabled = false;

error:
	return ret;
}
/*
 * Lookup ust app channel for session and enable it on the tracer side. This
 * MUST be called with a RCU read side lock acquired.
 */
static int enable_ust_app_channel(struct ust_app_session *ua_sess,
				  struct ltt_ust_channel *uchan,
				  struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	ASSERT_RCU_READ_LOCKED();

	lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node == nullptr) {
		DBG2("Unable to find channel %s in ust session id %" PRIu64,
		     uchan->name,
		     ua_sess->tracing_id);
		goto error;
	}

	ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);

	ret = enable_ust_channel(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * Called with UST app session lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
				      struct ust_app_session *ua_sess,
				      struct ust_app_channel *ua_chan,
				      int bitness,
				      lsu::registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(ua_chan);
	LTTNG_ASSERT(registry);

	lttng::urcu::read_lock_guard read_lock;
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * streams we have to expect.
	 */
	ret = ust_consumer_ask_channel(
		ua_sess, ua_chan, usess->consumer, socket, registry, usess->current_trace_chunk);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptors we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	ret = 0;
	goto end;

error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
end:
	return ret;
}
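
/*
 * Added example (illustrative): a channel for which the consumer announces 4
 * streams reserves 1 fd for the channel itself plus
 * DEFAULT_UST_STREAM_FD_NUM (2) * 4 = 8 fds for the streams, i.e. 9
 * LTTNG_FD_APPS reservations in total, all of which are released through the
 * error paths above if any step fails.
 */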
/*
 * Duplicate the ust data object of the ust app stream and save it in the
 * buffer registry stream.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
				   struct ust_app_stream *stream)
{
	int ret;

	LTTNG_ASSERT(reg_stream);
	LTTNG_ASSERT(stream);

	/* Duplicating a stream requires 2 new fds. Reserve them. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 2);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate stream");
		goto error;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj, reg_stream->obj.ust);
	if (ret < 0) {
		ERR("Duplicate stream obj from %p to %p failed with ret %d",
		    reg_stream->obj.ust,
		    stream->obj,
		    ret);
		lttng_fd_put(LTTNG_FD_APPS, 2);
		goto error;
	}
	stream->handle = stream->obj->handle;

error:
	return ret;
}
/*
 * Duplicate the ust data object of the ust app channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
				    struct ust_app_channel *ua_chan)
{
	int ret;

	LTTNG_ASSERT(buf_reg_chan);
	LTTNG_ASSERT(ua_chan);

	/* Duplicating a channel requires 1 new fd. Reserve it. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for the channel once the original is in the registry. */
	ret = lttng_ust_ctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
		    buf_reg_chan->obj.ust,
		    ua_chan->obj,
		    ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
/*
 * For a given channel buffer registry, setup all streams of the given ust
 * application channel.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
				    struct ust_app_channel *ua_chan,
				    struct ust_app *app)
{
	int ret = 0;
	struct ust_app_stream *stream, *stmp;

	LTTNG_ASSERT(buf_reg_chan);
	LTTNG_ASSERT(ua_chan);

	DBG2("UST app setup buffer registry stream");

	/* Send all streams to application. */
	cds_list_for_each_entry_safe (stream, stmp, &ua_chan->streams.head, list) {
		struct buffer_reg_stream *reg_stream;

		ret = buffer_reg_stream_create(&reg_stream);
		if (ret < 0) {
			goto error;
		}

		/*
		 * Keep original pointer and nullify it in the stream so the delete
		 * stream call does not release the object.
		 */
		reg_stream->obj.ust = stream->obj;
		stream->obj = nullptr;
		buffer_reg_stream_add(reg_stream, buf_reg_chan);

		/* We don't need the streams anymore. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}

error:
	return ret;
}
/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important: the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
				     struct ust_app_channel *ua_chan,
				     struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *buf_reg_chan = nullptr;

	LTTNG_ASSERT(reg_sess);
	LTTNG_ASSERT(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	LTTNG_ASSERT(buf_reg_chan);
	buf_reg_chan->consumer_key = ua_chan->key;
	buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
	buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;

	/* Create and add a channel registry to session. */
	try {
		reg_sess->reg.ust->add_channel(ua_chan->tracing_channel_id);
	} catch (const std::exception& ex) {
		ERR("Failed to add a channel registry to userspace registry session: %s",
		    ex.what());
		ret = -1;
		goto error;
	}

	buffer_reg_channel_add(reg_sess, buf_reg_chan);

	if (regp) {
		*regp = buf_reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
/*
 * Setup buffer registry channel for the given session registry and application
 * channel object. If regp pointer is valid, it's set with the created object.
 *
 * Return 0 on success else a negative value.
 */
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
				    struct ust_app_channel *ua_chan,
				    struct buffer_reg_channel *buf_reg_chan,
				    struct ust_app *app)
{
	int ret;

	LTTNG_ASSERT(reg_sess);
	LTTNG_ASSERT(buf_reg_chan);
	LTTNG_ASSERT(ua_chan);
	LTTNG_ASSERT(ua_chan->obj);

	DBG2("UST app setup buffer registry channel for %s", ua_chan->name);

	/* Setup all streams for the registry. */
	ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
	if (ret < 0) {
		goto error;
	}

	buf_reg_chan->obj.ust = ua_chan->obj;
	ua_chan->obj = nullptr;

	return 0;

error:
	buffer_reg_channel_remove(reg_sess, buf_reg_chan);
	buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
	return ret;
}
/*
 * Send buffer registry channel to the application.
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
				   struct ust_app *app,
				   struct ust_app_session *ua_sess,
				   struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	LTTNG_ASSERT(buf_reg_chan);
	LTTNG_ASSERT(app);
	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	ret = duplicate_channel_object(buf_reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret == -EAGAIN) {
		/* Caused by timeout. */
		WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64
		     "\".",
		     app->pid,
		     ua_chan->name,
		     ua_sess->tracing_id);
		/* Treat this the same way as an application that is exiting. */
		ret = -ENOTCONN;
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
	cds_list_for_each_entry (reg_stream, &buf_reg_chan->streams, lnode) {
		struct ust_app_stream stream = {};

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			} else if (ret == -EAGAIN) {
				/*
				 * Caused by timeout.
				 * Treat this the same way as an application
				 * that is exiting.
				 */
				WARN("Communication with application %d timed out on send_stream for stream of channel \"%s\" of session \"%" PRIu64
				     "\".",
				     app->pid,
				     ua_chan->name,
				     ua_sess->tracing_id);
				ret = -ENOTCONN;
			}
			(void) release_ust_app_stream(-1, &stream, app);
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}

error_stream_unlock:
	pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
error:
	return ret;
}
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * This MUST be called with a RCU read side lock acquired.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
				  struct ltt_ust_session *usess,
				  struct ust_app_session *ua_sess,
				  struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *buf_reg_chan;
	struct ltt_session *session = nullptr;
	enum lttng_error_code notification_ret;

	LTTNG_ASSERT(app);
	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(ua_chan);
	ASSERT_RCU_READ_LOCKED();

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	LTTNG_ASSERT(reg_uid);

	buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id, reg_uid);
	if (buf_reg_chan) {
		goto send_channel;
	}

	/* Create the buffer registry channel object. */
	ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance", ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	LTTNG_ASSERT(session);
	ASSERT_LOCKED(session->lock);
	ASSERT_SESSION_LIST_LOCKED();

	/*
	 * Create the buffers on the consumer side. This call populates the
	 * ust app channel object with all streams and data object.
	 */
	ret = do_consumer_create_channel(
		usess, ua_sess, ua_chan, app->abi.bits_per_long, reg_uid->registry->reg.ust);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon", ua_chan->name);

		/*
		 * Let's remove the previously created buffer registry channel so
		 * it's not visible anymore in the session registry.
		 */
		auto locked_registry = reg_uid->registry->reg.ust->lock();
		try {
			locked_registry->remove_channel(ua_chan->tracing_channel_id, false);
		} catch (const std::exception& ex) {
			DBG("Could not find channel for removal: %s", ex.what());
		}
		buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
		buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
		goto error;
	}

	/*
	 * Setup the streams and add them to the session registry.
	 */
	ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, buf_reg_chan, app);
	if (ret < 0) {
		ERR("Error setting up UST channel \"%s\"", ua_chan->name);
		goto error;
	}

	{
		auto locked_registry = reg_uid->registry->reg.ust->lock();
		auto& ust_reg_chan = locked_registry->channel(ua_chan->tracing_channel_id);

		ust_reg_chan._consumer_key = ua_chan->key;
	}

	/* Notify the notification subsystem of the channel's creation. */
	notification_ret = notification_thread_command_add_channel(
		the_notification_thread_handle,
		session->id,
		ua_chan->name,
		ua_chan->key,
		LTTNG_DOMAIN_UST,
		ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (notification_ret != LTTNG_OK) {
		ret = -(int) notification_ret;
		ERR("Failed to add channel to notification thread");
		goto error;
	}

send_channel:
	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	if (session) {
		session_put(session);
	}
	return ret;
}
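
/*
 * Added note: in this per-UID path the buffer registry channel is shared by
 * every application with the same uid and bitness. Only the first application
 * to request the channel pays for its creation; later applications find the
 * registry channel above and jump straight to send_channel.
 */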
/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Called with UST app session lock held.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_pid(struct ust_app *app,
				  struct ltt_ust_session *usess,
				  struct ust_app_session *ua_sess,
				  struct ust_app_channel *ua_chan)
{
	int ret;
	lsu::registry_session *registry;
	enum lttng_error_code cmd_ret;
	struct ltt_session *session = nullptr;
	uint64_t chan_reg_key;

	LTTNG_ASSERT(app);
	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	lttng::urcu::read_lock_guard read_lock;

	registry = get_session_registry(ua_sess);
	/* The UST app session lock is held, registry shall not be null. */
	LTTNG_ASSERT(registry);

	/* Create and add a new channel registry to session. */
	try {
		registry->add_channel(ua_chan->key);
	} catch (const std::exception& ex) {
		ERR("Error creating the UST channel \"%s\" registry instance: %s",
		    ua_chan->name,
		    ex.what());
		ret = -1;
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	LTTNG_ASSERT(session);
	ASSERT_LOCKED(session->lock);
	ASSERT_SESSION_LIST_LOCKED();

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan, app->abi.bits_per_long, registry);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon", ua_chan->name);
		goto error_remove_from_registry;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error_remove_from_registry;
	}

	chan_reg_key = ua_chan->key;
	{
		auto locked_registry = registry->lock();

		auto& ust_reg_chan = locked_registry->channel(chan_reg_key);
		ust_reg_chan._consumer_key = ua_chan->key;
	}

	cmd_ret = notification_thread_command_add_channel(the_notification_thread_handle,
							  session->id,
							  ua_chan->name,
							  ua_chan->key,
							  LTTNG_DOMAIN_UST,
							  ua_chan->attr.subbuf_size *
								  ua_chan->attr.num_subbuf);
	if (cmd_ret != LTTNG_OK) {
		ret = -(int) cmd_ret;
		ERR("Failed to add channel to notification thread");
		goto error_remove_from_registry;
	}

error_remove_from_registry:
	if (ret) {
		try {
			auto locked_registry = registry->lock();
			locked_registry->remove_channel(ua_chan->key, false);
		} catch (const std::exception& ex) {
			DBG("Could not find channel for removal: %s", ex.what());
		}
	}
error:
	if (session) {
		session_put(session);
	}
	return ret;
}
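
/*
 * Added note: unlike the per-UID path, which keys the registry channel on
 * ua_chan->tracing_channel_id, this per-PID path keys it on the consumer key
 * (ua_chan->key); the registry here is private to the application, so the
 * consumer key is unambiguous within it.
 */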
/*
 * From an already allocated ust app channel, create the channel buffers if
 * needed and send them to the application. This MUST be called with a RCU read
 * side lock acquired.
 *
 * Called with UST app session lock held.
 *
 * Return 0 on success or else a negative value. Returns -ENOTCONN if
 * the application exited concurrently.
 */
static int ust_app_channel_send(struct ust_app *app,
				struct ltt_ust_session *usess,
				struct ust_app_session *ua_sess,
				struct ust_app_channel *ua_chan)
{
	int ret;

	LTTNG_ASSERT(app);
	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(usess->active);
	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(ua_chan);
	ASSERT_RCU_READ_LOCKED();

	/* Handle buffer type before sending the channel to the application. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
		break;
	}
	default:
		abort();
	}

	/* Initialize ust objd object using the received handle and add it. */
	lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
	lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);

	/* If channel is not enabled, disable it on the tracer */
	if (!ua_chan->enabled) {
		ret = disable_ust_channel(app, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Create UST app channel and return it through ua_chanp if not NULL.
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
				    struct ltt_ust_channel *uchan,
				    enum lttng_ust_abi_chan_type type,
				    struct ltt_ust_session *usess __attribute__((unused)),
				    struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	ASSERT_RCU_READ_LOCKED();

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != nullptr) {
		ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == nullptr) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	return ret;
}
/*
 * Create UST app event and create it on the tracer side.
 *
 * Must be called with the RCU read side lock held.
 * Called with ust app session mutex held.
 */
static int create_ust_app_event(struct ust_app_channel *ua_chan,
				struct ltt_ust_event *uevent,
				struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	ASSERT_RCU_READ_LOCKED();

	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == nullptr) {
		/* Only failure mode of alloc_ust_app_event(). */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_chan, ua_event);
	if (ret < 0) {
		/*
		 * Not found previously means that it does not exist on the
		 * tracer. If the application reports that the event existed,
		 * it means there is a bug in the sessiond or lttng-ust
		 * (or corruption, etc.)
		 */
		if (ret == -LTTNG_UST_ERR_EXIST) {
			ERR("Tracer for application reported that an event being created already existed: "
			    "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
			    uevent->attr.name,
			    app->pid,
			    app->ppid,
			    app->uid,
			    app->gid);
		}
		goto error;
	}

	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event completed: app = '%s' pid = %d", app->name, app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event, app);
	return ret;
}
/*
 * Create UST app event notifier rule and create it on the tracer side.
 *
 * Must be called with the RCU read side lock held.
 * Called with ust app session mutex held.
 */
static int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;

	ASSERT_RCU_READ_LOCKED();

	ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
	if (ua_event_notifier_rule == nullptr) {
		ret = -ENOMEM;
		goto end;
	}

	/* Create it on the tracer side. */
	ret = create_ust_event_notifier(app, ua_event_notifier_rule);
	if (ret < 0) {
		/*
		 * Not found previously means that it does not exist on the
		 * tracer. If the application reports that the event existed,
		 * it means there is a bug in the sessiond or lttng-ust
		 * (or corruption, etc.)
		 */
		if (ret == -LTTNG_UST_ERR_EXIST) {
			ERR("Tracer for application reported that an event notifier being created already exists: "
			    "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
			    lttng_trigger_get_tracer_token(trigger),
			    app->pid,
			    app->ppid,
			    app->uid,
			    app->gid);
		}
		goto error;
	}

	lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
				&ua_event_notifier_rule->node);

	DBG2("UST app create token event rule completed: app = '%s', pid = %d, token = %" PRIu64,
	     app->name,
	     app->pid,
	     lttng_trigger_get_tracer_token(trigger));

end:
	return ret;

error:
	/* The RCU read side lock is already being held by the caller. */
	delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
	return ret;
}
/*
 * Create UST metadata and open it on the tracer side.
 *
 * Called with UST app session lock held and RCU read side lock.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
				   struct ust_app *app,
				   struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ltt_session *session = nullptr;

	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(app);
	LTTNG_ASSERT(consumer);
	ASSERT_RCU_READ_LOCKED();

	auto locked_registry = get_locked_session_registry(ua_sess);
	/* The UST app session lock is held, registry shall not be null. */
	LTTNG_ASSERT(locked_registry);

	/* Metadata already exists for this registry or it was closed previously */
	if (locked_registry->_metadata_key || locked_registry->_metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, nullptr);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata while the ask_channel call on our side
	 * has not returned yet.
	 */
	locked_registry->_metadata_key = metadata->key;

	session = session_find_by_id(ua_sess->tracing_id);
	LTTNG_ASSERT(session);
	ASSERT_LOCKED(session->lock);
	ASSERT_SESSION_LIST_LOCKED();

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept there. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess,
				       metadata,
				       consumer,
				       socket,
				       locked_registry.get(),
				       session->current_trace_chunk);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		locked_registry->_metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadata. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		locked_registry->_metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d", metadata->key, app->pid);

error_consumer:
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app, locked_registry);
error:
	if (session) {
		session_put(session);
	}
	return ret;
}
/*
 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
 * acquired before calling this function.
 */
struct ust_app *ust_app_find_by_pid(pid_t pid)
{
	struct ust_app *app = nullptr;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht, (void *) ((unsigned long) pid), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == nullptr) {
		DBG2("UST app not found with pid %d", pid);
		goto error;
	}

	DBG2("Found UST app by pid %d", pid);

	app = lttng::utils::container_of(node, &ust_app::pid_n);

error:
	return app;
}
/*
 * Allocate and init an UST app object using the registration information and
 * the command socket. This is called when the command socket connects to the
 * session daemon.
 *
 * The object is returned on success or else NULL.
 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	int ret;
	struct ust_app *lta = nullptr;
	struct lttng_pipe *event_notifier_event_source_pipe = nullptr;

	LTTNG_ASSERT(msg);
	LTTNG_ASSERT(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	if ((msg->bits_per_long == 64 && (uatomic_read(&the_ust_consumerd64_fd) == -EINVAL)) ||
	    (msg->bits_per_long == 32 && (uatomic_read(&the_ust_consumerd32_fd) == -EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
		    "%d-bit long, but no consumerd for this size is available.\n",
		    msg->name,
		    msg->pid,
		    msg->bits_per_long);
		goto error;
	}

	/*
	 * Reserve the two file descriptors of the event source pipe. The write
	 * end will be closed once it is passed to the application, at which
	 * point a single 'put' will be performed.
	 */
	ret = lttng_fd_get(LTTNG_FD_APPS, 2);
	if (ret) {
		ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s', pid = %d",
		    msg->name,
		    (int) msg->pid);
		goto error;
	}

	event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!event_notifier_event_source_pipe) {
		PERROR("Failed to open application event source pipe: '%s' (pid = %d)",
		       msg->name,
		       msg->pid);
		goto error;
	}

	lta = zmalloc<ust_app>();
	if (lta == nullptr) {
		goto error_free_pipe;
	}

	urcu_ref_init(&lta->ref);

	lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;

	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	lta->abi = {
		.bits_per_long = msg->bits_per_long,
		.long_alignment = msg->long_alignment,
		.uint8_t_alignment = msg->uint8_t_alignment,
		.uint16_t_alignment = msg->uint16_t_alignment,
		.uint32_t_alignment = msg->uint32_t_alignment,
		.uint64_t_alignment = msg->uint64_t_alignment,
		.byte_order = msg->byte_order == LITTLE_ENDIAN ?
			lttng::sessiond::trace::byte_order::LITTLE_ENDIAN_ :
			lttng::sessiond::trace::byte_order::BIG_ENDIAN_,
	};

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->notify_sock = -1;
	lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	pthread_mutex_init(&lta->sock_lock, nullptr);
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);
	return lta;

error_free_pipe:
	lttng_pipe_destroy(event_notifier_event_source_pipe);
	lttng_fd_put(LTTNG_FD_APPS, 2);
error:
	return nullptr;
}
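
/*
 * Added sketch of the registration flow (illustrative only; the actual call
 * sites live in the registration and notify-socket threads):
 *
 *	struct ust_app *app = ust_app_create(&msg, sock);
 *	if (app && ust_app_version(app) == 0) {
 *		app->notify_sock = notify_sock;  // hypothetical local variable
 *		ust_app_add(app);
 *	}
 */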
/*
 * For a given application object, add it to every hash table.
 */
void ust_app_add(struct ust_app *app)
{
	LTTNG_ASSERT(app);
	LTTNG_ASSERT(app->notify_sock >= 0);

	app->registration_time = time(nullptr);

	lttng::urcu::read_lock_guard read_lock;

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid.
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, an add_unique
	 * for ust_app_ht_by_sock is used, which asserts if the entry is
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock =%d name:%s "
	    "notify_sock =%d (version %d.%d)",
	    app->pid,
	    app->ppid,
	    app->uid,
	    app->gid,
	    app->sock,
	    app->name,
	    app->notify_sock,
	    app->v_major,
	    app->v_minor);
}
/*
 * Set the application version into the object.
 *
 * Return 0 on success else a negative value either an errno code or a
 * LTTng-UST error code.
 */
int ust_app_version(struct ust_app *app)
{
	int ret;

	LTTNG_ASSERT(app);

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_tracer_version(app->sock, &app->version);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -LTTNG_UST_ERR_EXITING || ret == -EPIPE) {
			DBG3("UST app version failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app version failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app version failed with ret %d: pid = %d, sock = %d",
			    ret,
			    app->pid,
			    app->sock);
		}
	}

	return ret;
}
bool ust_app_supports_notifiers(const struct ust_app *app)
{
	return app->v_major >= 9;
}

bool ust_app_supports_counters(const struct ust_app *app)
{
	return app->v_major >= 9;
}
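
/*
 * Added note: both predicates gate on the tracer ABI major version reported
 * at registration; as of this check, major version 9 is the first to provide
 * event notifiers and counters.
 */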
/*
 * Setup the base event notifier group.
 *
 * Return 0 on success else a negative value either an errno code or a
 * LTTng-UST error code.
 */
int ust_app_setup_event_notifier_group(struct ust_app *app)
{
	int ret;
	int event_pipe_write_fd;
	struct lttng_ust_abi_object_data *event_notifier_group = nullptr;
	enum lttng_error_code lttng_ret;
	enum event_notifier_error_accounting_status event_notifier_error_accounting_status;

	LTTNG_ASSERT(app);

	if (!ust_app_supports_notifiers(app)) {
		ret = -ENOSYS;
		goto error;
	}

	/* Get the write side of the pipe. */
	event_pipe_write_fd = lttng_pipe_get_writefd(app->event_notifier_group.event_pipe);

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_create_event_notifier_group(
		app->sock, event_pipe_write_fd, &event_notifier_group);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = 0;
			DBG3("UST app create event notifier group failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			ret = 0;
			WARN("UST app create event notifier group failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app create event notifier group failed with ret %d: pid = %d, sock = %d, event_pipe_write_fd: %d",
			    ret,
			    app->pid,
			    app->sock,
			    event_pipe_write_fd);
		}
		goto error;
	}

	ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
	if (ret) {
		ERR("Failed to close write end of the application's event source pipe: app = '%s' (pid = %d)",
		    app->name,
		    app->pid);
		goto error;
	}

	/*
	 * Release the file descriptor that was reserved for the write-end of
	 * the pipe.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);

	lttng_ret = notification_thread_command_add_tracer_event_source(
		the_notification_thread_handle,
		lttng_pipe_get_readfd(app->event_notifier_group.event_pipe),
		LTTNG_DOMAIN_UST);
	if (lttng_ret != LTTNG_OK) {
		ERR("Failed to add tracer event source to notification thread");
		ret = -1;
		goto error;
	}

	/* Assign handle only when the complete setup is valid. */
	app->event_notifier_group.object = event_notifier_group;

	event_notifier_error_accounting_status = event_notifier_error_accounting_register_app(app);
	switch (event_notifier_error_accounting_status) {
	case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
		break;
	case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED:
		DBG3("Failed to setup event notifier error accounting (application does not support notifier error accounting): app socket fd = %d, app name = '%s', app pid = %d",
		     app->sock,
		     app->name,
		     (int) app->pid);
		ret = 0;
		goto error_accounting;
	case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD:
		DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d, app name = '%s', app pid = %d",
		     app->sock,
		     app->name,
		     (int) app->pid);
		ret = 0;
		goto error_accounting;
	default:
		ERR("Failed to setup event notifier error accounting for app");
		ret = -1;
		goto error_accounting;
	}

	return 0;

error_accounting:
	lttng_ret = notification_thread_command_remove_tracer_event_source(
		the_notification_thread_handle,
		lttng_pipe_get_readfd(app->event_notifier_group.event_pipe));
	if (lttng_ret != LTTNG_OK) {
		ERR("Failed to remove application tracer event source from notification thread");
	}

error:
	lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.object);
	free(app->event_notifier_group.object);
	app->event_notifier_group.object = nullptr;
	return ret;
}
static void ust_app_unregister(ust_app& app)
{
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;

	lttng::urcu::read_lock_guard read_lock;

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry (app.sessions->ht, &iter.iter, ua_sess, node.node) {
		const auto del_ret = lttng_ht_del(app.sessions, &iter);
		if (del_ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			(void) ust_app_flush_app_session(app, *ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		lttng::pthread::lock_guard ust_app_session_lock(ua_sess->lock);

		if (ua_sess->deleted) {
			continue;
		}

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ends.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		auto locked_registry = get_locked_session_registry(ua_sess);
		if (locked_registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(locked_registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flagged the metadata registry
			 * to close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				const auto metadata_key = locked_registry->_metadata_key;
				const auto consumer_bitness = locked_registry->abi.bits_per_long;

				if (!locked_registry->_metadata_closed && metadata_key != 0) {
					locked_registry->_metadata_closed = true;
				}

				/* Release lock before communication, see comments in
				 * close_metadata(). */
				locked_registry.reset();
				(void) close_metadata(
					metadata_key, consumer_bitness, ua_sess->consumer);
			} else {
				locked_registry.reset();
			}
		}

		cds_list_add(&ua_sess->teardown_node, &app.teardown_head);
	}

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the
	 * apps_notify_thread.
	 */
	iter.iter.node = &app.notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &app.pid_n.node;
	if (lttng_ht_del(ust_app_ht, &iter)) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse", app.pid);
	}
}
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point, so there is no need to close it.
 */
void ust_app_unregister_by_socket(int sock_fd)
{
	struct ust_app *app;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	int ret;

	lttng::urcu::read_lock_guard read_lock;

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock_fd), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	LTTNG_ASSERT(node);

	app = caa_container_of(node, struct ust_app, sock_n);

	DBG_FMT("Application unregistering after socket activity: pid={}, socket_fd={}",
		app->pid,
		sock_fd);

	/* Remove application from socket hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	LTTNG_ASSERT(!ret);

	/*
	 * The socket is closed: release its reference to the application
	 * to trigger its eventual teardown.
	 */
	ust_app_put(app);
}

/*
 * Fill the events array with the names of all events of all registered apps.
 */
int ust_app_list_events(struct lttng_event **events)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event *tmp_event;

	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = calloc<lttng_event>(nbmem);
	if (tmp_event == nullptr) {
		PERROR("zmalloc ust app events");
		ret = -ENOMEM;
		goto error;
	}

	{
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct lttng_ust_abi_tracepoint_iter uiter;

			health_code_update();

			if (!app->compatible) {
				/*
				 * TODO: In time, we should notify the caller of this error by
				 * telling them that this is a version error.
				 */
				continue;
			}

			pthread_mutex_lock(&app->sock_lock);
			handle = lttng_ust_ctl_tracepoint_list(app->sock);
			if (handle < 0) {
				if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
					ERR("UST app list events getting handle failed for app pid %d",
					    app->pid);
				}
				pthread_mutex_unlock(&app->sock_lock);
				continue;
			}

			while ((ret = lttng_ust_ctl_tracepoint_list_get(
					app->sock, handle, &uiter)) != -LTTNG_UST_ERR_NOENT) {
				/* Handle ustctl error. */
				if (ret < 0) {
					int release_ret;

					if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
						ERR("UST app tp list get failed for app %d with ret %d",
						    app->sock,
						    ret);
					} else {
						DBG3("UST app tp list get failed. Application is dead");
						break;
					}

					free(tmp_event);
					release_ret =
						lttng_ust_ctl_release_handle(app->sock, handle);
					if (release_ret < 0 &&
					    release_ret != -LTTNG_UST_ERR_EXITING &&
					    release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d",
						    app->sock,
						    release_ret);
					}

					pthread_mutex_unlock(&app->sock_lock);
					goto rcu_error;
				}

				health_code_update();
				if (count >= nbmem) {
					/* In case the realloc fails, we free the memory */
					struct lttng_event *new_tmp_event;
					size_t new_nbmem;

					new_nbmem = nbmem << 1;
					DBG2("Reallocating event list from %zu to %zu entries",
					     nbmem,
					     new_nbmem);
					new_tmp_event = (lttng_event *) realloc(
						tmp_event, new_nbmem * sizeof(struct lttng_event));
					if (new_tmp_event == nullptr) {
						int release_ret;

						PERROR("realloc ust app events");
						free(tmp_event);
						ret = -ENOMEM;
						release_ret = lttng_ust_ctl_release_handle(
							app->sock, handle);
						if (release_ret < 0 &&
						    release_ret != -LTTNG_UST_ERR_EXITING &&
						    release_ret != -EPIPE) {
							ERR("Error releasing app handle for app %d with ret %d",
							    app->sock,
							    release_ret);
						}

						pthread_mutex_unlock(&app->sock_lock);
						goto rcu_error;
					}
					/* Zero the new memory */
					memset(new_tmp_event + nbmem,
					       0,
					       (new_nbmem - nbmem) * sizeof(struct lttng_event));
					nbmem = new_nbmem;
					tmp_event = new_tmp_event;
				}

				memcpy(tmp_event[count].name,
				       uiter.name,
				       LTTNG_UST_ABI_SYM_NAME_LEN);
				tmp_event[count].loglevel = uiter.loglevel;
				tmp_event[count].type =
					(enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
				tmp_event[count].pid = app->pid;
				tmp_event[count].enabled = -1;
				count++;
			}

			ret = lttng_ust_ctl_release_handle(app->sock, handle);
			pthread_mutex_unlock(&app->sock_lock);
			if (ret < 0) {
				if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
					DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
					     app->pid,
					     app->sock);
				} else if (ret == -EAGAIN) {
					WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
					     app->pid,
					     app->sock);
				} else {
					ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
					    ret,
					    app->pid,
					    app->sock);
				}
			}
		}
	}

	ret = count;
	*events = tmp_event;

	DBG2("UST app list events done (%zu events)", count);

rcu_error:
error:
	health_code_update();
	return ret;
}
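
/*
 * Illustrative usage sketch (not part of this file's call graph): a sessiond
 * command handler could consume ust_app_list_events() as below. The returned
 * array is heap-allocated and owned by the caller; `nb_events` and the
 * surrounding handler are hypothetical names.
 *
 *	struct lttng_event *events = nullptr;
 *	const int nb_events = ust_app_list_events(&events);
 *
 *	if (nb_events < 0) {
 *		return nb_events;	// listing failed
 *	}
 *
 *	for (int i = 0; i < nb_events; i++) {
 *		DBG("tracepoint %s (pid %d)", events[i].name, events[i].pid);
 *	}
 *
 *	free(events);	// caller owns the array
 */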

/*
 * Fill the fields array with the event fields of all registered apps.
 */
int ust_app_list_event_fields(struct lttng_event_field **fields)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event_field *tmp_event;

	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = calloc<lttng_event_field>(nbmem);
	if (tmp_event == nullptr) {
		PERROR("zmalloc ust app event fields");
		ret = -ENOMEM;
		goto error;
	}

	{
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct lttng_ust_abi_field_iter uiter;

			health_code_update();

			if (!app->compatible) {
				/*
				 * TODO: In time, we should notify the caller of this error by
				 * telling them that this is a version error.
				 */
				continue;
			}

			pthread_mutex_lock(&app->sock_lock);
			handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
			if (handle < 0) {
				if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
					ERR("UST app list field getting handle failed for app pid %d",
					    app->pid);
				}
				pthread_mutex_unlock(&app->sock_lock);
				continue;
			}

			while ((ret = lttng_ust_ctl_tracepoint_field_list_get(
					app->sock, handle, &uiter)) != -LTTNG_UST_ERR_NOENT) {
				/* Handle ustctl error. */
				if (ret < 0) {
					int release_ret;

					if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
						ERR("UST app tp list field failed for app %d with ret %d",
						    app->sock,
						    ret);
					} else {
						DBG3("UST app tp list field failed. Application is dead");
						break;
					}

					free(tmp_event);
					release_ret =
						lttng_ust_ctl_release_handle(app->sock, handle);
					pthread_mutex_unlock(&app->sock_lock);
					if (release_ret < 0 &&
					    release_ret != -LTTNG_UST_ERR_EXITING &&
					    release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d",
						    app->sock,
						    release_ret);
					}

					goto rcu_error;
				}

				health_code_update();
				if (count >= nbmem) {
					/* In case the realloc fails, we free the memory */
					struct lttng_event_field *new_tmp_event;
					size_t new_nbmem;

					new_nbmem = nbmem << 1;
					DBG2("Reallocating event field list from %zu to %zu entries",
					     nbmem,
					     new_nbmem);
					new_tmp_event = (lttng_event_field *) realloc(
						tmp_event,
						new_nbmem * sizeof(struct lttng_event_field));
					if (new_tmp_event == nullptr) {
						int release_ret;

						PERROR("realloc ust app event fields");
						free(tmp_event);
						ret = -ENOMEM;
						release_ret = lttng_ust_ctl_release_handle(
							app->sock, handle);
						pthread_mutex_unlock(&app->sock_lock);
						if (release_ret < 0 &&
						    release_ret != -LTTNG_UST_ERR_EXITING &&
						    release_ret != -EPIPE) {
							ERR("Error releasing app handle for app %d with ret %d",
							    app->sock,
							    release_ret);
						}

						goto rcu_error;
					}
					/* Zero the new memory */
					memset(new_tmp_event + nbmem,
					       0,
					       (new_nbmem - nbmem) *
						       sizeof(struct lttng_event_field));
					nbmem = new_nbmem;
					tmp_event = new_tmp_event;
				}

				memcpy(tmp_event[count].field_name,
				       uiter.field_name,
				       LTTNG_UST_ABI_SYM_NAME_LEN);
				/* Mapping between these enums matches 1 to 1. */
				tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
				tmp_event[count].nowrite = uiter.nowrite;

				memcpy(tmp_event[count].event.name,
				       uiter.event_name,
				       LTTNG_UST_ABI_SYM_NAME_LEN);
				tmp_event[count].event.loglevel = uiter.loglevel;
				tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
				tmp_event[count].event.pid = app->pid;
				tmp_event[count].event.enabled = -1;
				count++;
			}

			ret = lttng_ust_ctl_release_handle(app->sock, handle);
			pthread_mutex_unlock(&app->sock_lock);
			if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
				ERR("Error releasing app handle for app %d with ret %d",
				    app->sock,
				    ret);
			}
		}
	}

	ret = count;
	*fields = tmp_event;

	DBG2("UST app list event fields done (%zu events)", count);

rcu_error:
error:
	health_code_update();
	return ret;
}

/*
 * Free and clean all traceable apps of the global list.
 */
void ust_app_clean_list()
{
	int ret;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	DBG2("UST app cleaning registered apps hash table");

	/* Cleanup notify socket hash table */
	if (ust_app_ht_by_notify_sock) {
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (
			ust_app_ht_by_notify_sock->ht, &iter.iter, app, notify_sock_n.node) {
			/*
			 * Assert that all notifiers are gone as all triggers
			 * are unregistered prior to this clean-up.
			 */
			LTTNG_ASSERT(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
			ust_app_notify_sock_unregister(app->notify_sock);
		}
	}

	/* Cleanup socket hash table */
	if (ust_app_ht_by_sock) {
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht_by_sock->ht, &iter.iter, app, sock_n.node) {
			ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
			LTTNG_ASSERT(!ret);
			ust_app_put(app);
		}
	}

	/* Destroy is done only when the ht is empty */
	if (ust_app_ht) {
		lttng_ht_destroy(ust_app_ht);
	}
	if (ust_app_ht_by_sock) {
		lttng_ht_destroy(ust_app_ht_by_sock);
	}
	if (ust_app_ht_by_notify_sock) {
		lttng_ht_destroy(ust_app_ht_by_notify_sock);
	}
}

/*
 * Init the UST app hash tables.
 */
int ust_app_ht_alloc()
{
	ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!ust_app_ht) {
		return -1;
	}
	ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!ust_app_ht_by_sock) {
		return -1;
	}
	ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!ust_app_ht_by_notify_sock) {
		return -1;
	}
	return 0;
}
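
/*
 * Sketch of the expected lifecycle, for illustration only: the three hash
 * tables are allocated once at daemon initialization and torn down through
 * ust_app_clean_list() at shutdown. Error handling is elided.
 *
 *	if (ust_app_ht_alloc() < 0) {
 *		// allocation of one of the three tables failed
 *		return -1;
 *	}
 *	...
 *	ust_app_clean_list();	// unregisters apps, then destroys the tables
 */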

/*
 * For a specific UST session, disable the channel for all registered apps.
 */
int ust_app_disable_channel_glb(struct ltt_ust_session *usess, struct ltt_ust_channel *uchan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	LTTNG_ASSERT(usess->active);
	DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
	     uchan->name,
	     usess->id);

	{
		lttng::urcu::read_lock_guard read_lock;

		/* For every registered application */
		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct lttng_ht_iter uiter;

			if (!app->compatible) {
				/*
				 * TODO: In time, we should notify the caller of this error by
				 * telling them that this is a version error.
				 */
				continue;
			}
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == nullptr) {
				continue;
			}

			/* Get channel */
			lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
			ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
			/* If the session is found for the app, the channel must be there */
			LTTNG_ASSERT(ua_chan_node);

			ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
			/* The channel must not be already disabled */
			LTTNG_ASSERT(ua_chan->enabled);

			/* Disable channel onto application */
			ret = disable_ust_app_channel(ua_sess, ua_chan, app);
			if (ret < 0) {
				/* XXX: We might want to report this error at some point... */
				continue;
			}
		}
	}

	return ret;
}

/*
 * For a specific UST session, enable the channel for all registered apps.
 */
int ust_app_enable_channel_glb(struct ltt_ust_session *usess, struct ltt_ust_channel *uchan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess;

	LTTNG_ASSERT(usess->active);
	DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
	     uchan->name,
	     usess->id);

	{
		lttng::urcu::read_lock_guard read_lock;

		/* For every registered application */
		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			if (!app->compatible) {
				/*
				 * TODO: In time, we should notify the caller of this error by
				 * telling them that this is a version error.
				 */
				continue;
			}
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == nullptr) {
				continue;
			}

			/* Enable channel onto application */
			ret = enable_ust_app_channel(ua_sess, uchan, app);
			if (ret < 0) {
				/* XXX: We might want to report this error at some point... */
				continue;
			}
		}
	}

	return ret;
}
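
/*
 * Illustrative sketch of the *_glb calling pattern: these helpers are meant
 * to be invoked with the relevant session locks held, once per UST
 * session-level configuration change. `usess` and `uchan` stand for an
 * already-validated session and channel; the enclosing handler is
 * hypothetical.
 *
 *	// Disable, then re-enable a channel for every registered app.
 *	if (ust_app_disable_channel_glb(usess, uchan) < 0) {
 *		DBG("channel disable skipped for some apps");
 *	}
 *	if (ust_app_enable_channel_glb(usess, uchan) < 0) {
 *		DBG("channel enable skipped for some apps");
 *	}
 */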

/*
 * Disable an event in a channel and for a specific session.
 */
int ust_app_disable_event_glb(struct ltt_ust_session *usess,
			      struct ltt_ust_channel *uchan,
			      struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	LTTNG_ASSERT(usess->active);
	DBG("UST app disabling event %s for all apps in channel "
	    "%s for session id %" PRIu64,
	    uevent->attr.name,
	    uchan->name,
	    usess->id);

	{
		lttng::urcu::read_lock_guard read_lock;

		/* For all registered applications */
		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			if (!app->compatible) {
				/*
				 * TODO: In time, we should notify the caller of this error by
				 * telling them that this is a version error.
				 */
				continue;
			}
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == nullptr) {
				/* Next app */
				continue;
			}

			/* Lookup channel in the ust app session */
			lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
			ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
			if (ua_chan_node == nullptr) {
				DBG2("Channel %s not found in session id %" PRIu64
				     " for app pid %d. Skipping",
				     uchan->name,
				     usess->id,
				     app->pid);
				continue;
			}
			ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);

			ua_event = find_ust_app_event(
				ua_chan->events,
				uevent->attr.name,
				uevent->filter,
				(enum lttng_ust_abi_loglevel_type) uevent->attr.loglevel_type,
				uevent->attr.loglevel,
				uevent->exclusion);
			if (ua_event == nullptr) {
				DBG2("Event %s not found in channel %s for app pid %d."
				     " Skipping",
				     uevent->attr.name,
				     uchan->name,
				     app->pid);
				continue;
			}

			ret = disable_ust_app_event(ua_event, app);
			if (ret < 0) {
				/* XXX: Report error someday... */
				continue;
			}
		}
	}

	return ret;
}

/* The ua_sess lock must be held by the caller. */
static int ust_app_channel_create(struct ltt_ust_session *usess,
				  struct ust_app_session *ua_sess,
				  struct ltt_ust_channel *uchan,
				  struct ust_app *app,
				  struct ust_app_channel **_ua_chan)
{
	int ret = 0;
	struct ust_app_channel *ua_chan = nullptr;

	LTTNG_ASSERT(ua_sess);
	ASSERT_LOCKED(ua_sess->lock);

	if (!strncmp(uchan->name, DEFAULT_METADATA_NAME, sizeof(uchan->name))) {
		copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
		ret = 0;
	} else {
		struct ltt_ust_context *uctx = nullptr;

		/*
		 * Create channel onto application and synchronize its
		 * configuration.
		 */
		ret = ust_app_channel_allocate(
			ua_sess, uchan, LTTNG_UST_ABI_CHAN_PER_CPU, usess, &ua_chan);
		if (ret < 0) {
			goto error;
		}

		ret = ust_app_channel_send(app, usess, ua_sess, ua_chan);
		if (ret) {
			goto error;
		}

		/* Add contexts. */
		cds_list_for_each_entry (uctx, &uchan->ctx_list, list) {
			ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
			if (ret) {
				goto error;
			}
		}
	}

error:
	if (ret < 0) {
		switch (ret) {
		case -ENOTCONN:
			/*
			 * The application's socket is not valid. Either a bad socket
			 * or a timeout on it. We can't inform the caller that for a
			 * specific app, the session failed so lets continue here.
			 */
			ret = 0; /* Not an error. */
			break;
		case -ENOMEM:
		default:
			break;
		}
	}

	if (ret == 0 && _ua_chan) {
		/*
		 * Only return the application's channel on success. Note
		 * that the channel can still be part of the application's
		 * channel hashtable on error.
		 */
		*_ua_chan = ua_chan;
	}
	return ret;
}

/*
 * Enable an event for a specific session and channel on the tracer.
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
			     struct ltt_ust_channel *uchan,
			     struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	LTTNG_ASSERT(usess->active);
	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
	    uevent->attr.name,
	    usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps and enabled on the
	 * tracer as well.
	 */

	{
		lttng::urcu::read_lock_guard read_lock;

		/* For all registered applications */
		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			if (!app->compatible) {
				/*
				 * TODO: In time, we should notify the caller of this error by
				 * telling them that this is a version error.
				 */
				continue;
			}
			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* The application has problem or is probably dead. */
				continue;
			}

			pthread_mutex_lock(&ua_sess->lock);

			if (ua_sess->deleted) {
				pthread_mutex_unlock(&ua_sess->lock);
				continue;
			}

			/* Lookup channel in the ust app session */
			lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
			ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
			/*
			 * It is possible that the channel cannot be found if
			 * the channel/event creation occurs concurrently with
			 * an application exit.
			 */
			if (!ua_chan_node) {
				pthread_mutex_unlock(&ua_sess->lock);
				continue;
			}

			ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);

			/* Get event node */
			ua_event = find_ust_app_event(
				ua_chan->events,
				uevent->attr.name,
				uevent->filter,
				(enum lttng_ust_abi_loglevel_type) uevent->attr.loglevel_type,
				uevent->attr.loglevel,
				uevent->exclusion);
			if (ua_event == nullptr) {
				DBG3("UST app enable event %s not found for app PID %d."
				     " Skipping app",
				     uevent->attr.name,
				     app->pid);
				goto next_app;
			}

			ret = enable_ust_app_event(ua_event, app);
			if (ret < 0) {
				pthread_mutex_unlock(&ua_sess->lock);
				goto error;
			}
		next_app:
			pthread_mutex_unlock(&ua_sess->lock);
		}
	}
error:
	return ret;
}

/*
 * For a specific existing UST session and UST channel, creates the event for
 * all registered apps.
 */
int ust_app_create_event_glb(struct ltt_ust_session *usess,
			     struct ltt_ust_channel *uchan,
			     struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	LTTNG_ASSERT(usess->active);
	DBG("UST app creating event %s for all apps for session id %" PRIu64,
	    uevent->attr.name,
	    usess->id);

	{
		lttng::urcu::read_lock_guard read_lock;

		/* For all registered applications */
		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			if (!app->compatible) {
				/*
				 * TODO: In time, we should notify the caller of this error by
				 * telling them that this is a version error.
				 */
				continue;
			}

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* The application has problem or is probably dead. */
				continue;
			}

			pthread_mutex_lock(&ua_sess->lock);

			if (ua_sess->deleted) {
				pthread_mutex_unlock(&ua_sess->lock);
				continue;
			}

			/* Lookup channel in the ust app session */
			lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
			ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
			/* If the channel is not found, there is a code flow error */
			LTTNG_ASSERT(ua_chan_node);

			ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);

			ret = create_ust_app_event(ua_chan, uevent, app);
			pthread_mutex_unlock(&ua_sess->lock);
			if (ret < 0) {
				if (ret != -LTTNG_UST_ERR_EXIST) {
					/* Possible value at this point: -ENOMEM. If so, we stop! */
					break;
				}
				DBG2("UST app event %s already exist on app PID %d",
				     uevent->attr.name,
				     app->pid);
				continue;
			}
		}
	}

	return ret;
}

/*
 * Start tracing for a specific UST session and app.
 *
 * Called with UST app session lock held.
 */
static int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	lttng::urcu::read_lock_guard read_lock;

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == nullptr) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	if (ua_sess->enabled) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	health_code_update();

skip_setup:
	/* This starts the UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_start_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app start session failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		} else if (ret == -EAGAIN) {
			WARN("UST app start session failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		} else {
			ERR("UST app start session failed with ret %d: pid = %d, sock = %d",
			    ret,
			    app->pid,
			    app->sock);
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = true;
	ua_sess->enabled = true;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app wait quiescent failed with ret %d: pid %d, sock = %d",
			    ret,
			    app->pid,
			    app->sock);
		}
	}

end:
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	health_code_update();
	return -1;
}

/*
 * Stop tracing for a specific UST session and app.
 */
static int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	lttng::urcu::read_lock_guard read_lock;

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == nullptr) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end_no_session;
	}

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_stop_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app stop session failed. Application is dead: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
			goto end_unlock;
		} else if (ret == -EAGAIN) {
			WARN("UST app stop session failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
			goto end_unlock;
		} else {
			ERR("UST app stop session failed with ret %d: pid = %d, sock = %d",
			    ret,
			    app->pid,
			    app->sock);
		}
		goto error_rcu_unlock;
	}

	health_code_update();
	ua_sess->enabled = false;

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d",
			    ret,
			    app->pid,
			    app->sock);
		}
	}

	health_code_update();

	{
		auto locked_registry = get_locked_session_registry(ua_sess);

		/* The UST app session lock is held; the registry shall not be null. */
		LTTNG_ASSERT(locked_registry);

		/* Push metadata for application before freeing the application. */
		(void) push_metadata(locked_registry, ua_sess->consumer);
	}

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	health_code_update();
	return -1;
}

static int ust_app_flush_app_session(ust_app& app, ust_app_session& ua_sess)
{
	int ret, retval = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Flushing app session buffers for ust app pid %d", app.pid);

	if (!app.compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess.lock);

	if (ua_sess.deleted) {
		goto end_deleted;
	}

	health_code_update();

	/* Flushing buffers */
	socket = consumer_find_socket_by_bitness(app.abi.bits_per_long, ua_sess.consumer);

	/* Flush buffers and push metadata. */
	switch (ua_sess.buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ua_sess.channels->ht, &iter.iter, ua_chan, node.node) {
			health_code_update();
			ret = consumer_flush_channel(socket, ua_chan->key);
			if (ret) {
				ERR("Error flushing consumer channel");
				retval = -1;
				continue;
			}
		}

		break;
	}
	case LTTNG_BUFFER_PER_UID:
	default:
		abort();
		break;
	}

	health_code_update();

end_deleted:
	pthread_mutex_unlock(&ua_sess.lock);

end_not_compatible:
	health_code_update();
	return retval;
}

/*
 * Flush buffers for all applications for a specific UST session.
 * Called with UST session lock held.
 */
static int ust_app_flush_session(struct ltt_ust_session *usess)
{
	int ret = 0;

	DBG("Flushing session buffers for all ust apps");

	/* Flush buffers and push metadata. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		struct lttng_ht_iter iter;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
			lttng::urcu::read_lock_guard read_lock;
			lsu::registry_session *ust_session_reg;
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata. */
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
								 usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry (
				reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			/* Push metadata. */
			auto locked_registry = ust_session_reg->lock();
			(void) push_metadata(locked_registry, usess->consumer);
		}

		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == nullptr) {
				continue;
			}

			(void) ust_app_flush_app_session(*app, *ua_sess);
		}

		break;
	}
	default:
		ret = -1;
		break;
	}

	health_code_update();
	return ret;
}

static int ust_app_clear_quiescent_app_session(struct ust_app *app, struct ust_app_session *ua_sess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Clearing stream quiescent state for ust app pid %d", app->pid);

	lttng::urcu::read_lock_guard read_lock;

	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_unlock;
	}

	health_code_update();

	socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, ua_sess->consumer);
	if (!socket) {
		ERR("Failed to find consumer (%" PRIu32 ") socket", app->abi.bits_per_long);
		ret = -1;
		goto end_unlock;
	}

	/* Clear quiescent state. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
			health_code_update();
			ret = consumer_clear_quiescent_channel(socket, ua_chan->key);
			if (ret) {
				ERR("Error clearing quiescent state for consumer channel");
				ret = -1;
				continue;
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		abort();
		ret = -1;
		break;
	}

	health_code_update();

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	health_code_update();
	return ret;
}

/*
 * Clear quiescent state in each stream for all applications for a
 * specific UST session.
 * Called with UST session lock held.
 */
static int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
{
	int ret = 0;

	DBG("Clearing stream quiescent state for all ust apps");

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct lttng_ht_iter iter;
		struct buffer_reg_uid *reg;

		/*
		 * Clear quiescent for all per UID buffers associated to
		 * that session.
		 */
		cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
			struct consumer_socket *socket;
			struct buffer_reg_channel *buf_reg_chan;
			lttng::urcu::read_lock_guard read_lock;

			/* Get associated consumer socket. */
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
								 usess->consumer);
			if (!socket) {
				/*
				 * Ignore request if no consumer is found for
				 * the session.
				 */
				continue;
			}

			cds_lfht_for_each_entry (
				reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
				/*
				 * The following call will print error values so
				 * the return code is of little importance
				 * because whatever happens, we have to try them
				 * all.
				 */
				(void) consumer_clear_quiescent_channel(socket,
									buf_reg_chan->consumer_key);
			}
		}

		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == nullptr) {
				continue;
			}
			(void) ust_app_clear_quiescent_app_session(app, ua_sess);
		}

		break;
	}
	default:
		ret = -1;
		break;
	}

	health_code_update();
	return ret;
}

/*
 * Destroy a specific UST session in apps.
 */
static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret;
	struct ust_app_session *ua_sess;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG("Destroy tracing for ust app pid %d", app->pid);

	lttng::urcu::read_lock_guard read_lock;

	if (!app->compatible) {
		goto end;
	}

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == nullptr) {
		/* Session is being or is deleted. */
		goto end;
	}
	ua_sess = lttng::utils::container_of(node, &ust_app_session::node);

	health_code_update();
	destroy_app_session(app, ua_sess);

	health_code_update();

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d",
			    ret,
			    app->pid,
			    app->sock);
		}
	}
end:
	health_code_update();
	return 0;
}

/*
 * Start tracing for the UST session.
 */
int ust_app_start_trace_all(struct ltt_ust_session *usess)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Starting all UST traces");

	/*
	 * Even though the start trace might fail, flag this session active so
	 * other applications coming in are started by default.
	 */
	usess->active = true;

	/*
	 * In a start-stop-start use-case, we need to clear the quiescent state
	 * of each channel set by the prior stop command, thus ensuring that a
	 * following stop or destroy is sure to grab a timestamp_end near those
	 * operations, even if the packet is empty.
	 */
	(void) ust_app_clear_quiescent_session(usess);

	{
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ust_app_global_update(usess, app);
		}
	}

	return 0;
}

/*
 * Stop tracing for the UST session.
 * Called with UST session lock held.
 */
int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Stopping all UST traces");

	/*
	 * Even though the stop trace might fail, flag this session inactive so
	 * other applications coming in are not started by default.
	 */
	usess->active = false;

	{
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ret = ust_app_stop_trace(usess, app);
			if (ret < 0) {
				/* Continue to next apps even on error */
				continue;
			}
		}
	}

	(void) ust_app_flush_session(usess);

	return 0;
}
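
/*
 * Illustrative start/stop sequence, assuming the caller already holds the
 * UST session lock as both functions require; `usess` is a valid session:
 *
 *	// usess->active is toggled inside the calls; a typical stop/restart is
 *	(void) ust_app_stop_trace_all(usess);	// stops apps, flushes buffers
 *	(void) ust_app_start_trace_all(usess);	// clears quiescent state, restarts
 */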

/*
 * Destroy app UST session.
 */
int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Destroy all UST traces");

	{
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ret = destroy_trace(usess, app);
			if (ret < 0) {
				/* Continue to next apps even on error */
				continue;
			}
		}
	}

	return 0;
}

/* The ua_sess lock must be held by the caller. */
static int find_or_create_ust_app_channel(struct ltt_ust_session *usess,
					  struct ust_app_session *ua_sess,
					  struct ust_app *app,
					  struct ltt_ust_channel *uchan,
					  struct ust_app_channel **ua_chan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;

	lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node) {
		*ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
	if (ret) {
		goto end;
	}
end:
	return ret;
}

static int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
					     struct ltt_ust_event *uevent,
					     struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event = nullptr;

	ua_event = find_ust_app_event(ua_chan->events,
				      uevent->attr.name,
				      uevent->filter,
				      (enum lttng_ust_abi_loglevel_type) uevent->attr.loglevel_type,
				      uevent->attr.loglevel,
				      uevent->exclusion);
	if (!ua_event) {
		ret = create_ust_app_event(ua_chan, uevent, app);
		if (ret < 0) {
			goto end;
		}
	} else {
		if (ua_event->enabled != uevent->enabled) {
			ret = uevent->enabled ? enable_ust_app_event(ua_event, app) :
						disable_ust_app_event(ua_event, app);
		}
	}

end:
	return ret;
}

/* Called with RCU read-side lock held. */
static void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
{
	int ret = 0;
	enum lttng_error_code ret_code;
	enum lttng_trigger_status t_status;
	struct lttng_ht_iter app_trigger_iter;
	struct lttng_triggers *triggers = nullptr;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	unsigned int count, i;

	ASSERT_RCU_READ_LOCKED();

	if (!ust_app_supports_notifiers(app)) {
		goto end;
	}

	/*
	 * Currently, registering or unregistering a trigger with an
	 * event rule condition causes a full synchronization of the event
	 * notifiers.
	 *
	 * The first step attempts to add an event notifier for all registered
	 * triggers that apply to the user space tracers. Then, the
	 * application's event notifier rules are all checked against the list
	 * of registered triggers. Any event notifier that doesn't have a
	 * matching trigger can be assumed to have been disabled.
	 *
	 * All of this is inefficient, but is put in place to get the feature
	 * rolling as it is simpler at this moment. It will be optimized Soon™
	 * to allow the state of enabled event notifiers to be synchronized in
	 * a piece-wise way.
	 */

	/* Get all triggers using uid 0 (root) */
	ret_code = notification_thread_command_list_triggers(
		the_notification_thread_handle, 0, &triggers);
	if (ret_code != LTTNG_OK) {
		goto end;
	}

	LTTNG_ASSERT(triggers);

	t_status = lttng_triggers_get_count(triggers, &count);
	if (t_status != LTTNG_TRIGGER_STATUS_OK) {
		goto end;
	}

	for (i = 0; i < count; i++) {
		const struct lttng_condition *condition;
		const struct lttng_event_rule *event_rule;
		struct lttng_trigger *trigger;
		const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
		enum lttng_condition_status condition_status;
		uint64_t token;

		trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
		LTTNG_ASSERT(trigger);

		token = lttng_trigger_get_tracer_token(trigger);
		condition = lttng_trigger_get_const_condition(trigger);

		if (lttng_condition_get_type(condition) !=
		    LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) {
			/* Does not apply */
			continue;
		}

		condition_status =
			lttng_condition_event_rule_matches_get_rule(condition, &event_rule);
		LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);

		if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
			/* Skip kernel related triggers. */
			continue;
		}

		/*
		 * Find or create the associated token event rule. The caller
		 * holds the RCU read lock, so this is safe to call without
		 * explicitly acquiring it here.
		 */
		looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
			app->token_to_event_notifier_rule_ht, token);
		if (!looked_up_event_notifier_rule) {
			ret = create_ust_app_event_notifier_rule(trigger, app);
			if (ret < 0) {
				goto end;
			}
		}
	}

	{
		lttng::urcu::read_lock_guard read_lock;

		/* Remove all unknown event sources from the app. */
		cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
					 &app_trigger_iter.iter,
					 event_notifier_rule,
					 node.node) {
			const uint64_t app_token = event_notifier_rule->token;
			bool found = false;

			/*
			 * Check if the app event trigger still exists on the
			 * notification side.
			 */
			for (i = 0; i < count; i++) {
				uint64_t notification_thread_token;
				const struct lttng_trigger *trigger =
					lttng_triggers_get_at_index(triggers, i);

				LTTNG_ASSERT(trigger);

				notification_thread_token =
					lttng_trigger_get_tracer_token(trigger);

				if (notification_thread_token == app_token) {
					found = true;
					break;
				}
			}

			if (found) {
				/* Still valid. */
				continue;
			}

			/*
			 * This trigger was unregistered, disable it on the tracer's
			 * side.
			 */
			ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
					   &app_trigger_iter);
			LTTNG_ASSERT(ret == 0);

			/* Callee logs errors. */
			(void) disable_ust_object(app, event_notifier_rule->obj);

			delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
		}
	}

end:
	lttng_triggers_destroy(triggers);
	return;
}

/*
 * RCU read lock must be held by the caller.
 */
static void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
					     struct ust_app_session *ua_sess,
					     struct ust_app *app)
{
	int ret = 0;
	struct cds_lfht_iter uchan_iter;
	struct ltt_ust_channel *uchan;

	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(app);
	ASSERT_RCU_READ_LOCKED();

	cds_lfht_for_each_entry (usess->domain_global.channels->ht, &uchan_iter, uchan, node.node) {
		struct ust_app_channel *ua_chan;
		struct cds_lfht_iter uevent_iter;
		struct ltt_ust_event *uevent;

		/*
		 * Search for a matching ust_app_channel. If none is found,
		 * create it. Creating the channel will cause the ua_chan
		 * structure to be allocated, the channel buffers to be
		 * allocated (if necessary) and sent to the application, and
		 * all enabled contexts will be added to the channel.
		 */
		ret = find_or_create_ust_app_channel(usess, ua_sess, app, uchan, &ua_chan);
		if (ret) {
			/* Tracer is probably gone or ENOMEM. */
			return;
		}

		if (!ua_chan) {
			/* ua_chan will be NULL for the metadata channel */
			continue;
		}

		cds_lfht_for_each_entry (uchan->events->ht, &uevent_iter, uevent, node.node) {
			ret = ust_app_channel_synchronize_event(ua_chan, uevent, app);
			if (ret) {
				return;
			}
		}

		if (ua_chan->enabled != uchan->enabled) {
			ret = uchan->enabled ? enable_ust_app_channel(ua_sess, uchan, app) :
					       disable_ust_app_channel(ua_sess, ua_chan, app);
			if (ret) {
				return;
			}
		}
	}
}

/*
 * The caller must ensure that the application is compatible and is tracked
 * by the process attribute trackers.
 */
static void ust_app_synchronize(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess = nullptr;

	/*
	 * The application's configuration should only be synchronized for
	 * active sessions.
	 */
	LTTNG_ASSERT(usess->active);

	ret = find_or_create_ust_app_session(usess, app, &ua_sess, nullptr);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		return;
	}

	LTTNG_ASSERT(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);
	if (ua_sess->deleted) {
		goto deleted_session;
	}

	{
		lttng::urcu::read_lock_guard read_lock;

		ust_app_synchronize_all_channels(usess, ua_sess, app);

		/*
		 * Create the metadata for the application. This returns gracefully if a
		 * metadata was already set for the session.
		 *
		 * The metadata channel must be created after the data channels as the
		 * consumer daemon assumes this ordering. When interacting with a relay
		 * daemon, the consumer will use this assumption to send the
		 * "STREAMS_SENT" message to the relay daemon.
		 */
		ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
		if (ret < 0) {
			ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
			    app->sock,
			    usess->id);
		}
	}

deleted_session:
	pthread_mutex_unlock(&ua_sess->lock);
}

static void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
{
	struct ust_app_session *ua_sess;

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == nullptr) {
		return;
	}
	destroy_app_session(app, ua_sess);
}

/*
 * Add channels/events from UST global domain to registered apps at sock.
 *
 * Called with session lock held.
 * Called with RCU read-side lock held.
 */
void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
{
	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(usess->active);
	ASSERT_RCU_READ_LOCKED();

	DBG2("UST app global update for app sock %d for session id %" PRIu64, app->sock, usess->id);

	if (!app->compatible) {
		return;
	}
	if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID, usess, app->pid) &&
	    trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID, usess, app->uid) &&
	    trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID, usess, app->gid)) {
		/*
		 * Synchronize the application's internal tracing configuration
		 * and start tracing.
		 */
		ust_app_synchronize(usess, app);
		ust_app_start_trace(usess, app);
	} else {
		ust_app_global_destroy(usess, app);
	}
}
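
/*
 * Note on the tracker gating above, with an illustrative re-run: an
 * application is only synchronized if its vPID, vUID and vGID are all allowed
 * by the session's process-attribute trackers; otherwise its per-app session
 * state is torn down. A hypothetical caller reacting to a tracker policy
 * change would simply re-run the update:
 *
 *	{
 *		lttng::urcu::read_lock_guard read_lock;
 *
 *		ust_app_global_update(usess, app);	// re-evaluates the trackers
 *	}
 */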

/*
 * Add all event notifiers to an application.
 *
 * Called with session lock held.
 * Called with RCU read-side lock held.
 */
void ust_app_global_update_event_notifier_rules(struct ust_app *app)
{
	ASSERT_RCU_READ_LOCKED();

	DBG2("UST application global event notifier rules update: app = '%s', pid = %d",
	     app->name,
	     app->pid);

	if (!app->compatible || !ust_app_supports_notifiers(app)) {
		return;
	}

	if (app->event_notifier_group.object == nullptr) {
		WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s', pid = %d",
		     app->name,
		     app->pid);
		return;
	}

	ust_app_synchronize_event_notifier_rules(app);
}

/*
 * Called with session lock held.
 */
void ust_app_global_update_all(struct ltt_ust_session *usess)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	{
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ust_app_global_update(usess, app);
		}
	}
}

void ust_app_global_update_all_event_notifier_rules()
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	lttng::urcu::read_lock_guard read_lock;
	cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ust_app_global_update_event_notifier_rules(app);
	}
}

/*
 * Add context to a specific channel for global UST domain.
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
				struct ltt_ust_channel *uchan,
				struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = nullptr;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	LTTNG_ASSERT(usess->active);

	{
		lttng::urcu::read_lock_guard read_lock;
		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			if (!app->compatible) {
				/*
				 * TODO: In time, we should notify the caller of this error by
				 * telling them that this is a version error.
				 */
				continue;
			}
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == nullptr) {
				continue;
			}

			pthread_mutex_lock(&ua_sess->lock);

			if (ua_sess->deleted) {
				pthread_mutex_unlock(&ua_sess->lock);
				continue;
			}

			/* Lookup channel in the ust app session */
			lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
			ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
			if (ua_chan_node == nullptr) {
				goto next_app;
			}
			ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
			ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
			if (ret < 0) {
				goto next_app;
			}
		next_app:
			pthread_mutex_unlock(&ua_sess->lock);
		}
	}

	return ret;
}

/*
 * Receive registration and populate the given msg structure.
 *
 * On success return 0 else a negative value returned by the ustctl call.
 */
int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
{
	int ret;
	uint32_t pid, ppid, uid, gid;

	LTTNG_ASSERT(msg);

	ret = lttng_ust_ctl_recv_reg_msg(sock,
					 &msg->type,
					 &msg->major,
					 &msg->minor,
					 &pid,
					 &ppid,
					 &uid,
					 &gid,
					 &msg->bits_per_long,
					 &msg->uint8_t_alignment,
					 &msg->uint16_t_alignment,
					 &msg->uint32_t_alignment,
					 &msg->uint64_t_alignment,
					 &msg->long_alignment,
					 &msg->byte_order,
					 msg->name);
	if (ret < 0) {
		switch (-ret) {
		case EPIPE:
		case ECONNRESET:
		case LTTNG_UST_ERR_EXITING:
			DBG3("UST app recv reg message failed. Application died");
			break;
		case LTTNG_UST_ERR_UNSUP_MAJOR:
			ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
			    msg->major,
			    msg->minor,
			    LTTNG_UST_ABI_MAJOR_VERSION,
			    LTTNG_UST_ABI_MINOR_VERSION);
			break;
		default:
			ERR("UST app recv reg message failed with ret %d", ret);
			break;
		}
		goto error;
	}
	msg->pid = (pid_t) pid;
	msg->ppid = (pid_t) ppid;
	msg->uid = (uid_t) uid;
	msg->gid = (gid_t) gid;

error:
	return ret;
}

/*
 * Return a ust app session object using the application object and the
 * session object descriptor as a key. If not found, NULL is returned.
 * A RCU read side lock MUST be acquired when calling this function.
 */
static struct ust_app_session *find_session_by_objd(struct ust_app *app, int objd)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess = nullptr;

	LTTNG_ASSERT(app);
	ASSERT_RCU_READ_LOCKED();

	lttng_ht_lookup(app->ust_sessions_objd, (void *) ((unsigned long) objd), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == nullptr) {
		DBG2("UST app session find by objd %d not found", objd);
		goto error;
	}

	ua_sess = lttng::utils::container_of(node, &ust_app_session::ust_objd_node);

error:
	return ua_sess;
}

/*
 * Return a ust app channel object using the application object and the channel
 * object descriptor as a key. If not found, NULL is returned. A RCU read side
 * lock MUST be acquired before calling this function.
 */
static struct ust_app_channel *find_channel_by_objd(struct ust_app *app, int objd)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan = nullptr;

	LTTNG_ASSERT(app);
	ASSERT_RCU_READ_LOCKED();

	lttng_ht_lookup(app->ust_objd, (void *) ((unsigned long) objd), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == nullptr) {
		DBG2("UST app channel find by objd %d not found", objd);
		goto error;
	}

	ua_chan = lttng::utils::container_of(node, &ust_app_channel::ust_objd_node);

error:
	return ua_chan;
}

/*
 * Reply to a register channel notification from an application on the notify
 * socket. The channel metadata is also created.
 *
 * The session UST registry lock is acquired in this function.
 *
 * On success 0 is returned else a negative value.
 */
static int handle_app_register_channel_notification(int sock,
						    int cobjd,
						    struct lttng_ust_ctl_field *raw_context_fields,
						    size_t context_field_count)
{
	int ret, ret_code = 0;
	uint32_t chan_id;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	auto ust_ctl_context_fields =
		lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::memory::free>(
			raw_context_fields);

	lttng::urcu::read_lock_guard read_lock_guard;

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being torn down. Abort event notify", sock);
		return -1;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being torn down. Abort event notify");
		return 0;
	}

	LTTNG_ASSERT(ua_chan->session);
	ua_sess = ua_chan->session;

	/* Get right session registry depending on the session buffer type. */
	auto locked_registry_session = get_locked_session_registry(ua_sess);
	if (!locked_registry_session) {
		DBG("Application session is being torn down. Abort event notify");
		return 0;
	}

	/* Depending on the buffer type, a different channel key is used. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	auto& ust_reg_chan = locked_registry_session->channel(chan_reg_key);

	/* Channel id is set during the object creation. */
	chan_id = ust_reg_chan.id;

	/*
	 * The application returns the typing information of the channel's
	 * context fields. In per-PID buffering mode, this is the first and only
	 * time we get this information. It is our chance to finalize the
	 * initialization of the channel and serialize its layout's description
	 * to the trace's metadata.
	 *
	 * However, in per-UID buffering mode, every application will provide
	 * this information (redundantly). The first time will allow us to
	 * complete the initialization. The following times, we simply validate
	 * that all apps provide the same typing for the context fields as a
	 * sanity check.
	 */
	try {
		auto app_context_fields = lsu::create_trace_fields_from_ust_ctl_fields(
			*locked_registry_session,
			ust_ctl_context_fields.get(),
			context_field_count,
			lst::field_location::root::EVENT_RECORD_COMMON_CONTEXT,
			lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS);

		if (!ust_reg_chan.is_registered()) {
			lst::type::cuptr event_context = app_context_fields.size() ?
				lttng::make_unique<lst::structure_type>(
					0, std::move(app_context_fields)) :
				nullptr;

			ust_reg_chan.event_context(std::move(event_context));
		} else {
			/*
			 * Validate that the context fields match between
			 * registry and newcoming application.
			 */
			bool context_fields_match;
			const auto *previous_event_context = ust_reg_chan.event_context();

			if (!previous_event_context) {
				context_fields_match = app_context_fields.size() == 0;
			} else {
				const lst::structure_type app_event_context_struct(
					0, std::move(app_context_fields));

				context_fields_match = *previous_event_context ==
					app_event_context_struct;
			}

			if (!context_fields_match) {
				ERR("Registering application channel due to context field mismatch: pid = %d, sock = %d",
				    app->pid,
				    app->sock);
				ret_code = -EINVAL;
				goto reply;
			}
		}
	} catch (const std::exception& ex) {
		ERR("Failed to handle application context: %s", ex.what());
		ret_code = -EINVAL;
		goto reply;
	}

reply:
	DBG3("UST app replying to register channel key %" PRIu64 " with id %u, ret = %d",
	     chan_reg_key,
	     chan_id,
	     ret_code);

	ret = lttng_ust_ctl_reply_register_channel(
		sock,
		chan_id,
		ust_reg_chan.header_type_ == lst::stream_class::header_type::COMPACT ?
			LTTNG_UST_CTL_CHANNEL_HEADER_COMPACT :
			LTTNG_UST_CTL_CHANNEL_HEADER_LARGE,
		ret_code);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app reply channel failed. Application died: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app reply channel failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app reply channel failed with ret %d: pid = %d, sock = %d",
			    ret,
			    app->pid,
			    app->sock);
		}
		goto error;
	}

	/* This channel registry's registration is completed. */
	ust_reg_chan.set_as_registered();

error:
	return ret;
}

/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock,
				  int sobjd,
				  int cobjd,
				  const char *name,
				  char *raw_signature,
				  size_t nr_fields,
				  struct lttng_ust_ctl_field *raw_fields,
				  int loglevel_value,
				  char *raw_model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	lttng::urcu::read_lock_guard rcu_lock;
	auto signature = lttng::make_unique_wrapper<char, lttng::memory::free>(raw_signature);
	auto fields =
		lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::memory::free>(raw_fields);
	auto model_emf_uri =
		lttng::make_unique_wrapper<char, lttng::memory::free>(raw_model_emf_uri);

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being torn down. Abort event notify", sock);
		return -1;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being torn down. Abort event notify");
		return 0;
	}

	LTTNG_ASSERT(ua_chan->session);
	ua_sess = ua_chan->session;

	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	{
		auto locked_registry = get_locked_session_registry(ua_sess);
		if (locked_registry) {
			/*
			 * From this point on, this call acquires the ownership of the
			 * signature, fields and model_emf_uri meaning any free are done
			 * inside it if needed. These three variables MUST NOT be
			 * read/written after this point.
			 */
			try {
				auto& channel = locked_registry->channel(chan_reg_key);

				/* event_id is set on success. */
				channel.add_event(
					sobjd,
					cobjd,
					name,
					signature.get(),
					lsu::create_trace_fields_from_ust_ctl_fields(
						*locked_registry,
						fields.get(),
						nr_fields,
						lst::field_location::root::EVENT_RECORD_PAYLOAD,
						lsu::ctl_field_quirks::
							UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS),
					loglevel_value,
					model_emf_uri.get() ?
						nonstd::optional<std::string>(model_emf_uri.get()) :
						nonstd::nullopt,
					ua_sess->buffer_type,
					*app,
					event_id);
				ret_code = 0;
			} catch (const std::exception& ex) {
				ERR("Failed to add event `%s` to registry session: %s",
				    name,
				    ex.what());
				/* Inform the application of the error; don't return directly. */
				ret_code = -EINVAL;
			}
		} else {
			DBG("Application session is being torn down. Abort event notify");
			return 0;
		}
	}

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = lttng_ust_ctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app reply event failed. Application died: pid = %d, sock = %d.",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app reply event failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app reply event failed with ret %d: pid = %d, sock = %d",
			    ret,
			    app->pid,
			    app->sock);
		}
		/*
		 * No need to wipe the created event since the application socket will
		 * get closed on error, hence cleaning up everything by itself.
		 */
		return ret;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully", name, event_id);
	return ret;
}

/*
 * Add enum to the UST session registry. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired within this function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_enum_ust_registry(int sock,
				 int sobjd,
				 const char *name,
				 struct lttng_ust_ctl_enum_entry *raw_entries,
				 size_t nr_entries)
{
	int ret = 0;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	uint64_t enum_id = -1ULL;
	lttng::urcu::read_lock_guard read_lock_guard;
	auto entries =
		lttng::make_unique_wrapper<struct lttng_ust_ctl_enum_entry, lttng::memory::free>(
			raw_entries);

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		/* Not a fatal error: the application is simply going away. */
		DBG("Application socket %d is being torn down. Aborting enum registration", sock);
		return -1;
	}

	/* Lookup session by UST object descriptor. */
	ua_sess = find_session_by_objd(app, sobjd);
	if (!ua_sess) {
		/* Not a fatal error: the application is simply going away. */
		DBG("Application session is being torn down (session not found). Aborting enum registration.");
		return -1;
	}

	auto locked_registry = get_locked_session_registry(ua_sess);
	if (!locked_registry) {
		DBG("Application session is being torn down (registry not found). Aborting enum registration.");
		return -1;
	}

	/*
	 * From this point on, the callee acquires the ownership of
	 * entries. The variable entries MUST NOT be read/written after
	 * this point.
	 */
	int application_reply_code;
	try {
		locked_registry->create_or_find_enum(
			sobjd, name, entries.release(), nr_entries, &enum_id);
		application_reply_code = 0;
	} catch (const std::exception& ex) {
		ERR_FMT("Failed to create or find enumeration provided by application: app = {}, enumeration name = {}",
			*app,
			name);
		application_reply_code = -1;
	}

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, application_reply_code);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app reply enum failed. Application died: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app reply enum failed. Communication time out: pid = %d, sock = %d",
			     app->pid,
			     app->sock);
		} else {
			ERR("UST app reply enum failed with ret %d: pid = %d, sock = %d",
			    ret,
			    app->pid,
			    app->sock);
		}
		/*
		 * No need to wipe the created enum since the application socket will
		 * get closed on error, hence cleaning up everything by itself.
		 */
		return ret;
	}

	DBG3("UST registry enum %s added successfully or already found", name);
	return 0;
}
/*
 * Handle application notification through the given notify socket.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum lttng_ust_ctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = lttng_ust_ctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app recv notify failed. Application died: sock = %d", sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app recv notify failed. Communication time out: sock = %d", sock);
		} else {
			ERR("UST app recv notify failed with ret %d: sock = %d", ret, sock);
		}
		goto error;
	}

	switch (cmd) {
	case LTTNG_UST_CTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel_value;
		char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct lttng_ust_ctl_field *fields;

		DBG2("UST app ustctl register event received");

		ret = lttng_ust_ctl_recv_register_event(sock,
							&sobjd,
							&cobjd,
							name,
							&loglevel_value,
							&sig,
							&nr_fields,
							&fields,
							&model_emf_uri);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app recv event failed. Application died: sock = %d",
				     sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app recv event failed. Communication time out: sock = %d",
				     sock);
			} else {
				ERR("UST app recv event failed with ret %d: sock = %d", ret, sock);
			}
			goto error;
		}

		{
			lttng::urcu::read_lock_guard rcu_lock;
			const struct ust_app *app = find_app_by_notify_sock(sock);
			if (!app) {
				DBG("Application socket %d is being torn down. Abort event notify",
				    sock);
				ret = -1;
				goto error;
			}
		}

		if ((!fields && nr_fields > 0) || (fields && nr_fields == 0)) {
			ERR("Invalid return value from lttng_ust_ctl_recv_register_event: fields = %p, nr_fields = %zu",
			    fields,
			    nr_fields);
			ret = -1;
			free(fields);
			goto error;
		}

		/*
		 * Add the event to the UST registry coming from the notify socket.
		 * This call will free, if needed, the sig, fields and model_emf_uri.
		 * This code path loses ownership of these variables and transfers it
		 * to the function being called.
		 */
		ret = add_event_ust_registry(sock,
					     sobjd,
					     cobjd,
					     name,
					     sig,
					     nr_fields,
					     fields,
					     loglevel_value,
					     model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t field_count;
		struct lttng_ust_ctl_field *context_fields;

		DBG2("UST app ustctl register channel received");

		ret = lttng_ust_ctl_recv_register_channel(
			sock, &sobjd, &cobjd, &field_count, &context_fields);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app recv channel failed. Application died: sock = %d",
				     sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app recv channel failed. Communication time out: sock = %d",
				     sock);
			} else {
				ERR("UST app recv channel failed with ret %d: sock = %d",
				    ret,
				    sock);
			}
			goto error;
		}

		/*
		 * Ownership of the context fields is transferred to this function
		 * call, meaning that they will be freed if needed. After this point,
		 * it's invalid to access the fields or clean them up.
		 */
		ret = handle_app_register_channel_notification(
			sock, cobjd, context_fields, field_count);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case LTTNG_UST_CTL_NOTIFY_CMD_ENUM:
	{
		int sobjd;
		char name[LTTNG_UST_ABI_SYM_NAME_LEN];
		size_t nr_entries;
		struct lttng_ust_ctl_enum_entry *entries;

		DBG2("UST app ustctl register enum received");

		ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name, &entries, &nr_entries);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app recv enum failed. Application died: sock = %d", sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app recv enum failed. Communication time out: sock = %d",
				     sock);
			} else {
				ERR("UST app recv enum failed with ret %d: sock = %d", ret, sock);
			}
			goto error;
		}

		/* Callee assumes ownership of entries. */
		ret = add_enum_ust_registry(sock, sobjd, name, entries, nr_entries);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		abort();
	}

error:
	return ret;
}
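
/*
 * The three commands handled above cover everything an application announces
 * lazily over its notify socket: event registrations, channel context fields
 * and enumerations. In each case the recv helper allocates the variable-size
 * payload (sig, fields, entries, ...) and the downstream registry call takes
 * ownership of it, so no payload is freed in this dispatcher on the success
 * path.
 */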
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	LTTNG_ASSERT(sock >= 0);

	lttng::urcu::read_lock_guard read_lock;

	obj = zmalloc<ust_app_notify_sock_obj>();
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independently from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here, either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and if it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	/*
	 * Close the socket after a grace period to avoid it being reused before
	 * the application object is freed, creating a potential race between
	 * threads trying to add a unique entry in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
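
/*
 * The deferred close above is a classic URCU pattern: the file descriptor is
 * only closed from close_notify_sock_rcu() once a grace period has elapsed,
 * which guarantees that no reader still holding the RCU read-side lock can
 * observe the socket (or a recycled fd number) through the hash tables.
 */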
/*
 * Destroy a ust app data structure and free its memory.
 */
static void ust_app_destroy(ust_app& app)
{
	call_rcu(&app.pid_n.head, delete_ust_app_rcu);
}
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
 */
enum lttng_error_code ust_app_snapshot_record(const struct ltt_ust_session *usess,
					      const struct consumer_output *output,
					      uint64_t nb_packets_per_stream)
{
	int ret = 0;
	enum lttng_error_code status = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char *trace_path = nullptr;

	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(output);

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		lttng::urcu::read_lock_guard read_lock;

		cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;
			char pathname[PATH_MAX];
			size_t consumer_path_offset = 0;

			if (!reg->registry->reg.ust->_metadata_key) {
				/* Skip since no metadata is present */
				continue;
			}

			/* Get consumer socket to use to push the metadata. */
			socket = consumer_find_socket_by_bitness(reg->bits_per_long, output);
			if (!socket) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}

			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname,
				       sizeof(pathname),
				       DEFAULT_UST_TRACE_UID_PATH,
				       reg->uid,
				       reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			/* Free path allocated on previous iteration. */
			free(trace_path);
			trace_path = setup_channel_trace_path(
				usess->consumer, pathname, &consumer_path_offset);
			if (!trace_path) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			/* Add the UST default trace dir to path. */
			cds_lfht_for_each_entry (
				reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
				status = consumer_snapshot_channel(socket,
								   buf_reg_chan->consumer_key,
								   output,
								   0,
								   &trace_path[consumer_path_offset],
								   nb_packets_per_stream);
				if (status != LTTNG_OK) {
					goto error;
				}
			}
			status = consumer_snapshot_channel(socket,
							   reg->registry->reg.ust->_metadata_key,
							   output,
							   1,
							   &trace_path[consumer_path_offset],
							   0);
			if (status != LTTNG_OK) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			lsu::registry_session *registry;
			char pathname[PATH_MAX];
			size_t consumer_path_offset = 0;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, output);
			if (!socket) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), "%s", ua_sess->path);
			if (ret < 0) {
				status = LTTNG_ERR_INVALID;
				PERROR("snprintf snapshot path");
				goto error;
			}
			/* Free path allocated on previous iteration. */
			free(trace_path);
			trace_path = setup_channel_trace_path(
				usess->consumer, pathname, &consumer_path_offset);
			if (!trace_path) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			cds_lfht_for_each_entry (
				ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
				status = consumer_snapshot_channel(socket,
								   ua_chan->key,
								   output,
								   0,
								   &trace_path[consumer_path_offset],
								   nb_packets_per_stream);
				switch (status) {
				case LTTNG_OK:
					break;
				case LTTNG_ERR_CHAN_NOT_FOUND:
					continue;
				default:
					goto error;
				}
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}
			status = consumer_snapshot_channel(socket,
							   registry->_metadata_key,
							   output,
							   1,
							   &trace_path[consumer_path_offset],
							   0);
			switch (status) {
			case LTTNG_OK:
				break;
			case LTTNG_ERR_CHAN_NOT_FOUND:
				continue;
			default:
				goto error;
			}
		}
		break;
	}
	default:
		abort();
	}

error:
	free(trace_path);
	return status;
}
/*
 * Return the size taken by one more packet per stream.
 */
uint64_t ust_app_get_size_one_more_packet_per_stream(const struct ltt_ust_session *usess,
						     uint64_t cur_nr_packets)
{
	uint64_t tot_size = 0;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	LTTNG_ASSERT(usess);

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;

			lttng::urcu::read_lock_guard read_lock;

			cds_lfht_for_each_entry (
				reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
				if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
					/*
					 * Don't take the channel into account if we
					 * already grabbed all its packets.
					 */
					continue;
				}
				tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct lttng_ht_iter chan_iter;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			cds_lfht_for_each_entry (
				ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
				if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
					/*
					 * Don't take the channel into account if we
					 * already grabbed all its packets.
					 */
					continue;
				}
				tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
			}
		}
		break;
	}
	default:
		abort();
	}

	return tot_size;
}
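
/*
 * Worked example with illustrative numbers: a channel configured with four
 * sub-buffers of 256 kiB and eight streams contributes
 * 256 kiB * 8 = 2 MiB per additional packet, so it is counted while
 * cur_nr_packets < 4 and skipped once all four of its packets are already
 * accounted for.
 */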
int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
					  struct cds_list_head *buffer_reg_uid_list,
					  struct consumer_output *consumer,
					  uint64_t uchan_id,
					  int overwrite,
					  uint64_t *discarded,
					  uint64_t *lost)
{
	int ret;
	uint64_t consumer_chan_key;

	*discarded = 0;
	*lost = 0;

	ret = buffer_reg_uid_consumer_channel_key(
		buffer_reg_uid_list, uchan_id, &consumer_chan_key);
	if (ret < 0) {
		/* Not found */
		ret = 0;
		goto end;
	}

	if (overwrite) {
		ret = consumer_get_lost_packets(ust_session_id, consumer_chan_key, consumer, lost);
	} else {
		ret = consumer_get_discarded_events(
			ust_session_id, consumer_chan_key, consumer, discarded);
	}

end:
	return ret;
}
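
/*
 * Usage note: the 'overwrite' flag selects which counter is meaningful for the
 * channel's mode. Overwrite-mode channels report lost (overwritten) packets,
 * while discard-mode channels report discarded events; the other counter is
 * left at zero.
 */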
int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
					  struct ltt_ust_channel *uchan,
					  struct consumer_output *consumer,
					  int overwrite,
					  uint64_t *discarded,
					  uint64_t *lost)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	*discarded = 0;
	*lost = 0;

	/*
	 * Iterate over every registered application. Sum counters for
	 * all applications containing the requested session and channel.
	 */
	lttng::urcu::read_lock_guard read_lock;

	cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;

		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == nullptr) {
			continue;
		}

		lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session is found for the app, the channel must be there */
		LTTNG_ASSERT(ua_chan_node);

		ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);

		if (overwrite) {
			uint64_t _lost;

			ret = consumer_get_lost_packets(usess->id, ua_chan->key, consumer, &_lost);
			if (ret < 0) {
				break;
			}
			(*lost) += _lost;
		} else {
			uint64_t _discarded;

			ret = consumer_get_discarded_events(
				usess->id, ua_chan->key, consumer, &_discarded);
			if (ret < 0) {
				break;
			}
			(*discarded) += _discarded;
		}
	}

	return ret;
}
static int ust_app_regenerate_statedump(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Regenerating the metadata for ust app pid %d", app->pid);

	lttng::urcu::read_lock_guard read_lock;

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == nullptr) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_unlock;
	}

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_regenerate_statedump(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end:
	health_code_update();
	return ret;
}
/*
 * Regenerate the statedump for each app in the session.
 */
int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Regenerating the metadata for all UST apps");

	lttng::urcu::read_lock_guard read_lock;

	cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			continue;
		}

		ret = ust_app_regenerate_statedump(usess, app);
		if (ret < 0) {
			/* Continue to the next app even on error */
			continue;
		}
	}

	return 0;
}
/*
 * Rotate all the channels of a session.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ltt_ust_session *usess = session->ust_session;

	LTTNG_ASSERT(usess);

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;
			lttng::urcu::read_lock_guard read_lock;

			/* Get consumer socket to use to push the metadata. */
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
								 usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error;
			}

			/* Rotate the data channels. */
			cds_lfht_for_each_entry (
				reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
				ret = consumer_rotate_channel(socket,
							      buf_reg_chan->consumer_key,
							      usess->consumer,
							      /* is_metadata_channel */ false);
				if (ret < 0) {
					cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
					goto error;
				}
			}

			/*
			 * The metadata channel might not be present.
			 *
			 * Consumer stream allocation can be done
			 * asynchronously and can fail on intermediary
			 * operations (i.e. add context) and lead to data
			 * channels created with no metadata channel.
			 */
			if (!reg->registry->reg.ust->_metadata_key) {
				/* Skip since no metadata is present. */
				continue;
			}

			{
				auto locked_registry = reg->registry->reg.ust->lock();
				(void) push_metadata(locked_registry, usess->consumer);
			}

			ret = consumer_rotate_channel(socket,
						      reg->registry->reg.ust->_metadata_key,
						      usess->consumer,
						      /* is_metadata_channel */ true);
			if (ret < 0) {
				cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		lttng::urcu::read_lock_guard read_lock;
		ust_app *raw_app;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, raw_app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			lsu::registry_session *registry;
			bool app_reference_taken;

			app_reference_taken = ust_app_get(*raw_app);
			if (!app_reference_taken) {
				/* Application unregistered concurrently, skip it. */
				DBG("Could not get application reference as it is being torn down; skipping application");
				continue;
			}

			ust_app_reference app(raw_app);

			ua_sess = lookup_session_by_app(usess, app.get());
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
								 usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error;
			}

			registry = get_session_registry(ua_sess);
			LTTNG_ASSERT(registry);

			/* Rotate the data channels. */
			cds_lfht_for_each_entry (
				ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
				ret = consumer_rotate_channel(socket,
							      ua_chan->key,
							      ua_sess->consumer,
							      /* is_metadata_channel */ false);
				if (ret < 0) {
					cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
					goto error;
				}
			}

			/* Rotate the metadata channel. */
			{
				auto locked_registry = registry->lock();
				(void) push_metadata(locked_registry, usess->consumer);
			}

			ret = consumer_rotate_channel(socket,
						      registry->_metadata_key,
						      ua_sess->consumer,
						      /* is_metadata_channel */ true);
			if (ret < 0) {
				cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
				goto error;
			}
		}
		break;
	}
	default:
		abort();
	}

	cmd_ret = LTTNG_OK;

error:
	return cmd_ret;
}
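
/*
 * The per-PID branch above demonstrates the reference-count guard used when
 * iterating over live applications: ust_app_get() only succeeds while the
 * application's refcount is non-zero, and the ust_app_reference wrapper (a
 * RAII guard) is responsible for putting the reference back when the iteration
 * scope ends, including on the error paths. Sketch (same names as above, for
 * illustration):
 *
 *   if (!ust_app_get(*raw_app)) {
 *           continue;                    // app is being torn down
 *   }
 *   ust_app_reference app(raw_app);      // releases the reference on scope exit
 */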
enum lttng_error_code ust_app_create_channel_subdirectories(const struct ltt_ust_session *usess)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	enum lttng_trace_chunk_status chunk_status;
	char *pathname_index;
	int fmt_ret;

	LTTNG_ASSERT(usess->current_trace_chunk);

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		lttng::urcu::read_lock_guard read_lock;

		cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
			fmt_ret = asprintf(&pathname_index,
					   DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH
								 "/" DEFAULT_INDEX_DIR,
					   reg->uid,
					   reg->bits_per_long);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}

			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
				usess->current_trace_chunk, pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;
		lttng::urcu::read_lock_guard read_lock;

		/*
		 * Create the toplevel ust/ directory in case no apps are running.
		 */
		chunk_status = lttng_trace_chunk_create_subdirectory(usess->current_trace_chunk,
								     DEFAULT_UST_TRACE_DIR);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ret = LTTNG_ERR_CREATE_DIR_FAIL;
			goto error;
		}

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct ust_app_session *ua_sess;
			lsu::registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			fmt_ret = asprintf(&pathname_index,
					   DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
					   ua_sess->path);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
				usess->current_trace_chunk, pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	default:
		abort();
	}

	ret = LTTNG_OK;
error:
	return ret;
}
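
/*
 * For example (hypothetical values), a per-UID 64-bit registry for uid 1000
 * would yield an index path along the lines of "ust/uid/1000/64-bit/index"
 * inside the current trace chunk, the exact segments being dictated by
 * DEFAULT_UST_TRACE_DIR, DEFAULT_UST_TRACE_UID_PATH and DEFAULT_INDEX_DIR;
 * creating that leaf directory implicitly creates the channel's parents.
 */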
/*
 * Clear all the channels of a session.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ltt_ust_session *usess = session->ust_session;

	LTTNG_ASSERT(usess);

	if (usess->active) {
		ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
		cmd_ret = LTTNG_ERR_FATAL;
		goto end;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		lttng::urcu::read_lock_guard read_lock;

		cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata. */
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
								 usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error_socket;
			}

			/* Clear the data channels. */
			cds_lfht_for_each_entry (
				reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
				ret = consumer_clear_channel(socket, buf_reg_chan->consumer_key);
				if (ret < 0) {
					goto error;
				}
			}

			{
				auto locked_registry = reg->registry->reg.ust->lock();
				(void) push_metadata(locked_registry, usess->consumer);
			}

			/*
			 * Clear the metadata channel.
			 * The metadata channel is not cleared per se but we still need
			 * to perform a rotation operation on it behind the scene.
			 */
			ret = consumer_clear_channel(socket,
						     reg->registry->reg.ust->_metadata_key);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			lsu::registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
								 usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error_socket;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			/* Clear the data channels. */
			cds_lfht_for_each_entry (
				ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
				ret = consumer_clear_channel(socket, ua_chan->key);
				if (ret < 0) {
					/* Per-PID buffer and application going away. */
					if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
						continue;
					}
					goto error;
				}
			}

			{
				auto locked_registry = registry->lock();
				(void) push_metadata(locked_registry, usess->consumer);
			}

			/*
			 * Clear the metadata channel.
			 * The metadata channel is not cleared per se but we still need
			 * to perform a rotation operation on it behind the scene.
			 */
			ret = consumer_clear_channel(socket, registry->_metadata_key);
			if (ret < 0) {
				/* Per-PID buffer and application going away. */
				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
					continue;
				}
				goto error;
			}
		}
		break;
	}
	default:
		abort();
	}

	cmd_ret = LTTNG_OK;
	goto end;

error:
	switch (-ret) {
	case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
		cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
		break;
	default:
		cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
		break;
	}

error_socket:
end:
	return cmd_ret;
}
/*
 * This function skips the metadata channel as the begin/end timestamps of a
 * metadata packet are useless.
 *
 * Moreover, opening a packet after a "clear" will cause problems for live
 * sessions as it will introduce padding that was not part of the first trace
 * chunk. The relay daemon expects the content of the metadata stream of
 * successive metadata trace chunks to be strict supersets of one another.
 *
 * For example, flushing a packet at the beginning of the metadata stream of
 * a trace chunk resulting from a "clear" session command will cause the
 * size of the metadata stream of the new trace chunk to not match the size of
 * the metadata stream of the original chunk. This will confuse the relay
 * daemon as the same "offset" in a metadata stream will no longer point
 * to the same content.
 */
enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ltt_ust_session *usess = session->ust_session;

	LTTNG_ASSERT(usess);

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;
			lttng::urcu::read_lock_guard read_lock;

			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
								 usess->consumer);
			if (!socket) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			cds_lfht_for_each_entry (
				reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
				const int open_ret = consumer_open_channel_packets(
					socket, buf_reg_chan->consumer_key);

				if (open_ret < 0) {
					ret = LTTNG_ERR_UNK;
					goto error;
				}
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			lsu::registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
								 usess->consumer);
			if (!socket) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			cds_lfht_for_each_entry (
				ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
				const int open_ret =
					consumer_open_channel_packets(socket, ua_chan->key);

				if (open_ret < 0) {
					/*
					 * Per-PID buffer and application going
					 * away.
					 */
					if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
						continue;
					}

					ret = LTTNG_ERR_UNK;
					goto error;
				}
			}
		}
		break;
	}
	default:
		abort();
	}

error:
	return ret;
}
lsu::ctl_field_quirks ust_app::ctl_field_quirks() const
{
	/*
	 * Application contexts are expressed as variants. LTTng-UST announces
	 * those by registering an enumeration named `..._tag`. It then registers a
	 * variant as part of the event context that contains the various possible
	 * types.
	 *
	 * Unfortunately, the names used in the enumeration and variant don't
	 * match: the enumeration names are all prefixed with an underscore while
	 * the variant type tag fields aren't.
	 *
	 * While the CTF 1.8.3 specification mentions that underscores *should*
	 * (not *must*) be removed by CTF readers, Babeltrace 1.x (and possibly
	 * others) expects a perfect match between the names used by tags and
	 * variants.
	 *
	 * When the UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS quirk is enabled,
	 * the variant's fields are modified to match the mappings of its tag.
	 *
	 * From ABI version >= 10.x, the variant fields and tag mapping names
	 * correctly match, making this quirk unnecessary.
	 */
	return v_major <= 9 ? lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS :
			      lsu::ctl_field_quirks::NONE;
}
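
/*
 * A concrete illustration of the mismatch described above, using a
 * hypothetical context: the application registers an enumeration "ctx_tag"
 * whose mappings are named "_s64", "_string", ... while the corresponding
 * variant fields are named "s64", "string", .... With the quirk enabled, the
 * variant field names are rewritten to carry the leading underscore so that
 * readers such as Babeltrace 1.x can match them against the tag's mappings.
 */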
static void ust_app_release(urcu_ref *ref)
{
	auto& app = *lttng::utils::container_of(ref, &ust_app::ref);

	ust_app_unregister(app);
	ust_app_destroy(app);
}

bool ust_app_get(ust_app& app)
{
	return urcu_ref_get_unless_zero(&app.ref);
}

void ust_app_put(struct ust_app *app)
{
	if (!app) {
		return;
	}

	urcu_ref_put(&app->ref, ust_app_release);
}
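
/*
 * Usage note: every successful ust_app_get() must be balanced by an
 * ust_app_put(). When the last reference is dropped, ust_app_release()
 * unregisters the application and schedules its destruction through
 * call_rcu(), so callers must never touch the pointer after their final put.
 */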